Example #1
    def test_h5(self):
        # Create HDF5 test data
        with h5py.File(self.testH5FileName, "w") as f:
            f.create_group("volume")
            shape = (1, 2, 3, 4, 5)
            f["volume"].create_dataset(
                "data", data=numpy.indices(shape).sum(0).astype(numpy.float32), chunks=True, compression="gzip"
            )

        # Read the entire HDF5 file and verify the contents
        h5Reader = OpInputDataReader(graph=self.graph)
        try:
            h5Reader.FilePath.setValue(self.testH5FileName + "/volume/data")  # Append internal path
            cwd = os.path.split(__file__)[0]
            h5Reader.WorkingDirectory.setValue(cwd)

            # Grab a section of the h5 data
            h5Data = h5Reader.Output[0, 0, :, :, :].wait()
            assert h5Data.shape == (1, 1, 3, 4, 5)
            # (Just check part of the data)
            for k in range(0, shape[2]):
                for l in range(0, shape[3]):
                    for m in range(0, shape[4]):
                        assert h5Data[0, 0, k, l, m] == k + l + m

        finally:
            # Call cleanUp() to close the file that this operator opened
            h5Reader.cleanUp()
            assert not h5Reader._file  # Whitebox assertion...
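Note: the expected values in the assertion follow from how the test data is constructed. numpy.indices(shape).sum(0) yields an array whose value at each coordinate equals the sum of that coordinate's indices, so with the first two indices pinned to 0 the value is simply k + l + m. A minimal standalone check:

import numpy

shape = (1, 2, 3, 4, 5)
volume = numpy.indices(shape).sum(0)  # volume[i, j, k, l, m] == i + j + k + l + m
assert volume[0, 1, 2, 3, 4] == 0 + 1 + 2 + 3 + 4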
Example #2
    def test_via_OpExportSlot(self):
        data = 255 * numpy.random.random((64, 128, 128, 1))
        data = data.astype(numpy.uint8)
        data = vigra.taggedView(data, vigra.defaultAxistags("zyxc"))

        graph = Graph()

        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue("dvid")
        url = "http://{hostname}/api/node/{data_uuid}/{data_name}".format(
            **self.__dict__)
        opExport.OutputFilenameFormat.setValue(url)

        assert opExport.ExportPath.ready()
        assert opExport.ExportPath.value == url

        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output(*roiFromShape(data.shape)).wait()
            assert (read_data == expected_data
                    ).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
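The dvid URL here is assembled with str.format(**self.__dict__), which pulls hostname, data_uuid, and data_name from instance attributes set elsewhere in the test fixture, and opRead.Output(*roiFromShape(data.shape)) requests the full extent of the dataset. A sketch of the formatting mechanism with made-up values:

class Fixture:
    pass

fixture = Fixture()
fixture.hostname = "localhost:8000"   # hypothetical values; the real ones
fixture.data_uuid = "abcde"           # come from the test's setup code
fixture.data_name = "indices_data"

url = "http://{hostname}/api/node/{data_uuid}/{data_name}".format(**fixture.__dict__)
assert url == "http://localhost:8000/api/node/abcde/indices_data"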
Example #3
    def test_npz(self):
        # Create two Numpy test arrays
        a = numpy.zeros((10, 11))
        for x in range(0, 10):
            for y in range(0, 11):
                a[x, y] = x + y

        b = numpy.arange(3 * 9).reshape((3, 9))
        numpy.savez(self.testNpzDataFileName, a=a, b=b)

        # Now read back our test data using an OpInputDataReader operator
        npyReader = OpInputDataReader(graph=self.graph)

        try:
            for internalPath, referenceArray in zip(["a", "b"], [a, b]):
                npyReader.FilePath.setValue("{}/{}".format(
                    self.testNpzDataFileName, internalPath))
                cwd = os.path.split(__file__)[0]
                npyReader.WorkingDirectory.setValue(cwd)

                npzData = npyReader.Output[:].wait()
                assert npzData.shape == referenceArray.shape
                numpy.testing.assert_array_equal(npzData, referenceArray)
        finally:
            npyReader.cleanUp()
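OpInputDataReader resolves the trailing path component ("a" or "b") as the array name inside the .npz archive, mirroring plain numpy semantics:

import numpy

a = numpy.zeros((10, 11))
b = numpy.arange(3 * 9).reshape((3, 9))
numpy.savez("test_data.npz", a=a, b=b)

with numpy.load("test_data.npz") as archive:
    numpy.testing.assert_array_equal(archive["a"], a)
    numpy.testing.assert_array_equal(archive["b"], b)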
Example #4
            def handleImportLabelsAction():
                # Find the directory of the most recently opened image file
                mostRecentImageFile = PreferencesManager().get(
                    'DataSelection', 'recent image')
                if mostRecentImageFile is not None:
                    defaultDirectory = os.path.split(mostRecentImageFile)[0]
                else:
                    defaultDirectory = os.path.expanduser('~')
                fileNames = DataSelectionGui.getImageFileNamesToOpen(
                    self, defaultDirectory)
                fileNames = list(map(str, fileNames))

                # For now, we require a single hdf5 file
                if len(fileNames) > 1:
                    QMessageBox.critical(
                        self, "Too many files",
                        "Labels must be contained in a single hdf5 volume.")
                    return
                if len(fileNames) == 0:
                    # user cancelled
                    return

                file_path = fileNames[0]
                internal_paths = DataSelectionGui.getPossibleInternalPaths(
                    file_path)
                if len(internal_paths) == 0:
                    QMessageBox.critical(
                        self, "No volumes in file",
                        "Couldn't find a suitable dataset in your hdf5 file.")
                    return
                if len(internal_paths) == 1:
                    internal_path = internal_paths[0]
                else:
                    dlg = H5VolumeSelectionDlg(internal_paths, self)
                    if dlg.exec_() == QDialog.Rejected:
                        return
                    selected_index = dlg.combo.currentIndex()
                    internal_path = str(internal_paths[selected_index])

                path_components = PathComponents(file_path)
                path_components.internalPath = str(internal_path)

                try:
                    top_op = self.topLevelOperatorView
                    opReader = OpInputDataReader(parent=top_op.parent)
                    opReader.FilePath.setValue(path_components.totalPath())

                    # Reorder the axes
                    op5 = OpReorderAxes(parent=top_op.parent)
                    op5.AxisOrder.setValue(
                        top_op.LabelInputs.meta.getAxisKeys())
                    op5.Input.connect(opReader.Output)

                    # Finally, import the labels
                    top_op.importLabels(top_op.current_view_index(),
                                        op5.Output)

                finally:
                    op5.cleanUp()
                    opReader.cleanUp()
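One caveat with this try/finally layout: if OpInputDataReader(...) raises, op5 (and possibly opReader) is never assigned, and the finally block itself fails with a NameError. A defensive variant (a sketch, not the original code) initializes both names first and guards the cleanup:

opReader = None
op5 = None
try:
    opReader = OpInputDataReader(parent=top_op.parent)
    opReader.FilePath.setValue(path_components.totalPath())

    op5 = OpReorderAxes(parent=top_op.parent)
    op5.AxisOrder.setValue(top_op.LabelInputs.meta.getAxisKeys())
    op5.Input.connect(opReader.Output)

    top_op.importLabels(top_op.current_view_index(), op5.Output)
finally:
    if op5 is not None:
        op5.cleanUp()
    if opReader is not None:
        opReader.cleanUp()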
Example #5
    def testBasic_Hdf5(self):
        data = numpy.random.random((100, 100)).astype(numpy.float32)
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

        graph = Graph()
        opExport = OpExportSlot(graph=graph)
        opExport.Input.setValue(data)
        opExport.OutputFormat.setValue('hdf5')
        opExport.OutputFilenameFormat.setValue(self._tmpdir + '/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
        opExport.OutputInternalPath.setValue('volume/data')
        opExport.CoordinateOffset.setValue((10, 20))

        assert opExport.ExportPath.ready()
        export_file = PathComponents(opExport.ExportPath.value).externalPath
        assert os.path.split(export_file)[1] == 'test_export_x10-110_y20-120.h5'
        #print "exporting data to: {}".format( opExport.ExportPath.value )
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        opRead.FilePath.setValue(opExport.ExportPath.value)
        expected_data = data.view(numpy.ndarray)
        read_data = opRead.Output[:].wait()
        assert (read_data == expected_data).all(), "Read data didn't match exported data!"

        opRead.cleanUp()
Example #6
    def test_h5(self):
        # Create HDF5 test data
        with h5py.File(self.testH5FileName, "w") as f:
            f.create_group("volume")
            shape = (1, 2, 3, 4, 5)
            f["volume"].create_dataset(
                "data", data=numpy.indices(shape).sum(0).astype(numpy.float32), chunks=True, compression="gzip"
            )

        # Read the entire HDF5 file and verify the contents
        h5Reader = OpInputDataReader(graph=self.graph)
        try:
            h5Reader.FilePath.setValue(self.testH5FileName + "/volume/data")  # Append internal path
            cwd = os.path.split(__file__)[0]
            h5Reader.WorkingDirectory.setValue(cwd)

            # Grab a section of the h5 data
            h5Data = h5Reader.Output[0, 0, :, :, :].wait()
            assert h5Data.shape == (1, 1, 3, 4, 5)
            # (Just check part of the data)
            for k in range(0, shape[2]):
                for l in range(0, shape[3]):
                    for m in range(0, shape[4]):
                        assert h5Data[0, 0, k, l, m] == k + l + m

        finally:
            # Call cleanUp() to close the file that this operator opened
            h5Reader.cleanUp()
            assert not h5Reader._file  # Whitebox assertion...
Example #7
    def test_h5_stack_single_file(self, sequence_axis):
        """Test stack/sequence reading in hdf5-files for given 'sequence_axis'"""
        shape = (4, 8, 16, 32, 3)  # assuming axis guess order is 'tzyxc'
        data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
        with h5py.File(self.testH5FileName, "w") as f:
            data_group = f.create_group("volumes")
            for index, t_slice in enumerate(data):
                data_group.create_dataset("timepoint-{index:02d}".format(index=index), data=t_slice)

        if sequence_axis == "z":
            data = numpy.concatenate(data, axis=0)
        elif sequence_axis == "c":
            data = numpy.concatenate(data, axis=-1)

        h5SequenceReader = OpInputDataReader(graph=self.graph)
        h5SequenceReader.SequenceAxis.setValue(sequence_axis)
        filenamePlusGlob = "{}/volumes/timepoint-*".format(self.testH5FileName)
        try:
            h5SequenceReader.FilePath.setValue(filenamePlusGlob)

            h5data = h5SequenceReader.Output[:].wait()
            assert h5data.shape == data.shape, f"{h5data.shape}, {data.shape}"
            numpy.testing.assert_array_equal(h5data, data)
        finally:
            # Call cleanUp() to close the file that this operator opened
            h5SequenceReader.cleanUp()
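For reference, the two concatenation branches change the expected shape in different ways: stacking the (4, 8, 16, 32, 3) timepoints along 'z' merges them into the first remaining axis, while 'c' merges them into the channel axis (stacking along 't' leaves the shape unchanged). In plain numpy:

import numpy

data = numpy.zeros((4, 8, 16, 32, 3))
assert numpy.concatenate(data, axis=0).shape == (32, 16, 32, 3)    # 'z': 4 * 8 slices
assert numpy.concatenate(data, axis=-1).shape == (8, 16, 32, 12)   # 'c': 4 * 3 channels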
Example #8
    def test_basic(self):
        opData = OpArrayCache(graph=self.graph)
        opData.blockShape.setValue(self.testData.shape)
        opData.Input.setValue(self.testData)

        filepath = os.path.join(self._tmpdir, 'multipage.tiff')
        logger.debug("writing to: {}".format(filepath))

        opExport = OpExportMultipageTiff(graph=self.graph)
        opExport.Filepath.setValue(filepath)
        opExport.Input.connect(opData.Output)

        # Run the export
        opExport.run_export()

        opReader = OpInputDataReader(graph=self.graph)
        opReader.FilePath.setValue(filepath)

        # The reader assumes xyzc order.
        # We have to transpose the data before we compare.
        opReorderAxes = OpReorderAxes(graph=self.graph)
        opReorderAxes.AxisOrder.setValue(self._axisorder)
        opReorderAxes.Input.connect(opReader.Output)

        readData = opReorderAxes.Output[:].wait()
        logger.debug("Expected shape={}".format(self.testData.shape))
        logger.debug("Read shape={}".format(readData.shape))

        assert opReorderAxes.Output.meta.shape == self.testData.shape, "Exported files were of the wrong shape or number."
        assert (opReorderAxes.Output[:].wait() == self.testData.view(numpy.ndarray)).all(), "Exported data was not correct"

        # Cleanup
        opReorderAxes.cleanUp()
        opReader.cleanUp()
Example #9
    def test_h5_stack_single_file(self, sequence_axis):
        """Test stack/sequence reading in hdf5-files for given 'sequence_axis'"""
        shape = (4, 8, 16, 32, 3)  # assuming axis guess order is 'tzyxc'
        data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
        with h5py.File(self.testH5FileName, "w") as f:
            data_group = f.create_group("volumes")
            for index, t_slice in enumerate(data):
                data_group.create_dataset("timepoint-{index:02d}".format(index=index), data=t_slice)

        if sequence_axis == "z":
            data = numpy.concatenate(data, axis=0)
        elif sequence_axis == "c":
            data = numpy.concatenate(data, axis=-1)

        h5SequenceReader = OpInputDataReader(graph=self.graph)
        h5SequenceReader.SequenceAxis.setValue(sequence_axis)
        filenamePlusGlob = "{}/volumes/timepoint-*".format(self.testH5FileName)
        try:
            h5SequenceReader.FilePath.setValue(filenamePlusGlob)

            h5data = h5SequenceReader.Output[:].wait()
            assert h5data.shape == data.shape, f"{h5data.shape}, {data.shape}"
            numpy.testing.assert_array_equal(h5data, data)
        finally:
            # Call cleanUp() to close the file that this operator opened
            h5SequenceReader.cleanUp()
Example #10
    def test_h5_stack_multi_file(self, sequence_axis):
        """Test stack/sequence reading in hdf5-files"""
        shape = (4, 8, 16, 32, 3)
        data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
        for index, t_slice in enumerate(data):
            fname = self.testmultiH5FileName.format(index=index)
            with h5py.File(fname, "w") as f:
                data_group = f.create_group("volume")
                data_group.create_dataset("data", data=t_slice)

        if sequence_axis == "z":
            data = numpy.concatenate(data, axis=0)
        elif sequence_axis == "c":
            data = numpy.concatenate(data, axis=-1)

        h5SequenceReader = OpInputDataReader(graph=self.graph)
        h5SequenceReader.SequenceAxis.setValue(sequence_axis)
        globString = self.testmultiH5FileName.replace("02d}", "s}").format(index="*")
        filenamePlusGlob = "{}/volume/data".format(globString)
        try:
            h5SequenceReader.FilePath.setValue(filenamePlusGlob)
            h5data = h5SequenceReader.Output[:].wait()
            assert h5data.shape == data.shape
            numpy.testing.assert_array_equal(h5data, data)
        finally:
            # Call cleanUp() to close the file that this operator opened
            h5SequenceReader.cleanUp()
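The glob construction relies on the file name template containing an "{index:02d}" placeholder: replacing "02d}" with "s}" turns it into a string field that can be formatted with "*". Assuming the fixture's template looks like the one below (a hypothetical name), the transformation works like this:

template = "test_multi_file_{index:02d}.h5"  # hypothetical stand-in for testmultiH5FileName
glob_string = template.replace("02d}", "s}").format(index="*")
assert glob_string == "test_multi_file_*.h5"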
Example #11
    def test_h5_stack_multi_file(self, sequence_axis):
        """Test stack/sequence reading in hdf5-files"""
        shape = (4, 8, 16, 32, 3)
        data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
        for index, t_slice in enumerate(data):
            fname = self.testmultiH5FileName.format(index=index)
            with h5py.File(fname, "w") as f:
                data_group = f.create_group("volume")
                data_group.create_dataset("data", data=t_slice)

        if sequence_axis == "z":
            data = numpy.concatenate(data, axis=0)
        elif sequence_axis == "c":
            data = numpy.concatenate(data, axis=-1)

        h5SequenceReader = OpInputDataReader(graph=self.graph)
        h5SequenceReader.SequenceAxis.setValue(sequence_axis)
        globString = self.testmultiH5FileName.replace("02d}", "s}").format(index="*")
        filenamePlusGlob = "{}/volume/data".format(globString)
        try:
            h5SequenceReader.FilePath.setValue(filenamePlusGlob)
            h5data = h5SequenceReader.Output[:].wait()
            assert h5data.shape == data.shape
            numpy.testing.assert_array_equal(h5data, data)
        finally:
            # Call cleanUp() to close the file that this operator opened
            h5SequenceReader.cleanUp()
Example #12
    def testBasic_Hdf5(self):
        data = numpy.random.random((100, 100)).astype(numpy.float32)
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

        graph = Graph()
        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue('hdf5')
        opExport.OutputFilenameFormat.setValue(
            self._tmpdir +
            '/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
        opExport.OutputInternalPath.setValue('volume/data')
        opExport.CoordinateOffset.setValue((10, 20))

        assert opExport.ExportPath.ready()
        export_file = PathComponents(opExport.ExportPath.value).externalPath
        assert os.path.split(
            export_file)[1] == 'test_export_x10-110_y20-120.h5'
        #print "exporting data to: {}".format( opExport.ExportPath.value )
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        opRead.FilePath.setValue(opExport.ExportPath.value)
        expected_data = data.view(numpy.ndarray)
        read_data = opRead.Output[:].wait()
        assert (read_data == expected_data
                ).all(), "Read data didn't match exported data!"

        opRead.cleanUp()
Example #13
    def test_basic(self):
        opData = OpArrayCache(graph=self.graph)
        opData.blockShape.setValue(self.testData.shape)
        opData.Input.setValue(self.testData)

        filepath = os.path.join(self._tmpdir, 'multipage.tiff')
        logger.debug("writing to: {}".format(filepath))

        opExport = OpExportMultipageTiff(graph=self.graph)
        opExport.Filepath.setValue(filepath)
        opExport.Input.connect(opData.Output)

        # Run the export
        opExport.run_export()

        opReader = OpInputDataReader(graph=self.graph)
        opReader.FilePath.setValue(filepath)

        # The reader assumes xyzc order.
        # We have to transpose the data before we compare.
        opReorderAxes = OpReorderAxes(graph=self.graph)
        opReorderAxes.AxisOrder.setValue(self._axisorder)
        opReorderAxes.Input.connect(opReader.Output)

        readData = opReorderAxes.Output[:].wait()
        logger.debug("Expected shape={}".format(self.testData.shape))
        logger.debug("Read shape={}".format(readData.shape))

        assert opReorderAxes.Output.meta.shape == self.testData.shape, "Exported files were of the wrong shape or number."
        assert (opReorderAxes.Output[:].wait() == self.testData.view(
            numpy.ndarray)).all(), "Exported data was not correct"

        # Cleanup
        opReorderAxes.cleanUp()
        opReader.cleanUp()
Example #14
    def testBasic(self):
        data = numpy.random.random((100, 100)).astype(numpy.float32)
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

        graph = Graph()

        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opWriter = OpNpyWriter(graph=graph)
        opWriter.Input.connect(opPiper.Output)
        opWriter.Filepath.setValue(self._tmpdir +
                                   '/npy_writer_test_output.npy')

        # Write it...
        opWriter.write()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opWriter.Filepath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data
                    ).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #15
    def testBasic_2d(self):
        data = 255 * numpy.random.random((50, 100))
        data = data.astype(numpy.uint8)
        data = vigra.taggedView(data, vigra.defaultAxistags("yx"))

        graph = Graph()

        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue("png")
        opExport.OutputFilenameFormat.setValue(self._tmpdir + "/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}")
        opExport.CoordinateOffset.setValue((10, 20))

        assert opExport.ExportPath.ready()
        assert os.path.split(opExport.ExportPath.value)[1] == "test_export_x20-120_y10-60.png"
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output[:].wait()

            # Note: vigra inserts a channel axis, so read_data is xyc
            assert (read_data[..., 0] == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
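The expected file name can be derived by hand: the data axes are 'yx' with shape (50, 100), and CoordinateOffset appears to be applied per axis in that same order, so y runs 10..60 and x runs 20..120:

shape = (50, 100)      # axes 'yx'
offset = (10, 20)      # per-axis offset, also 'yx'
y_start, x_start = offset
y_stop, x_stop = offset[0] + shape[0], offset[1] + shape[1]
assert "test_export_x{}-{}_y{}-{}.png".format(x_start, x_stop, y_start, y_stop) \
    == "test_export_x20-120_y10-60.png"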
Example #16
    def test_via_OpExportSlot(self):
        data = 255 * numpy.random.random((64, 128, 128, 1))
        data = data.astype(numpy.uint8)
        data = vigra.taggedView(data, vigra.defaultAxistags('zyxc'))

        graph = Graph()

        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue('dvid')
        url = 'http://{hostname}/api/node/{data_uuid}/{data_name}'.format(**self.__dict__)
        opExport.OutputFilenameFormat.setValue(url)

        assert opExport.ExportPath.ready()
        assert opExport.ExportPath.value == url

        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output(*roiFromShape(data.shape)).wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #17
    def testBasic_Npy(self):
        data = numpy.random.random((100, 100)).astype(numpy.float32)
        data = vigra.taggedView(data, vigra.defaultAxistags("xy"))

        graph = Graph()
        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue("numpy")
        opExport.OutputFilenameFormat.setValue(self._tmpdir + "/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}")
        opExport.CoordinateOffset.setValue((10, 20))

        assert opExport.ExportPath.ready()
        assert os.path.split(opExport.ExportPath.value)[1] == "test_export_x10-110_y20-120.npy"
        # print "exporting data to: {}".format( opExport.ExportPath.value )
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #18
    def testBasic(self):
        graph = Graph()
        opExport = OpFormattedDataExport(graph=graph)

        data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))
        opExport.Input.setValue(data)

        sub_roi = [(10, 0), (None, 80)]
        opExport.RegionStart.setValue(sub_roi[0])
        opExport.RegionStop.setValue(sub_roi[1])

        opExport.ExportDtype.setValue(numpy.uint8)

        opExport.InputMin.setValue(0.0)
        opExport.InputMax.setValue(100.0)
        opExport.ExportMin.setValue(100)
        opExport.ExportMax.setValue(200)

        opExport.OutputFormat.setValue('hdf5')
        opExport.OutputFilenameFormat.setValue(
            self._tmpdir + '/export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
        opExport.OutputInternalPath.setValue('volume/data')

        opExport.TransactionSlot.setValue(True)

        assert opExport.ImageToExport.ready()
        assert opExport.ExportPath.ready()
        assert opExport.ImageToExport.meta.drange == (100, 200)

        #print "exporting data to: {}".format( opExport.ExportPath.value )
        assert opExport.ExportPath.value == self._tmpdir + '/' + 'export_x10-100_y0-80.h5/volume/data'
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)

            # Compare with the correct subregion and convert dtype.
            sub_roi[1] = (100, 80)  # Replace 'None' with full extent
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            expected_data += 100  # see renormalization settings

            assert opRead.Output.meta.shape == expected_data.shape
            assert opRead.Output.meta.dtype == expected_data.dtype
            read_data = opRead.Output[:].wait()

            # Due to rounding errors, the actual result and the expected result may differ by 1
            #  e.g. if the original pixel value was 32.99999999
            # Also, must promote to signed values to avoid unsigned rollover
            # See issue ( https://github.com/ilastik/lazyflow/issues/165 ).
            expected_data_signed = expected_data.astype(numpy.int16)
            read_data_signed = read_data.astype(numpy.int16)
            difference_from_expected = expected_data_signed - read_data_signed
            assert (numpy.abs(difference_from_expected) <=
                    1).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
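The signed promotion in the comparison matters because uint8 subtraction wraps around instead of going negative, which would defeat the abs(...) <= 1 tolerance. A short demonstration:

import numpy

a = numpy.array([100], dtype=numpy.uint8)
b = numpy.array([101], dtype=numpy.uint8)
assert (a - b)[0] == 255                                          # unsigned rollover
assert (a.astype(numpy.int16) - b.astype(numpy.int16))[0] == -1   # signed difference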
Example #19
    def testBasic(self):
        graph = Graph()
        opExport = OpDataExport(graph=graph)
        try:
            opExport.TransactionSlot.setValue(True)
            opExport.WorkingDirectory.setValue(self._tmpdir)

            # Simulate the important fields of a DatasetInfo object
            class MockDatasetInfo(object):
                pass

            rawInfo = MockDatasetInfo()
            rawInfo.nickname = 'test_nickname'
            rawInfo.filePath = './somefile.h5'
            opExport.RawDatasetInfo.setValue(rawInfo)

            opExport.SelectionNames.setValue(['Mock Export Data'])

            data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
            data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

            opExport.Inputs.resize(1)
            opExport.Inputs[0].setValue(data)

            sub_roi = [(10, 20), (90, 80)]
            opExport.RegionStart.setValue(sub_roi[0])
            opExport.RegionStop.setValue(sub_roi[1])

            opExport.ExportDtype.setValue(numpy.uint8)

            opExport.OutputFormat.setValue('hdf5')
            opExport.OutputFilenameFormat.setValue(
                '{dataset_dir}/{nickname}_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}'
            )
            opExport.OutputInternalPath.setValue('volume/data')

            assert opExport.ImageToExport.ready()
            assert opExport.ExportPath.ready()

            expected_path = self._tmpdir + '/' + rawInfo.nickname + '_export_x10-90_y20-80.h5/volume/data'
            computed_path = opExport.ExportPath.value
            assert os.path.normpath(computed_path) == os.path.normpath(expected_path), \
                "Expected {}\nGot: {}".format( expected_path, computed_path )
            opExport.run_export()
        finally:
            opExport.cleanUp()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(computed_path)

            # Compare with the correct subregion and convert dtype.
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data
                    ).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #20
    def testBasic(self, tmp_h5_single_dataset: Path):
        graph = Graph()
        opExport = OpDataExport(graph=graph)
        try:
            opExport.TransactionSlot.setValue(True)
            opExport.WorkingDirectory.setValue(self._tmpdir)

            rawInfo = FilesystemDatasetInfo(filePath=str(
                tmp_h5_single_dataset / "test_group/test_data"),
                                            nickname="test_nickname")

            opExport.RawDatasetInfo.setValue(rawInfo)

            opExport.SelectionNames.setValue(["Mock Export Data"])

            data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
            data = vigra.taggedView(data, vigra.defaultAxistags("xy"))

            opExport.Inputs.resize(1)
            opExport.Inputs[0].setValue(data)

            sub_roi = [(10, 20), (90, 80)]
            opExport.RegionStart.setValue(sub_roi[0])
            opExport.RegionStop.setValue(sub_roi[1])

            opExport.ExportDtype.setValue(numpy.uint8)

            opExport.OutputFormat.setValue("hdf5")
            opExport.OutputFilenameFormat.setValue(
                "{dataset_dir}/{nickname}_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}"
            )
            opExport.OutputInternalPath.setValue("volume/data")

            assert opExport.ImageToExport.ready()
            assert opExport.ExportPath.ready()

            expected_path = tmp_h5_single_dataset.parent.joinpath(
                rawInfo.nickname +
                "_export_x10-90_y20-80.h5/volume/data").as_posix()
            computed_path = opExport.ExportPath.value
            assert os.path.normpath(computed_path) == os.path.normpath(
                expected_path)
            opExport.run_export()
        finally:
            opExport.cleanUp()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(computed_path)

            # Compare with the correct subregion and convert dtype.
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data
                    ).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #21
    def testBasic(self):
        graph = Graph()
        opExport = OpFormattedDataExport(graph=graph)

        data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))
        opExport.Input.setValue(data)

        sub_roi = [(10, 0), (None, 80)]
        opExport.RegionStart.setValue(sub_roi[0])
        opExport.RegionStop.setValue(sub_roi[1])

        opExport.ExportDtype.setValue(numpy.uint8)

        opExport.InputMin.setValue(0.0)
        opExport.InputMax.setValue(100.0)
        opExport.ExportMin.setValue(100)
        opExport.ExportMax.setValue(200)

        opExport.OutputFormat.setValue('hdf5')
        opExport.OutputFilenameFormat.setValue(self._tmpdir + '/export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
        opExport.OutputInternalPath.setValue('volume/data')

        opExport.TransactionSlot.setValue(True)

        assert opExport.ImageToExport.ready()
        assert opExport.ExportPath.ready()
        assert opExport.ImageToExport.meta.drange == (100, 200)

        #print "exporting data to: {}".format( opExport.ExportPath.value )
        assert opExport.ExportPath.value == self._tmpdir + '/' + 'export_x10-100_y0-80.h5/volume/data'
        opExport.run_export()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)

            # Compare with the correct subregion and convert dtype.
            sub_roi[1] = (100, 80)  # Replace 'None' with full extent
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            expected_data += 100  # see renormalization settings

            assert opRead.Output.meta.shape == expected_data.shape
            assert opRead.Output.meta.dtype == expected_data.dtype
            read_data = opRead.Output[:].wait()

            # Due to rounding errors, the actual result and the expected result may differ by 1
            #  e.g. if the original pixel value was 32.99999999
            # Also, must promote to signed values to avoid unsigned rollover
            # See issue ( https://github.com/ilastik/lazyflow/issues/165 ).
            expected_data_signed = expected_data.astype(numpy.int16)
            read_data_signed = read_data.astype(numpy.int16)
            difference_from_expected = expected_data_signed - read_data_signed
            assert (numpy.abs(difference_from_expected) <= 1).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #22
    def setupOutputs(self):
        if self._opReader is not None:
            self.Output.disconnect()
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None

        try:
            # Configure the reader
            dataReady = True
            self._opReader = OpInputDataReader(parent=self)
            self._opReader.WorkingDirectory.setValue(
                self.WorkingDirectory.value)
            self._opReader.FilePath.setValue(self.DatasetPath.value)

            # Since most file formats don't save meta-info,
            # The reader output's axis order may be incorrect.
            # (For example, if we export in npy format with zxy order,
            #  the Npy reader op will simply assume xyz order when it reads the data.)

            # Force the metadata back to the correct state by copying select items from Input.meta
            metadata = {}
            metadata['axistags'] = self.Input.meta.axistags
            metadata['drange'] = self.Input.meta.drange
            metadata['display_mode'] = self.Input.meta.display_mode
            self._opMetadataInjector = OpMetadataInjector(parent=self)
            self._opMetadataInjector.Input.connect(self._opReader.Output)
            self._opMetadataInjector.Metadata.setValue(metadata)

            dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect(self._opMetadataInjector.Output)
            else:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
                self._opReader.cleanUp()
                self._opReader = None
                self.Output.meta.NOTREADY = True

        #except OpInputDataReader.DatasetReadError:
        except Exception as ex:
            #logger.debug( "On-disk image can't be read: {}".format(ex) )
            # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
            #       because the 'path' (e.g. 'myfile_{slice_index}.png') will be nonexistent.
            #       That's okay because a stack is probably too slow to be of use for a preview anyway.
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None
            # The dataset doesn't exist yet.
            self.Output.meta.NOTREADY = True
Example #23
    def testBasic(self):
        graph = Graph()
        opExport = OpDataExport(graph=graph)
        try:
            opExport.TransactionSlot.setValue(True)
            opExport.WorkingDirectory.setValue(self._tmpdir)

            # Simulate the important fields of a DatasetInfo object
            class MockDatasetInfo(object):
                pass

            rawInfo = MockDatasetInfo()
            rawInfo.nickname = 'test_nickname'
            rawInfo.filePath = './somefile.h5'
            opExport.RawDatasetInfo.setValue(rawInfo)

            opExport.SelectionNames.setValue(['Mock Export Data'])

            data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
            data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

            opExport.Inputs.resize(1)
            opExport.Inputs[0].setValue(data)

            sub_roi = [(10, 20), (90, 80)]
            opExport.RegionStart.setValue(sub_roi[0])
            opExport.RegionStop.setValue(sub_roi[1])

            opExport.ExportDtype.setValue(numpy.uint8)

            opExport.OutputFormat.setValue('hdf5')
            opExport.OutputFilenameFormat.setValue('{dataset_dir}/{nickname}_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
            opExport.OutputInternalPath.setValue('volume/data')

            assert opExport.ImageToExport.ready()
            assert opExport.ExportPath.ready()

            expected_path = self._tmpdir + '/' + rawInfo.nickname + '_export_x10-90_y20-80.h5/volume/data'
            computed_path = opExport.ExportPath.value
            assert os.path.normpath(computed_path) == os.path.normpath(expected_path), \
                "Expected {}\nGot: {}".format(expected_path, computed_path)
            opExport.run_export()
        finally:
            opExport.cleanUp()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(computed_path)

            # Compare with the correct subregion and convert dtype.
            expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
            expected_data = expected_data.astype(numpy.uint8)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example #24
        def handleImportLabelsAction():
            # Find the directory of the most recently opened image file
            mostRecentImageFile = PreferencesManager().get('DataSelection', 'recent image')
            if mostRecentImageFile is not None:
                defaultDirectory = os.path.split(mostRecentImageFile)[0]
            else:
                defaultDirectory = os.path.expanduser('~')
            fileNames = DataSelectionGui.getImageFileNamesToOpen(self, defaultDirectory)
            fileNames = list(map(str, fileNames))

            # For now, we require a single hdf5 file
            if len(fileNames) > 1:
                QMessageBox.critical(self, "Too many files",
                                     "Labels must be contained in a single hdf5 volume.")
                return
            if len(fileNames) == 0:
                # user cancelled
                return

            file_path = fileNames[0]
            internal_paths = DataSelectionGui.getPossibleInternalPaths(file_path)
            if len(internal_paths) == 0:
                QMessageBox.critical(self, "No volumes in file",
                                     "Couldn't find a suitable dataset in your hdf5 file.")
                return
            if len(internal_paths) == 1:
                internal_path = internal_paths[0]
            else:
                dlg = H5VolumeSelectionDlg(internal_paths, self)
                if dlg.exec_() == QDialog.Rejected:
                    return
                selected_index = dlg.combo.currentIndex()
                internal_path = str(internal_paths[selected_index])

            path_components = PathComponents(file_path)
            path_components.internalPath = str(internal_path)

            try:
                top_op = self.topLevelOperatorView
                opReader = OpInputDataReader(parent=top_op.parent)
                opReader.FilePath.setValue(path_components.totalPath())

                # Reorder the axes
                op5 = OpReorderAxes(parent=top_op.parent)
                op5.AxisOrder.setValue(top_op.LabelInputs.meta.getAxisKeys())
                op5.Input.connect(opReader.Output)

                # Finally, import the labels
                top_op.importLabels(top_op.current_view_index(), op5.Output)

            finally:
                op5.cleanUp()
                opReader.cleanUp()
Example #25
        def handleImportLabelsAction():
            fileNames = ImageFileDialog(
                self,
                preferences_group="DataSelection",
                preferences_setting="recent image").getSelectedPaths()
            fileNames = list(map(str, fileNames))

            # For now, we require a single hdf5 file
            if len(fileNames) > 1:
                QMessageBox.critical(
                    self, "Too many files",
                    "Labels must be contained in a single hdf5 volume.")
                return
            if len(fileNames) == 0:
                # user cancelled
                return

            file_path = fileNames[0]
            internal_paths = DatasetInfo.getPossibleInternalPathsFor(file_path)
            if len(internal_paths) == 0:
                QMessageBox.critical(
                    self, "No volumes in file",
                    "Couldn't find a suitable dataset in your hdf5 file.")
                return
            if len(internal_paths) == 1:
                internal_path = internal_paths[0]
            else:
                dlg = SubvolumeSelectionDlg(internal_paths, self)
                if dlg.exec_() == QDialog.Rejected:
                    return
                selected_index = dlg.combo.currentIndex()
                internal_path = str(internal_paths[selected_index])

            path_components = PathComponents(file_path)
            path_components.internalPath = str(internal_path)

            try:
                top_op = self.topLevelOperatorView
                opReader = OpInputDataReader(parent=top_op.parent)
                opReader.FilePath.setValue(path_components.totalPath())

                # Reorder the axes
                op5 = OpReorderAxes(parent=top_op.parent)
                op5.AxisOrder.setValue(top_op.LabelInputs.meta.getAxisKeys())
                op5.Input.connect(opReader.Output)

                # Finally, import the labels
                top_op.importLabels(top_op.current_view_index(), op5.Output)

            finally:
                op5.cleanUp()
                opReader.cleanUp()
Example #26
    def test_npy_with_roi(self):
        a = numpy.indices((100, 100, 200)).astype(numpy.uint8).sum(0)
        assert a.shape == (100, 100, 200)
        numpy.save(self.testNpyDataFileName, a)
        opReader = OpInputDataReader(graph=lazyflow.graph.Graph())
        try:
            opReader.FilePath.setValue(self.testNpyDataFileName)
            opReader.SubVolumeRoi.setValue(((10, 20, 30), (50, 70, 90)))

            all_data = opReader.Output[:].wait()
            assert all_data.shape == (40, 50, 60)
            assert (all_data == a[10:50, 20:70, 30:90]).all()
        finally:
            opReader.cleanUp()
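SubVolumeRoi takes a (start, stop) pair per axis, so the read is equivalent to plain numpy slicing with those bounds; the resulting shape is stop minus start along each axis:

import numpy

a = numpy.indices((100, 100, 200)).astype(numpy.uint8).sum(0)
start, stop = (10, 20, 30), (50, 70, 90)
sub = a[tuple(slice(b, e) for b, e in zip(start, stop))]
assert sub.shape == (40, 50, 60)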
Example #28
    def testBasic_Dvid(self):
        if _skip_dvid:
            raise nose.SkipTest

        # Spin up a mock dvid server to test with.
        dvid_dataset, data_uuid, data_name = "datasetA", "abcde", "indices_data"
        mockserver_data_file = self._tmpdir + '/mockserver_data.h5'
        with H5MockServerDataFile(mockserver_data_file) as test_h5file:
            test_h5file.add_node(dvid_dataset, data_uuid)
        server_proc, shutdown_event = H5MockServer.create_and_start(
            mockserver_data_file,
            "localhost",
            8000,
            same_process=False,
            disable_server_logging=True)

        try:
            data = 255 * numpy.random.random((100, 100, 4))
            data = data.astype(numpy.uint8)
            data = vigra.taggedView(data, vigra.defaultAxistags('xyc'))

            graph = Graph()

            opPiper = OpArrayPiper(graph=graph)
            opPiper.Input.setValue(data)

            opExport = OpExportSlot(graph=graph)
            opExport.Input.connect(opPiper.Output)
            opExport.OutputFormat.setValue('dvid')
            url = 'http://localhost:8000/api/node/{data_uuid}/{data_name}'.format(
                **locals())
            opExport.OutputFilenameFormat.setValue(url)

            assert opExport.ExportPath.ready()
            assert opExport.ExportPath.value == url
            opExport.run_export()

            try:
                opRead = OpInputDataReader(graph=graph)
                opRead.FilePath.setValue(opExport.ExportPath.value)
                expected_data = data.view(numpy.ndarray)
                read_data = opRead.Output[:].wait()
                assert (read_data == expected_data
                        ).all(), "Read data didn't match exported data!"
            finally:
                opRead.cleanUp()
        finally:
            shutdown_event.set()
            server_proc.join()
Example #29
    def aaaNumpyFile(self):
        g = graph.Graph()
        npfile = "/home/akreshuk/data/synapse_small_4d.npy"
        reader = OpInputDataReader(graph=g)
        reader.FilePath.setValue(npfile)
        # out = reader.Output[:].wait()
        # print(out.shape)

        opFeatures = OpPixelFeaturesPresmoothed(graph=g)
        opFeatures.Scales.setValue(self.scales)
        opFeatures.FeatureIds.setValue(self.featureIds)
        opFeatures.Input.connect(reader.Output)
        opFeatures.Matrix.setValue(self.selectedFeatures[5])
        out = opFeatures.Output[:].wait()
        print(out.shape)

        opFeaturesInterp = OpPixelFeaturesInterpPresmoothed(graph=g)
        opFeaturesInterp.Scales.setValue(self.scales)
        opFeaturesInterp.FeatureIds.setValue(self.featureIds)
        opFeaturesInterp.Input.connect(reader.Output)
        opFeaturesInterp.Matrix.setValue(self.selectedFeatures[5])
        opFeaturesInterp.InterpolationScaleZ.setValue(2)
        out = opFeaturesInterp.Output[:].wait()

        print(out.shape)
Example #30
    def setupOutputs(self):
        datasetInfo = self.Dataset.value
        internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId

        # Data only comes from the project file if the user said so AND it exists in the project
        datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
        datasetInProject &= self.ProjectFile.connected() and \
                            internalPath in self.ProjectFile.value

        if self._opReader is not None:
            self.Image.disconnect()
            self._opReader.cleanUp()

        # If we should find the data in the project file, use a dataset reader
        if datasetInProject:
            self._opReader = OpStreamingHdf5Reader(parent=self)
            self._opReader.Hdf5File.setValue(self.ProjectFile.value)
            self._opReader.InternalPath.setValue(internalPath)
            providerSlot = self._opReader.OutputImage
        else:
            # Use a normal (filesystem) reader
            self._opReader = OpInputDataReader(parent=self)
            if datasetInfo.axisorder is not None:
                self._opReader.DefaultAxisOrder.setValue(datasetInfo.axisorder)
            self._opReader.WorkingDirectory.connect(self.WorkingDirectory)
            self._opReader.FilePath.setValue(datasetInfo.filePath)
            providerSlot = self._opReader.Output

        # Connect our external outputs to the internal operators we chose
        self.Image.connect(providerSlot)

        # Set the image name and usage flag
        self.AllowLabels.setValue(datasetInfo.allowLabels)
        self.ImageName.setValue(datasetInfo.filePath)
Example #31
def convert_predictions_to_uncertainties(input_path, parsed_export_args):
    """
    Read exported pixel predictions and calculate/export the uncertainties.

    input_path: The path to the prediction output file. If hdf5, must include the internal dataset name.
    parsed_export_args: The already-parsed cmd-line arguments generated from a DataExportApplet-compatible ArgumentParser.
    """
    graph = Graph()
    opReader = OpInputDataReader(graph=graph)
    opReader.WorkingDirectory.setValue(os.getcwd())
    opReader.FilePath.setValue(input_path)

    opUncertainty = OpEnsembleMargin(graph=graph)
    opUncertainty.Input.connect(opReader.Output)

    opExport = OpFormattedDataExport(graph=graph)
    opExport.Input.connect(opUncertainty.Output)

    # Apply command-line arguments.
    DataExportApplet._configure_operator_with_parsed_args(parsed_export_args, opExport)

    last_progress = [-1]

    def print_progress(progress_percent):
        if progress_percent != last_progress[0]:
            last_progress[0] = progress_percent
            sys.stdout.write(" {}".format(progress_percent))

    print("Exporting results to : {}".format(opExport.ExportPath.value))
    sys.stdout.write("Progress:")
    opExport.progressSignal.subscribe(print_progress)

    # Begin export
    opExport.run_export()
    sys.stdout.write("\n")
    print("DONE.")
Example #32
    def setupOutputs(self):
        if self._opReader is not None:
            self.Output.disconnect()
            self._opReader.cleanUp()

        self._opDummyData.Input.connect(self.Input)

        dataReady = True
        try:
            # Configure the reader (shouldn't need to specify axis order; should be provided in data)
            self._opReader = OpInputDataReader(parent=self)
            self._opReader.WorkingDirectory.setValue(
                self.WorkingDirectory.value)
            self._opReader.FilePath.setValue(self.DatasetPath.value)

            dataReady &= self._opReader.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opReader.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect(self._opReader.Output)

        except OpInputDataReader.DatasetReadError:
            # The dataset doesn't exist yet.
            dataReady = False

        if not dataReady:
            # The dataset isn't ready.
            # That's okay, we'll just return dummy data.
            self._opReader = None
            self.Output.connect(self._opDummyData.Output)
Example #33
    def compare_results(self, opReaderResult, compare_path, input_axes, post_process=None,
                        max_mse=None, max_part_unequal=None):
        if os.path.exists(compare_path):
            result = opReaderResult.Output[:].wait()

            opReaderCompare = OpInputDataReader(graph=Graph())
            opReaderCompare.FilePath.setValue(compare_path)
            opReorderCompare = OpReorderAxes(parent=opReaderCompare)
            opReorderCompare.Input.connect(opReaderCompare.Output)
            opReorderCompare.AxisOrder.setValue(input_axes)

            compare = opReorderCompare.Output[:].wait()

            assert result.shape == compare.shape, (result.shape, compare.shape)

            if post_process:
                result = post_process(result)
                compare = post_process(compare)

            # for easy debugging:
            # -----------------------------------------------------------------
            # import matplotlib.pyplot as plt

            # res = result.squeeze()
            # comp = compare.squeeze()
            # if len(res.shape) > 2:
            #     res = res.reshape(-1, max(res.shape))
            #     comp = comp.reshape(-1, max(comp.shape))

            # plt.figure()
            # plt.imshow(res)
            # plt.title(f'res {result.shape}')
            # plt.colorbar()
            # plt.figure()
            # plt.imshow(comp)
            # plt.title('comp')
            # plt.colorbar()
            # plt.figure()
            # plt.imshow(res - comp)
            # plt.title('diff')
            # plt.colorbar()
            # plt.show()
            # -----------------------------------------------------------------

            if max_mse:
                assert max_mse > numpy.mean(numpy.square(result - compare)), \
                    numpy.mean(numpy.square(result - compare))
            elif max_part_unequal:
                assert max_part_unequal > numpy.mean(~numpy.isclose(result, compare)), \
                    numpy.mean(~numpy.isclose(result, compare))
            else:
                assert numpy.allclose(result, compare), f'{result.shape}, {compare.shape}'
        else:
            writer = OpFormattedDataExport(graph=Graph())
            writer.Input.connect(opReaderResult.Output)

            writer.OutputFilenameFormat.setValue(compare_path)
            writer.TransactionSlot.setValue(True)
            writer.run_export()
            warnings.warn(f'created comparison data: {compare_path} with axis order {input_axes}')
Example #34
    def create_input(cls,
                     filepath,
                     input_axes,
                     outmin=None,
                     outmax=None,
                     dtype=None):
        """ Creates a file from the data at 'filepath' that has 'input_axes' """
        basename = os.path.basename(filepath)
        reader = OpInputDataReader(graph=Graph())
        assert os.path.exists(filepath), "{} not found".format(filepath)
        reader.FilePath.setValue(os.path.abspath(filepath))

        writer = OpFormattedDataExport(parent=reader)
        if outmin is not None:
            writer.ExportMin.setValue(outmin)
        if outmax is not None:
            writer.ExportMax.setValue(outmax)
        if dtype is not None:
            writer.ExportDtype.setValue(dtype)

        writer.OutputAxisOrder.setValue(input_axes)
        writer.Input.connect(reader.Output)
        writer.OutputFilenameFormat.setValue(
            os.path.join(cls.dir,
                         basename.split(".")[0] + "_" + input_axes))
        writer.TransactionSlot.setValue(True)
        input_path = writer.ExportPath.value
        writer.run_export()

        return input_path
Example #35
    def get_non_transposed_provider_slot(
            self,
            parent: Optional[Operator] = None,
            graph: Optional[Graph] = None) -> OutputSlot:
        op_reader = OpInputDataReader(parent=parent,
                                      graph=graph,
                                      FilePath=self.url)
        return op_reader.Output
Example #36
    def testCreateExportDirectory(self):
        """
        Test that the batch operator can create the export directory if it doesn't exist yet.
        """
        # Start by writing some test data to disk.
        self.testData = numpy.random.random((1, 10, 10, 10, 1))
        numpy.save(self.testDataFileName, self.testData)

        cwd = os.getcwd()
        info = DatasetInfo()
        info.filePath = os.path.join(cwd, 'NpyTestData.npy')

        graph = Graph()
        opBatchIo = OpBatchIo(graph=graph)
        opInput = OpInputDataReader(graph=graph)
        opInput.FilePath.setValue(info.filePath)

        # Our test "processing pipeline" is just a smoothing operator.
        opSmooth = OpGaussianSmoothing(graph=graph)
        opSmooth.Input.connect(opInput.Output)
        opSmooth.sigma.setValue(3.0)

        exportDir = os.path.join(cwd, 'exported_data')
        opBatchIo.ExportDirectory.setValue(exportDir)
        opBatchIo.Suffix.setValue('_smoothed')
        opBatchIo.Format.setValue(ExportFormat.H5)
        opBatchIo.DatasetPath.setValue(info.filePath)

        internalPath = 'path/to/data'
        opBatchIo.InternalPath.setValue(internalPath)

        opBatchIo.ImageToExport.connect(opSmooth.Output)

        dirty = opBatchIo.Dirty.value
        assert dirty == True

        outputPath = opBatchIo.OutputDataPath.value
        assert outputPath == os.path.join(exportDir, 'NpyTestData_smoothed.h5',
                                          internalPath)

        result = opBatchIo.ExportResult.value
        assert result

        dirty = opBatchIo.Dirty.value
        assert dirty == False

        # Check the file
        smoothedPath = PathComponents(outputPath).externalPath
        with h5py.File(smoothedPath, 'r') as f:
            assert internalPath in f
            assert f[internalPath].shape == self.testData.shape
        try:
            os.remove(smoothedPath)
            os.rmdir(exportDir)
        except OSError:
            pass
Example No. 37
    def testBasic_Dvid(self):
        if _skip_dvid:
            raise nose.SkipTest
        
        # Spin up a mock dvid server to test with.
        dvid_dataset, data_uuid, data_name = "datasetA", "abcde", "indices_data"
        mockserver_data_file = self._tmpdir + '/mockserver_data.h5'
        with H5MockServerDataFile(mockserver_data_file) as test_h5file:
            test_h5file.add_node(dvid_dataset, data_uuid)
        server_proc, shutdown_event = H5MockServer.create_and_start(
            mockserver_data_file, "localhost", 8000, same_process=False, disable_server_logging=True)

        try:
            data = 255 * numpy.random.random((100, 100, 4))
            data = data.astype(numpy.uint8)
            data = vigra.taggedView(data, vigra.defaultAxistags('xyc'))

            graph = Graph()

            opPiper = OpArrayPiper(graph=graph)
            opPiper.Input.setValue(data)

            opExport = OpExportSlot(graph=graph)
            opExport.Input.connect(opPiper.Output)
            opExport.OutputFormat.setValue('dvid')
            url = 'http://localhost:8000/api/node/{data_uuid}/{data_name}'.format(**locals())
            opExport.OutputFilenameFormat.setValue(url)

            assert opExport.ExportPath.ready()
            assert opExport.ExportPath.value == url
            opExport.run_export()

            # Create the reader *before* the inner try block so cleanUp() is always valid.
            opRead = OpInputDataReader(graph=graph)
            try:
                opRead.FilePath.setValue(opExport.ExportPath.value)
                expected_data = data.view(numpy.ndarray)
                read_data = opRead.Output[:].wait()
                assert (read_data == expected_data).all(), "Read data didn't match exported data!"
            finally:
                opRead.cleanUp()
        finally:
            shutdown_event.set()
            server_proc.join()
Example No. 38
    def __init__(
        self,
        *,
        filePath: str,
        project_file: h5py.File = None,
        sequence_axis: str = None,
        nickname: str = "",
        drange: Tuple[Number, Number] = None,
        **info_kwargs,
    ):
        """
        sequence_axis: Axis along which to stack (only applicable for stacks).
        """
        self.sequence_axis = sequence_axis
        self.base_dir = str(Path(project_file.filename).absolute().parent) if project_file else os.getcwd()
        # FIXME: if project_file was opened via a relative path, this would break
        assert os.path.isabs(self.base_dir)
        self.expanded_paths = self.expand_path(filePath, cwd=self.base_dir)
        assert len(self.expanded_paths) == 1 or self.sequence_axis
        if len({PathComponents(ep).extension
                for ep in self.expanded_paths}) > 1:
            raise Exception(
                f"Multiple extensions unsupported as a single data source: {self.expanded_paths}"
            )
        self.filePath = os.path.pathsep.join(self.expanded_paths)

        op_reader = OpInputDataReader(graph=Graph(),
                                      WorkingDirectory=self.base_dir,
                                      FilePath=self.filePath,
                                      SequenceAxis=self.sequence_axis)
        meta = op_reader.Output.meta.copy()
        op_reader.cleanUp()

        super().__init__(
            default_tags=meta.axistags,
            nickname=nickname or self.create_nickname(self.expanded_paths),
            laneShape=meta.shape,
            laneDtype=meta.dtype,
            drange=drange or meta.get("drange"),
            **info_kwargs,
        )
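
The path convention above is easy to miss: expand_path may return several files, which are joined with os.path.pathsep into a single filePath string so that one reader can stack them along sequence_axis. A sketch (the file names and the class name are assumptions):

    # Hypothetical two-slice stack along 'z'; the class name is assumed.
    stack_path = os.path.pathsep.join(["slice_00.tif", "slice_01.tif"])
    info = FilesystemDatasetInfo(filePath=stack_path, sequence_axis="z")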
Example No. 39
    def testBasic(self):
        data = numpy.random.random((100, 100)).astype(numpy.float32)
        data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

        graph = Graph()
        opWriter = OpNpyWriter(graph=graph)
        opWriter.Input.setValue(data)
        opWriter.Filepath.setValue(self._tmpdir + '/npy_writer_test_output.npy')

        # Write it...
        opWriter.write()

        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opWriter.Filepath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
Example No. 40
    def _test_counting(self, dims, input_axes):
        project = f"CellDensityCounting{dims}.ilp"
        try:
            self.untested_projects.remove(project)
        except ValueError:
            pass

        project_file = os.path.join(self.PROJECT_FILE_BASE, project)

        if not os.path.exists(project_file):
            raise IOError('project file "{}" not found'.format(project_file))

        compare_path = os.path.join(self.dir, f"Counts{dims}_compare")
        output_path = compare_path.replace("_compare", f"_{input_axes}_result")

        args = []
        args.append("--headless")
        args.append("--project=" + project_file)

        # Batch export options
        # If we were actually launching from the command line, 'png sequence'
        # would be in quotes...
        # args.append('--output_format=png sequence')
        args.append("--export_source=Probabilities")
        args.append("--output_filename_format=" + output_path)
        args.append("--output_format=hdf5")
        args.append("--export_dtype=float32")
        args.append("--pipeline_result_drange=(0,1)")
        args.append("--export_drange=(0,1)")

        # Input args
        args.append("--input_axes={}".format(input_axes))
        input_source_path1 = os.path.join(self.PROJECT_FILE_BASE, "inputdata",
                                          "{}.h5".format(dims))
        input_path1 = self.create_input(input_source_path1, input_axes)
        args.append("--raw_data=" + input_path1)

        # Clear the existing commandline args so it looks like we're starting fresh.
        sys.argv = ["ilastik.py"]
        sys.argv += args

        # Start up the ilastik.py entry script as if we had launched it from the command line
        # This will execute the batch mode script
        self.ilastik_startup.main()

        output_path += ".h5"
        compare_path += ".h5"

        opReaderResult = OpInputDataReader(graph=Graph())
        opReaderResult.FilePath.setValue(output_path)

        self.compare_results(opReaderResult,
                             compare_path,
                             input_axes,
                             max_mse=0.001)
Example No. 41
    def _test_object_classification(self, dims, variant, input_axes):
        project = f'ObjectClassification{dims}_{variant}.ilp'
        try:
            self.untested_projects.remove(project)
        except ValueError:
            pass

        project_file = os.path.join(self.PROJECT_FILE_BASE, project)

        if not os.path.exists(project_file):
            raise IOError(f'project file "{project_file}" not found')

        compare_path = os.path.join(self.dir, f'Object_Predictions_{dims}_{variant}_compare')
        output_path = compare_path.replace('_compare', f'_{input_axes}_result')

        args = [
            '--headless',
            '--project', project_file,
            # Batch export options
            '--export_source', 'Object Predictions',
            '--output_filename_format', output_path,
            '--output_format', 'hdf5',
            '--export_dtype', 'uint8',
            '--pipeline_result_drange', '(0,255)',
            '--export_drange', '(0,255)',
            # Input args
            '--input_axes', input_axes,
            '--raw_data',
            self.create_input(os.path.join(self.PROJECT_FILE_BASE, 'inputdata', f'{dims}.h5'), input_axes)
        ]

        if variant == 'wPred':
            args.append('--prediction_maps')
            args.append(self.create_input(
                os.path.join(self.PROJECT_FILE_BASE, 'inputdata', f'{dims}_Probabilities.h5'),
                input_axes))
        elif variant == 'wSeg':
            args.append('--segmentation_image')
            args.append(self.create_input(
                os.path.join(self.PROJECT_FILE_BASE, 'inputdata', f'{dims}_Binary_Segmentation.h5'),
                input_axes))
        else:
            raise NotImplementedError(f'unknown variant: {variant}')

        # Start up the ilastik.py entry script as if we had launched it from the command line
        # This will execute the batch mode script
        sys.argv = ['ilastik.py'] + args
        self.ilastik_startup.main()
        output_path += '.h5'
        compare_path += '.h5'

        opReaderResult = OpInputDataReader(graph=Graph())
        opReaderResult.FilePath.setValue(output_path)

        self.compare_results(opReaderResult, compare_path, input_axes)
Example No. 42
    def __init__(self, *, url: str, nickname: str = "", **info_kwargs):
        self.url = url
        op_reader = OpInputDataReader(graph=Graph(), FilePath=self.url)
        meta = op_reader.Output.meta.copy()
        op_reader.cleanUp()  # only the metadata is needed; close the reader
        super().__init__(
            default_tags=meta.axistags,
            nickname=nickname or self.url.rstrip("/").split("/")[-1],
            laneShape=meta.shape,
            laneDtype=meta.dtype,
            **info_kwargs,
        )
Example No. 43
    def setupOutputs( self ):
        if self._opReader is not None:
            self.Output.disconnect()
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None

        try:
            # Configure the reader
            dataReady = True
            self._opReader = OpInputDataReader( parent=self )
            self._opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
            self._opReader.FilePath.setValue( self.DatasetPath.value )

            # Since most file formats don't save meta-info,
            # the reader output's axis order may be incorrect.
            # (For example, if we export in npy format with zxy order,
            #  the Npy reader op will simply assume xyz order when it reads the data.)

            # Force the metadata back to the correct state by copying select items from Input.meta
            metadata = {}
            metadata['axistags'] = self.Input.meta.axistags
            metadata['drange'] = self.Input.meta.drange
            metadata['display_mode'] = self.Input.meta.display_mode
            self._opMetadataInjector = OpMetadataInjector( parent=self )
            self._opMetadataInjector.Input.connect( self._opReader.Output )
            self._opMetadataInjector.Metadata.setValue( metadata )

            dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect( self._opMetadataInjector.Output )
            else:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
                self._opReader.cleanUp()
                self._opReader = None
                self.Output.meta.NOTREADY = True

        except Exception:
            # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
            #       because the 'path' (e.g. 'myfile_{slice_index}.png') will be nonexistent.
            #       That's okay because a stack is probably too slow to be of use for a preview anyway.
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None
            # The dataset doesn't exist yet.
            self.Output.meta.NOTREADY = True
Example No. 44
    def test_npy(self):
        # Create Numpy test data
        a = numpy.zeros((10, 11))
        for x in range(0, 10):
            for y in range(0, 11):
                a[x, y] = x + y
        numpy.save(self.testNpyDataFileName, a)

        # Now read back our test data using an OpInputDataReader operator
        npyReader = OpInputDataReader(graph=self.graph)
        try:
            npyReader.FilePath.setValue(self.testNpyDataFileName)
            cwd = os.path.split(__file__)[0]
            npyReader.WorkingDirectory.setValue(cwd)

            # Read the entire NPY file and verify the contents
            npyData = npyReader.Output[:].wait()
            assert npyData.shape == (10, 11)
            for x in range(0, 10):
                for y in range(0, 11):
                    assert npyData[x, y] == x + y
        finally:
            npyReader.cleanUp()
Example No. 45
    def test_tiff_stack_multi_file(self, sequence_axis):
        """Test stack/sequence reading in hdf5-files"""
        shape = (4, 8, 16, 3)
        data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
        for idx, data_slice in enumerate(data):
            im = Image.fromarray(data_slice, mode="RGB")
            im.save(self.testmultiTiffFileName.format(index=idx))

        if sequence_axis == "c":
            data = numpy.concatenate(data, axis=-1)

        reader = OpInputDataReader(graph=self.graph)
        reader.SequenceAxis.setValue(sequence_axis)
        globString = self.testmultiTiffFileName.replace("02d}", "s}").format(index="*")
        try:
            reader.FilePath.setValue(globString)
            tiffdata = reader.Output[:].wait()

            assert tiffdata.shape == data.shape, f"{tiffdata.shape}, {data.shape}"
            numpy.testing.assert_array_equal(tiffdata, data)
        finally:
            # Call cleanUp() to close the file that this operator opened
            reader.cleanUp()
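
The glob-string construction above is subtle: the zero-padded integer format spec in the filename template is rewritten to a string spec so that "*" can be substituted. A standalone sketch (the template name is an assumption):

    template = "test_stack_{index:02d}.tiff"  # hypothetical template
    glob_string = template.replace("02d}", "s}").format(index="*")
    # glob_string == "test_stack_*.tiff"; OpInputDataReader expands such a
    # pattern into a file sequence stacked along SequenceAxis.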
Example No. 47
class OpImageOnDiskProvider(Operator):
    """
    This simply wraps a lazyflow OpInputDataReader, but ensures that the metadata 
    (axistags, drange) on the output matches the metadata from the original data 
    (even if the output file format doesn't support metadata fields).
    """
    TransactionSlot = InputSlot()
    Input = InputSlot() # Used for dtype and shape only. Data is always provided directly from the file.

    WorkingDirectory = InputSlot()
    DatasetPath = InputSlot() # A TOTAL path (possibly including a dataset name, e.g. myfile.h5/volume/data)
    Dirty = InputSlot()
    
    Output = OutputSlot()

    def __init__(self, *args, **kwargs):
        super( OpImageOnDiskProvider, self ).__init__(*args, **kwargs)
        self._opReader = None
        self._opMetadataInjector = None
    
    # Block diagram:
    #
    # (Input.axistags, Input.drange)
    #                               \  
    # DatasetPath ---> opReader ---> opMetadataInjector --> Output
    #                 /
    # WorkingDirectory
    
    def setupOutputs( self ):
        if self._opReader is not None:
            self.Output.disconnect()
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None

        try:
            # Configure the reader
            dataReady = True
            self._opReader = OpInputDataReader( parent=self )
            self._opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
            self._opReader.FilePath.setValue( self.DatasetPath.value )

            # Since most file formats don't save meta-info,
            # the reader output's axis order may be incorrect.
            # (For example, if we export in npy format with zxy order,
            #  the Npy reader op will simply assume xyz order when it reads the data.)

            # Force the metadata back to the correct state by copying select items from Input.meta
            metadata = {}
            metadata['axistags'] = self.Input.meta.axistags
            metadata['drange'] = self.Input.meta.drange
            self._opMetadataInjector = OpMetadataInjector( parent=self )
            self._opMetadataInjector.Input.connect( self._opReader.Output )
            self._opMetadataInjector.Metadata.setValue( metadata )

            dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect( self._opMetadataInjector.Output )
            else:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
                self._opReader.cleanUp()
                self._opReader = None
                self.Output.meta.NOTREADY = True

        except OpInputDataReader.DatasetReadError:
            # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
            #       because the 'path' (e.g. 'myfile_{slice_index}.png') will be nonexistent.
            #       That's okay because a stack is probably too slow to be of use for a preview anyway.
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None
            # The dataset doesn't exist yet.
            self.Output.meta.NOTREADY = True

    def execute(self, slot, subindex, roi, result):
        assert False, "Output is supposed to be directly connected to an internal operator."

    def propagateDirty(self, slot, subindex, roi):
        if slot == self.Input:
            self.Output.setDirty( roi )
        else:
            self.Output.setDirty( slice(None) )
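
The reader-plus-injector pattern in this class can also be used standalone. A minimal sketch, assuming a .npy file that was exported in 'zxy' order (the file name and axis order are assumptions):

    import vigra
    from lazyflow.graph import Graph
    from lazyflow.operators import OpMetadataInjector
    from lazyflow.operators.ioOperators import OpInputDataReader

    graph = Graph()
    reader = OpInputDataReader(graph=graph)
    reader.FilePath.setValue("exported_zxy.npy")  # hypothetical file

    injector = OpMetadataInjector(graph=graph)
    injector.Input.connect(reader.Output)
    # Restore the axis interpretation that the npy format could not store.
    injector.Metadata.setValue({"axistags": vigra.defaultAxistags("zxy")})

    data = injector.Output[:].wait()
    injector.cleanUp()
    reader.cleanUp()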
Example No. 48
    @classmethod
    def getInternalDatasets(cls, filePath):
        return OpInputDataReader.getInternalDatasets(filePath)
Example No. 49
def generate_trained_project_file(
    new_project_path, raw_data_paths, label_data_paths, feature_selections, classifier_factory=None
):
    """
    Create a new project file from scratch, add the given raw data files,
    inject the corresponding labels, configure the given feature selections,
    and (if provided) override the classifier type ('factory').
    
    Finally, request the classifier object from the pipeline (which forces training),
    and save the project.
    
    new_project_path: Where to save the new project file
    raw_data_paths: A list of paths to the raw data images to train with
    label_data_paths: A list of paths to the label image data to train with
    feature_selections: A matrix of bool, representing the selected features
    classifier_factory: Override the classifier type.  Must be a subclass of either:
                        - lazyflow.classifiers.LazyflowVectorwiseClassifierFactoryABC
                        - lazyflow.classifiers.LazyflowPixelwiseClassifierFactoryABC
    """
    assert len(raw_data_paths) == len(label_data_paths), "Number of label images must match number of raw images."

    import ilastik_main
    from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
    from lazyflow.graph import Graph
    from lazyflow.operators.ioOperators import OpInputDataReader
    from lazyflow.roi import roiToSlice, roiFromShape

    ##
    ## CREATE PROJECT
    ##

    # Manually configure the arguments to ilastik, as if they were parsed from the command line.
    # (Start with empty args and fill in below.)
    ilastik_args = ilastik_main.parser.parse_args([])
    ilastik_args.new_project = new_project_path
    ilastik_args.headless = True
    ilastik_args.workflow = "Pixel Classification"

    shell = ilastik_main.main(ilastik_args)
    assert isinstance(shell.workflow, PixelClassificationWorkflow)

    ##
    ## CONFIGURE GRAYSCALE INPUT
    ##

    data_selection_applet = shell.workflow.dataSelectionApplet

    # To configure data selection, start with empty cmdline args and manually fill them in
    data_selection_args, _ = data_selection_applet.parse_known_cmdline_args([], PixelClassificationWorkflow.ROLE_NAMES)
    data_selection_args.raw_data = raw_data_paths
    data_selection_args.preconvert_stacks = True

    # Simplest thing here is to configure using cmd-line interface
    data_selection_applet.configure_operator_with_parsed_args(data_selection_args)

    ##
    ## APPLY FEATURE MATRIX (from matrix above)
    ##

    opFeatures = shell.workflow.featureSelectionApplet.topLevelOperator
    opFeatures.Scales.setValue(ScalesList)
    opFeatures.FeatureIds.setValue(FeatureIds)
    opFeatures.SelectionMatrix.setValue(feature_selections)

    ##
    ## CUSTOMIZE CLASSIFIER TYPE
    ##

    opPixelClassification = shell.workflow.pcApplet.topLevelOperator
    if classifier_factory is not None:
        opPixelClassification.ClassifierFactory.setValue(classifier_factory)

    ##
    ## READ/APPLY LABEL VOLUMES
    ##

    # Read each label volume and inject the label data into the appropriate training slot
    cwd = os.getcwd()
    max_label_class = 0
    for lane, label_data_path in enumerate(label_data_paths):
        graph = Graph()
        opReader = OpInputDataReader(graph=graph)
        try:
            opReader.WorkingDirectory.setValue(cwd)
            opReader.FilePath.setValue(label_data_path)

            print "Reading label volume: {}".format(label_data_path)
            label_volume = opReader.Output[:].wait()
        finally:
            opReader.cleanUp()

        raw_shape = opPixelClassification.InputImages[lane].meta.shape
        if label_volume.ndim != len(raw_shape):
            # Append a singleton channel axis
            assert label_volume.ndim == len(raw_shape) - 1
            label_volume = label_volume[..., None]

        # Auto-calculate the max label value
        max_label_class = max(max_label_class, label_volume.max())

        print "Applying label volume to lane #{}".format(lane)
        entire_volume_slicing = roiToSlice(*roiFromShape(label_volume.shape))
        opPixelClassification.LabelInputs[lane][entire_volume_slicing] = label_volume

    assert max_label_class > 1, "Not enough label classes were found in your label data."
    label_names = list(map(str, range(max_label_class)))
    opPixelClassification.LabelNames.setValue(label_names)

    ##
    ## TRAIN CLASSIFIER
    ##

    # Make sure the caches in the pipeline are not 'frozen'.
    # (This is the equivalent of 'live update' mode in the GUI.)
    opPixelClassification.FreezePredictions.setValue(False)

    # Request the classifier object from the pipeline.
    # This forces the pipeline to produce (train) the classifier.
    _ = opPixelClassification.Classifier.value

    ##
    ## SAVE PROJECT
    ##

    # save project file (includes the new classifier).
    shell.projectManager.saveProject(force_all_save=False)
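
A hedged usage sketch (all paths are made up; the selection matrix must match the module-level ScalesList and FeatureIds that the function applies):

    # Hypothetical project and training data.
    feature_selections = numpy.zeros((len(FeatureIds), len(ScalesList)), dtype=bool)
    feature_selections[0, 0] = True  # e.g. the first feature at the smallest scale

    generate_trained_project_file(
        "MyTrainedProject.ilp",
        raw_data_paths=["raw1.png", "raw2.png"],
        label_data_paths=["labels1.png", "labels2.png"],
        feature_selections=feature_selections,
    )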
Example No. 50
        def impl():
            shell = self.shell
            workflow = shell.projectManager.workflow
            carvingApplet = workflow.carvingApplet
            gui = carvingApplet.getMultiLaneGui()
            op_carving = carvingApplet.topLevelOperator.getLane(0)

            # activate the carving applet
            shell.setSelectedAppletDrawer(2)
            # let the gui catch up
            QApplication.processEvents()
            self.waitForViews(gui.currentGui().editor.imageViews)
            # inject the labels
            op5 = OpReorderAxes(parent=op_carving.parent)
            opReader = OpInputDataReader(parent=op_carving.parent)
            try:
                opReader.FilePath.setValue(f"{self.reference_files['carving_label_file']}/exported_data")
                op5.AxisOrder.setValue(op_carving.WriteSeeds.meta.getAxisKeys())
                op5.Input.connect(opReader.Output)
                label_data = op5.Output[:].wait()
            finally:
                op5.cleanUp()
                opReader.cleanUp()
            slicing = roi.fullSlicing(label_data.shape)
            op_carving.WriteSeeds[slicing] = label_data

            gui.currentGui().labelingDrawerUi.segment.click()
            QApplication.processEvents()

            op_carving.saveObjectAs("Object 1")
            op_carving.deleteObject("<not saved yet>")

            # export the mesh:
            req = gui.currentGui()._exportMeshes(["Object 1"], [self.output_obj_file])
            req.wait()

            # compare meshes
            with open(self.output_obj_file, "r") as f:
                left = f.read()

            with open(self.reference_files["output_obj_file"], "r") as f:
                right = f.read()

            # TODO: might result in errors due to rounding on different systems
            assert left == right

            # export the completed segments layer
            layermatch = [
                x.name.startswith("Completed segments (unicolor)") for x in gui.currentGui().editor.layerStack
            ]
            assert sum(layermatch) == 1, "Completed segments (unicolor) Layer expected."
            completed_segments_layer = gui.currentGui().editor.layerStack[layermatch.index(True)]
            opExport = get_export_operator(completed_segments_layer)
            try:
                opExport.OutputFilenameFormat.setValue(self.output_file)
                opExport.run_export()
            finally:
                opExport.cleanUp()

            assert os.path.exists(self.output_file)

            # compare completed segments
            with h5py.File(self.reference_files["output_file"], "r") as f_left:
                data_left = f_left["exported_data"][:]

            with h5py.File(self.output_file, "r") as f_right:
                data_right = f_right["exported_data"][:]

            numpy.testing.assert_array_almost_equal(data_left, data_right)

            # Save the project
            saveThread = self.shell.onSaveProjectActionTriggered()
            saveThread.join()
Example No. 51
def import_labeling_layer(labelLayer, labelingSlots, parent_widget=None):
    """
    Prompt the user for layer import settings, and perform the layer import.
    :param labelLayer: The top label layer source
    :param labelingSlots: An instance of LabelingGui.LabelingSlots
    :param parent_widget: The Qt GUI parent object
    """
    writeSeeds = labelingSlots.labelInput
    assert isinstance(writeSeeds, lazyflow.graph.Slot), "slot is of type %r" % (type(writeSeeds))
    opLabels = writeSeeds.getRealOperator()
    assert isinstance(opLabels, lazyflow.graph.Operator), "slot's operator is of type %r" % (type(opLabels))


    recentlyImported = PreferencesManager().get('labeling', 'recently imported')
    mostRecentProjectPath = PreferencesManager().get('shell', 'recently opened')
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    if recentlyImported:
        defaultDirectory = os.path.split(recentlyImported)[0]
    elif mostRecentProjectPath:
        defaultDirectory = os.path.split(mostRecentProjectPath)[0]
    elif mostRecentImageFile:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(parent_widget, defaultDirectory)
    fileNames = list(map(str, fileNames))

    if not fileNames:
        return

    PreferencesManager().set('labeling', 'recently imported', fileNames[0])

    try:
        # Initialize operators
        opImport = OpInputDataReader( parent=opLabels.parent )
        opCache = OpArrayCache( parent=opLabels.parent )
        opMetadataInjector = OpMetadataInjector( parent=opLabels.parent )
        opReorderAxes = OpReorderAxes( parent=opLabels.parent )
    
        # Set up the pipeline as follows:
        #
        #   opImport --> opCache --> opMetadataInjector --------> opReorderAxes --(inject via setInSlot)--> labelInput
        #                           /                            /
        #   User-specified axisorder    labelInput.meta.axistags
    
        opImport.WorkingDirectory.setValue(defaultDirectory)
        opImport.FilePath.setValue(fileNames[0] if len(fileNames) == 1 else
                                   os.path.pathsep.join(fileNames))
        assert opImport.Output.ready()
    
        opCache.blockShape.setValue( opImport.Output.meta.shape )
        opCache.Input.connect( opImport.Output )
        assert opCache.Output.ready()

        opMetadataInjector.Input.connect( opCache.Output )
        metadata = opCache.Output.meta.copy()
        opMetadataInjector.Metadata.setValue( metadata )
        opReorderAxes.Input.connect( opMetadataInjector.Output )

        # Transpose the axes for assignment to the labeling operator.
        opReorderAxes.AxisOrder.setValue( writeSeeds.meta.getAxisKeys() )
    
        # We'll show a little window with a busy indicator while the data is loading
        busy_dlg = QProgressDialog(parent=parent_widget)
        busy_dlg.setLabelText("Importing Label Data...")
        busy_dlg.setCancelButton(None)
        busy_dlg.setMinimum(100)
        busy_dlg.setMaximum(100)
        def close_busy_dlg(*args):
            QApplication.postEvent(busy_dlg, QCloseEvent())
    
        # Load the data from file into our cache
        # When it's done loading, close the progress dialog.
        req = opCache.Output[:]
        req.notify_finished( close_busy_dlg )
        req.notify_failed( close_busy_dlg )
        req.submit()
        busy_dlg.exec_()

        readData = req.result
        
        maxLabels = len(labelingSlots.labelNames.value)

        # Can't use return_counts feature because that requires numpy >= 1.9
        #unique_read_labels, readLabelCounts = numpy.unique(readData, return_counts=True)

        # This does the same as the above, albeit slower, and probably with more ram.
        unique_read_labels = numpy.unique(readData)
        readLabelCounts = vigra_bincount(readData)[unique_read_labels]

        labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
        del readData
    
        # Ask the user how to interpret the data.
        settingsDlg = LabelImportOptionsDlg( parent_widget,
                                             fileNames, opMetadataInjector.Output,
                                             labelingSlots.labelInput, labelInfo )

        def handle_updated_axes():
            # The user is specifying a new interpretation of the file's axes
            updated_axisorder = str(settingsDlg.axesEdit.text())
            metadata = opMetadataInjector.Metadata.value.copy()
            metadata.axistags = vigra.defaultAxistags(updated_axisorder)
            opMetadataInjector.Metadata.setValue( metadata )
            
            if opReorderAxes._invalid_axes:
                settingsDlg.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
                # Red background
                settingsDlg.axesEdit.setStyleSheet("QLineEdit { background: rgb(255, 128, 128);"
                                                   "selection-background-color: rgb(128, 128, 255); }")
        settingsDlg.axesEdit.editingFinished.connect( handle_updated_axes )
        
        # Initialize
        handle_updated_axes()

        dlg_result = settingsDlg.exec_()
        if dlg_result != LabelImportOptionsDlg.Accepted:
            return

        # Get user's chosen label mapping from dlg
        labelMapping = settingsDlg.labelMapping    

        # Get user's chosen offsets.
        # Offsets in dlg only include the file axes, not the 5D axes expected by the label input,
        # so expand them to full 5D 
        axes_5d = opReorderAxes.Output.meta.getAxisKeys()
        tagged_offsets = collections.OrderedDict( zip( axes_5d, [0]*len(axes_5d) ) )
        tagged_offsets.update( dict( zip( opMetadataInjector.Output.meta.getAxisKeys(), settingsDlg.imageOffsets ) ) )
        imageOffsets = list(tagged_offsets.values())

        # Optimization if mapping is identity
        if list(labelMapping.keys()) == list(labelMapping.values()):
            labelMapping = None

        # This will be fast (it's already cached)
        label_data = opReorderAxes.Output[:].wait()
        
        # Map input labels to output labels
        if labelMapping:
            # There are other ways to do a relabeling (e.g. skimage.segmentation.relabel_sequential)
            # But this supports potentially huge values of unique_read_labels (in the billions),
            # without needing GB of RAM.
            mapping_indexes = numpy.searchsorted(unique_read_labels, label_data)
            new_labels = numpy.array([labelMapping[x] for x in unique_read_labels])
            label_data[:] = new_labels[mapping_indexes]

        label_roi = numpy.array( roiFromShape(opReorderAxes.Output.meta.shape) )
        label_roi += imageOffsets
        label_slice = roiToSlice(*label_roi)
        writeSeeds[label_slice] = label_data

    finally:
        opReorderAxes.cleanUp()
        opMetadataInjector.cleanUp()
        opCache.cleanUp()
        opImport.cleanUp()
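
The searchsorted-based relabeling used above deserves a standalone illustration, since it avoids allocating a dense lookup table even when label values are huge and sparse. A minimal numpy sketch with made-up values:

    import numpy

    label_data = numpy.array([[10, 500, 10], [0, 500, 999]])
    unique_read_labels = numpy.unique(label_data)   # [0, 10, 500, 999]
    labelMapping = {0: 0, 10: 1, 500: 2, 999: 3}    # user-chosen mapping

    # Replace each pixel by the index of its value in the sorted unique list,
    # then use that index to look up the mapped label.
    mapping_indexes = numpy.searchsorted(unique_read_labels, label_data)
    new_labels = numpy.array([labelMapping[x] for x in unique_read_labels])
    label_data[:] = new_labels[mapping_indexes]
    # label_data is now [[1, 2, 1], [0, 2, 3]]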