Example No. 1
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity.Input.meta.has_mask = True
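
OpArrayPiper simply forwards whatever arrives on its Input slot to its Output slot, so the examples on this page all follow the same round trip: build a Graph, set or connect the Input, and read the Output back. A minimal sketch of that round trip, assuming the usual lazyflow import locations (an assumption; adjust the imports to your tree):

import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper

graph = Graph()
op = OpArrayPiper(graph=graph)
op.Input.meta.axistags = vigra.AxisTags("txyzc")  # optional metadata, as in the tests

data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
op.Input.setValue(data)          # feed the whole array into the input slot
output = op.Output[None].wait()  # request the full ROI from the output slot

assert (output == data).all()    # the piper is an identity operator
op.cleanUp()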
Example No. 2
class TestOpArrayPiper7(object):
    def setUp(self):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")

    def test1(self):
        # Explicitly set has_mask for the input
        self.operator_identity_1.Input.meta.has_mask = True
        self.operator_identity_1.Output.meta.has_mask = True

        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(
            data,
            mask=numpy.zeros(data.shape, dtype=bool),
            shrink=False
        )

        # Try to connect the compatible operators.
        self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        self.operator_identity_1.Input.setValue(data)
        output = self.operator_identity_2.Output[None].wait()

        assert((data == output).all())
        assert(data.mask.shape == output.mask.shape)
        assert((data.mask == output.mask).all())


    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(
            data,
            mask=numpy.zeros(data.shape, dtype=bool),
            shrink=False
        )

        # Try to connect the compatible operators.
        self.operator_identity_1.Input.setValue(data)
        self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        output = self.operator_identity_2.Output[None].wait()

        assert((data == output).all())
        assert(data.mask.shape == output.mask.shape)
        assert((data.mask == output.mask).all())

    def tearDown(self):
        # Take down operators
        self.operator_identity_2.Input.disconnect()
        self.operator_identity_2.Output.disconnect()
        self.operator_identity_2.cleanUp()
        self.operator_identity_1.Input.disconnect()
        self.operator_identity_1.Output.disconnect()
        self.operator_identity_1.cleanUp()
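
Many of the tests below feed numpy masked arrays through the pipe. Passing shrink=False keeps the mask as a full-size boolean array even while nothing is masked yet, which is what makes the mask-shape assertions above meaningful. A small sketch of how such an input is built (plain numpy, nothing lazyflow-specific):

import numpy

data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
data = numpy.ma.masked_array(
    data,
    mask=numpy.zeros(data.shape, dtype=bool),  # start with nothing masked
    shrink=False,                              # keep the full-size mask array
)
data[:, 2] = numpy.ma.masked                   # mask one slab along the second axis
assert data.mask.shape == data.shape           # full-size mask is retained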
Example No. 3
    def setUp(self):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 4
    def setUp(self):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)
        self.operator_identity.Input.allow_mask = False
        self.operator_identity.Output.allow_mask = False

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 5
    def setUp(self):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 6
class TestOpArrayPiper4(object):
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)
        self.operator_identity.Input.allow_mask = False
        self.operator_identity.Output.allow_mask = False
        self.operator_identity.Input.meta.has_mask = False
        self.operator_identity.Output.meta.has_mask = False

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")

    @nose.tools.raises(AllowMaskException)
    def test1(self):
        # Generate a random dataset and see if we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Provide input read all output.
        try:
            self.operator_identity.Input.setValue(data)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    @nose.tools.raises(AllowMaskException)
    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Create array to store results. Don't keep original data.
        output = data.copy()
        output[:] = 0
        output[:] = numpy.ma.nomask

        # Provide input and grab chunks.
        try:
            self.operator_identity.Input.setValue(data)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    @nose.tools.raises(AllowMaskException)
    def test3(self):
        # Generate a random dataset and see if we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Provide input read all output.
        try:
            self.operator_identity.Input.setValue(numpy.zeros_like(data))
        except AssertionError as e:
            raise AllowMaskException(str(e))

    def teardown_method(self, method):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
Example No. 7
class TestOpArrayPiper4(object):
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)
        self.operator_identity.Input.allow_mask = False
        self.operator_identity.Output.allow_mask = False
        self.operator_identity.Input.meta.has_mask = False
        self.operator_identity.Output.meta.has_mask = False

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")

    def test1(self):
        # Generate a random dataset and see if we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Provide input read all output.
        with pytest.raises(AllowMaskException):
            try:
                self.operator_identity.Input.setValue(data)
            except AssertionError as e:
                raise AllowMaskException(str(e))

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Create array to store results. Don't keep original data.
        output = data.copy()
        output[:] = 0
        output[:] = numpy.ma.nomask

        # Provide input and grab chunks.
        with pytest.raises(AllowMaskException):
            try:
                self.operator_identity.Input.setValue(data)
            except AssertionError as e:
                raise AllowMaskException(str(e))

    def test3(self):
        # Generate a random dataset and see if we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data, mask=numpy.zeros(data.shape, dtype=bool), shrink=False)

        # Provide input read all output.
        with pytest.raises(AllowMaskException):
            try:
                self.operator_identity.Input.setValue(numpy.zeros_like(data))
            except AssertionError as e:
                raise AllowMaskException(str(e))

    def teardown_method(self, method):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
Example No. 8
    def setUp(self):
        self.graph = Graph()

        self.operator_border = OpMaskArray(graph=self.graph)
        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_border.InputArray.meta.axistags = vigra.AxisTags("txyzc")

        self.operator_identity.Input.connect(self.operator_border.Output)
Example No. 9
class TestOpArrayPiper(object):
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")

    def test1(self):
        # Generate a random dataset and check that the operator passes it through unchanged.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Provide input read all output.
        self.operator_identity.Input.setValue(data)
        output = self.operator_identity.Output[None].wait()

        assert (data == output).all()

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Create array to store results. Don't keep original data.
        output = data.copy()
        output[:] = 0

        # Provide input and grab chunks.
        self.operator_identity.Input.setValue(data)
        output[:2] = self.operator_identity.Output[:2].wait()
        output[2:] = self.operator_identity.Output[2:].wait()

        assert (data == output).all()

    def test3(self):
        # Generate a random dataset and check the output, including a write through setInSlot.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Provide input read all output.
        self.operator_identity.Input.setValue(numpy.zeros_like(data))
        output = self.operator_identity.Output[None].wait()

        assert (output == 0).all()

        # Try setInSlot
        data_shape_roi = roiFromShape(data.shape)
        data_shape_slice = roiToSlice(*data_shape_roi)
        self.operator_identity.Input[data_shape_slice] = data
        output = self.operator_identity.Output[None].wait()

        assert (data == output).all()

    def teardown_method(self, method):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
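
test3 above writes into the input slot with a slicing assignment built from roiFromShape and roiToSlice. A compact sketch of just that step, assuming those helpers come from lazyflow.roi (adjust the import if your tree differs):

import numpy
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper
from lazyflow.roi import roiFromShape, roiToSlice

graph = Graph()
op = OpArrayPiper(graph=graph)

data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
op.Input.setValue(numpy.zeros_like(data))  # configure the slot with zeros first

start, stop = roiFromShape(data.shape)     # ROI covering the whole array
full_slice = roiToSlice(start, stop)       # -> tuple of slice objects
op.Input[full_slice] = data                # setInSlot-style write through the input
assert (op.Output[None].wait() == data).all()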
Example No. 10
class TestOpArrayPiper(object):
    def setUp(self):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")

    def test1(self):
        # Generate a random dataset and check that the operator passes it through unchanged.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Provide input read all output.
        self.operator_identity.Input.setValue(data)
        output = self.operator_identity.Output[None].wait()

        assert((data == output).all())

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Create array to store results. Don't keep original data.
        output = data.copy()
        output[:] = 0

        # Provide input and grab chunks.
        self.operator_identity.Input.setValue(data)
        output[:2] = self.operator_identity.Output[:2].wait()
        output[2:] = self.operator_identity.Output[2:].wait()

        assert((data == output).all())

    def test3(self):
        # Generate a random dataset and check the output, including a write through setInSlot.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)

        # Provide input read all output.
        self.operator_identity.Input.setValue(numpy.zeros_like(data))
        output = self.operator_identity.Output[None].wait()

        assert((output == 0).all())

        # Try setInSlot
        data_shape_roi = roiFromShape(data.shape)
        data_shape_slice = roiToSlice(*data_shape_roi)
        self.operator_identity.Input[data_shape_slice] = data
        output = self.operator_identity.Output[None].wait()

        assert((data == output).all())

    def tearDown(self):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
Example No. 11
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2.Input.allow_mask = False
        self.operator_identity_2.Output.allow_mask = False

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 12
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2.Input.allow_mask = False
        self.operator_identity_2.Output.allow_mask = False

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 13
class TestOpArrayPiper7(object):
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")

    def test1(self):
        # Explicitly set has_mask for the input
        self.operator_identity_1.Input.meta.has_mask = True
        self.operator_identity_1.Output.meta.has_mask = True

        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data,
                                     mask=numpy.zeros(data.shape, dtype=bool),
                                     shrink=False)

        # Try to connect the compatible operators.
        self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        self.operator_identity_1.Input.setValue(data)
        output = self.operator_identity_2.Output[None].wait()

        assert (data == output).all()
        assert data.mask.shape == output.mask.shape
        assert (data.mask == output.mask).all()

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data,
                                     mask=numpy.zeros(data.shape, dtype=bool),
                                     shrink=False)

        # Try to connect the compatible operators.
        self.operator_identity_1.Input.setValue(data)
        self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        output = self.operator_identity_2.Output[None].wait()

        assert (data == output).all()
        assert data.mask.shape == output.mask.shape
        assert (data.mask == output.mask).all()

    def teardown_method(self, method):
        # Take down operators
        self.operator_identity_2.Input.disconnect()
        self.operator_identity_2.Output.disconnect()
        self.operator_identity_2.cleanUp()
        self.operator_identity_1.Input.disconnect()
        self.operator_identity_1.Output.disconnect()
        self.operator_identity_1.cleanUp()
Example No. 14
class TestOpArrayPiper6(object):
    def setUp(self):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2.Input.allow_mask = False
        self.operator_identity_2.Output.allow_mask = False

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")

    @nose.tools.raises(AllowMaskException)
    def test1(self):
        # Explicitly set has_mask for the input
        self.operator_identity_1.Input.meta.has_mask = True
        self.operator_identity_1.Output.meta.has_mask = True

        # Try to connect the incompatible operators.
        try:
            self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    @nose.tools.raises(AllowMaskException)
    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(
            data,
            mask=numpy.zeros(data.shape, dtype=bool),
            shrink=False
        )

        # Implicitly set has_mask for the input by setting the value.
        self.operator_identity_1.Input.setValue(data)

        # Try to connect the incompatible operators.
        try:
            self.operator_identity_2.Input.connect(self.operator_identity_1.Output)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    def tearDown(self):
        # Take down operators
        self.operator_identity_2.Input.disconnect()
        self.operator_identity_2.Output.disconnect()
        self.operator_identity_2.cleanUp()
        self.operator_identity_1.Input.disconnect()
        self.operator_identity_1.Output.disconnect()
        self.operator_identity_1.cleanUp()
Example No. 15
class TestOpArrayPiper6(object):
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity_1 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2 = OpArrayPiper(graph=self.graph)
        self.operator_identity_2.Input.allow_mask = False
        self.operator_identity_2.Output.allow_mask = False

        self.operator_identity_1.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity_2.Input.meta.axistags = vigra.AxisTags("txyzc")

    @nose.tools.raises(AllowMaskException)
    def test1(self):
        # Explicitly set has_mask for the input
        self.operator_identity_1.Input.meta.has_mask = True
        self.operator_identity_1.Output.meta.has_mask = True

        # Try to connect the incompatible operators.
        try:
            self.operator_identity_2.Input.connect(
                self.operator_identity_1.Output)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    @nose.tools.raises(AllowMaskException)
    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        data = numpy.ma.masked_array(data,
                                     mask=numpy.zeros(data.shape, dtype=bool),
                                     shrink=False)

        # Implicitly set has_mask for the input by setting the value.
        self.operator_identity_1.Input.setValue(data)

        # Try to connect the incompatible operators.
        try:
            self.operator_identity_2.Input.connect(
                self.operator_identity_1.Output)
        except AssertionError as e:
            raise AllowMaskException(str(e))

    def teardown_method(self, method):
        # Take down operators
        self.operator_identity_2.Input.disconnect()
        self.operator_identity_2.Output.disconnect()
        self.operator_identity_2.cleanUp()
        self.operator_identity_1.Input.disconnect()
        self.operator_identity_1.Output.disconnect()
        self.operator_identity_1.cleanUp()
Example No. 16
    def setUp(self):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")
        self.operator_identity.Input.meta.has_mask = True
Example No. 17
    def test_Writer(self):
        # Create the h5 file
        hdf5File = h5py.File(self.testDataFileName, "a")

        opPiper = OpArrayPiper(graph=self.graph)
        opPiper.Input.setValue(self.testData)

        opWriter = OpH5WriterBigDataset(graph=self.graph)
        opWriter.hdf5File.setValue(hdf5File)
        opWriter.hdf5Path.setValue(self.datasetInternalPath)
        opWriter.Image.connect(opPiper.Output)

        # Force the operator to execute by asking for the output (a bool)
        success = opWriter.WriteImage.value
        assert success

        hdf5File.close()

        # Check the file.
        f = h5py.File(self.testDataFileName, 'r')
        dataset = f[self.datasetInternalPath]
        assert dataset.shape == self.dataShape
        assert numpy.all(
            dataset[...] == self.testData.view(numpy.ndarray)[...])
        f.close()
Example No. 18
    def test_Writer(self):
        # Create the h5 file
        hdf5File = h5py.File(self.testDataFileName, "a")

        opPiper = OpArrayPiper(graph=self.graph)
        opPiper.Input.setValue(self.testData)

        # Force extra metadata onto the output
        opPiper.Output.meta.ideal_blockshape = (1, 1, 0, 0, 1)
        # Pretend the RAM usage will be really high to force lots of tiny blocks
        opPiper.Output.meta.ram_usage_per_requested_pixel = 1000000.0

        opWriter = OpH5WriterBigDataset(graph=self.graph)

        # This checks that you can give a preexisting group as the file
        g = hdf5File.create_group('volume')
        opWriter.hdf5File.setValue(g)
        opWriter.hdf5Path.setValue("data")
        opWriter.Image.connect(opPiper.Output)

        # Force the operator to execute by asking for the output (a bool)
        success = opWriter.WriteImage.value
        assert success

        hdf5File.close()

        # Check the file.
        f = h5py.File(self.testDataFileName, 'r')
        dataset = f[self.datasetInternalPath]
        assert dataset.shape == self.dataShape
        assert numpy.all(
            dataset[...] == self.testData.view(numpy.ndarray)[...])
        f.close()
Example No. 19
    def testMargin(self):
        graph = Graph()
        vol = np.zeros((100, 110, 10), dtype=np.float32)
        # draw a big plus sign
        vol[50:70, :, :] = 1.0
        vol[:, 60:80, :] = 1.0
        vol = vigra.taggedView(vol, axistags="zyx").withAxes(*"tzyxc")
        labels = np.zeros((100, 110, 10), dtype=np.uint32)
        labels[45:75, 55:85, 3:4] = 1
        labels = vigra.taggedView(labels, axistags="zyx").withAxes(*"tzyxc")

        op = OpObjectsSegment(graph=graph)
        piper = OpArrayPiper(graph=graph)
        piper.Input.setValue(vol)
        op.Prediction.connect(piper.Output)
        op.LabelImage.setValue(labels)

        # without margin
        op.MarginZYX.setValue(np.asarray((0, 0, 0)))
        out = op.Output[...].wait()
        out = vigra.taggedView(out, axistags=op.Output.meta.axistags)
        out = out.withAxes(*"zyx")
        vol = vol.withAxes(*"zyx")
        assert_array_equal(out[50:70, 60:80, 3] > 0,
                           vol[50:70, 60:80, 3] > 0.5)
        assert np.all(out[:45, ...] == 0)

        # with margin
        op.MarginZYX.setValue(np.asarray((5, 5, 0)))
        out = op.Output[...].wait()
        out = vigra.taggedView(out, axistags=op.Output.meta.axistags)
        out = out.withAxes(*"zyx")
        assert_array_equal(out[45:75, 55:85, 3] > 0,
                           vol[45:75, 55:85, 3] > 0.5)
        assert np.all(out[:40, ...] == 0)
Example No. 20
    def get_non_transposed_provider_slot(
        self,
        parent: Optional[Operator] = None,
        graph: Optional[Graph] = None,
    ) -> OutputSlot:
        opReader = OpArrayPiper(parent=parent, graph=graph)
        opReader.Input.setValue(self.preloaded_array)
        return opReader.Output
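
The helper above returns an OutputSlot rather than raw data, so callers can connect() the preloaded array into a larger graph and let requests be evaluated lazily. A hedged, self-contained sketch of the same pattern; the PreloadedSource class is hypothetical, and only the OpArrayPiper calls mirror the snippet above:

import numpy
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper


class PreloadedSource:
    # Hypothetical stand-in for the object that owns preloaded_array.
    def __init__(self, array):
        self.preloaded_array = array

    def get_non_transposed_provider_slot(self, parent=None, graph=None):
        opReader = OpArrayPiper(parent=parent, graph=graph)
        opReader.Input.setValue(self.preloaded_array)
        return opReader.Output


graph = Graph()
source = PreloadedSource(numpy.zeros((10, 10), dtype=numpy.uint8))
slot = source.get_non_transposed_provider_slot(graph=graph)

consumer = OpArrayPiper(graph=graph)  # any operator with a compatible Input slot
consumer.Input.connect(slot)          # wire the provider slot into the graph
assert (consumer.Output[None].wait() == 0).all()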
Example No. 21
    def testDirtyPropagation_masked(self):
        graph = Graph()
        data = numpy.random.random( (1,100,100,10,1) )
        data = numpy.ma.masked_array(data, mask=numpy.ma.getmaskarray(data), fill_value=data.dtype.type(numpy.nan), shrink=False)
        data[:, 25] = numpy.ma.masked
        opProvider = OpArrayPiper(graph=graph)
        opProvider.Input.setValue(data)

        opSubRegion = OpSubRegion( graph=graph )
        opSubRegion.Input.connect( opProvider.Output )

        opSubRegion.Roi.setValue( ((0,20,30,5,0), (1,30,50,8,1)) )

        gotDirtyRois = []
        def handleDirty(slot, roi):
            gotDirtyRois.append(roi)
        opSubRegion.Output.notifyDirty(handleDirty)

        # Set an input dirty region that overlaps with the subregion
        key = numpy.s_[0:1, 15:35, 32:33, 0:10, 0:1 ]
        opProvider.Input.setDirty( key )

        assert len(gotDirtyRois) == 1
        assert gotDirtyRois[0].start == [0,0,2,0,0]
        assert gotDirtyRois[0].stop == [1,10,3,3,1]

        # Now mark a region that DOESN'T overlap with the subregion
        key = numpy.s_[0:1, 70:80, 32:33, 0:10, 0:1 ]
        opProvider.Input.setDirty( key )

        # Should have gotten no extra dirty notifications
        assert len(gotDirtyRois) == 1
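
The dirty-propagation test above relies on Slot.notifyDirty to observe invalidation events as they travel through the graph. A stripped-down sketch of that mechanism on a single OpArrayPiper, assuming the same lazyflow imports as in the earlier sketches:

import numpy
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper

graph = Graph()
op = OpArrayPiper(graph=graph)
op.Input.setValue(numpy.zeros((1, 100, 100, 10, 1)))

dirty_rois = []

def handle_dirty(slot, roi):
    # Called with the slot that changed and the region that became invalid.
    dirty_rois.append((roi.start, roi.stop))

op.Output.notifyDirty(handle_dirty)

# Marking part of the input dirty propagates through the identity operator.
op.Input.setDirty(numpy.s_[0:1, 15:35, 32:33, 0:10, 0:1])
assert len(dirty_rois) == 1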
Example No. 22
    def setUp(self):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)
        self.operator_identity.Input.allow_mask = False
        self.operator_identity.Output.allow_mask = False

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")
Example No. 23
    def setUp(self):
        self.graph = Graph()

        self.operator_border = OpMaskArray(graph=self.graph)
        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_border.InputArray.meta.axistags = vigra.AxisTags("txyzc")

        self.operator_identity.Input.connect(self.operator_border.Output)
Example No. 24
        def testBB(self):
            graph = Graph()
            op = OpObjectsSegment(graph=graph)
            piper = OpArrayPiper(graph=graph)
            piper.Input.setValue(self.vol)
            op.Prediction.connect(piper.Output)
            op.LabelImage.setValue(self.labels)

            bbox = op.BoundingBoxes[0, ..., 0].wait()
            assert isinstance(bbox, dict)
Example No. 25
        def testFaulty(self):
            vec = vigra.taggedView(np.zeros((500, ), dtype=np.float32),
                                   axistags=vigra.defaultAxistags('x'))
            graph = Graph()
            op = OpObjectsSegment(graph=graph)
            piper = OpArrayPiper(graph=graph)
            piper.Input.setValue(vec)

            with self.assertRaises(AssertionError):
                op.Prediction.connect(piper.Output)
                op.LabelImage.connect(piper.Output)
Example No. 26
    def setupOutputs(self):
        self.CleanBlocks.meta.shape = (1,)
        self.CleanBlocks.meta.dtype = object

        reconfigure = False
        if self.inputs["fixAtCurrent"].ready():
            self._fixed = self.inputs["fixAtCurrent"].value

        if self.inputs["blockShape"].ready() and self.inputs["Input"].ready():
            newBShape = self.inputs["blockShape"].value
            if self._origBlockShape != newBShape and self.inputs["Input"].ready():
                reconfigure = True
            self._origBlockShape = newBShape
            OpArrayPiper.setupOutputs(self)

        if reconfigure and self.shape is not None:
            self._lock.acquire()
            self._allocateManagementStructures()
            if not self._lazyAlloc:
                self._allocateCache()
            self._lock.release()
Example No. 27
    def testIngestData(self):
        """
        The ingestData() function can be used to import an entire slot's
        data into the label array, but copied one block at a time.
        """
        op = self.op
        data = self.data + 5
        opProvider = OpArrayPiper(graph=op.graph)
        opProvider.Input.setValue(data)

        max_label = op.ingestData(opProvider.Output)
        assert (op.Output[:].wait() == data).all()
        assert max_label == data.max()
Example No. 28
    def testComplete(self):
        graph = Graph()
        op = OpObjectsSegment(graph=graph)
        piper = OpArrayPiper(graph=graph)
        piper.Input.setValue(self.vol)
        op.Prediction.connect(piper.Output)
        piper = OpArrayPiper(graph=graph)
        piper.Input.setValue(self.labels)
        op.LabelImage.connect(piper.Output)

        # get whole volume
        out = op.CachedOutput[...].wait()
        out = vigra.taggedView(out, axistags=op.Output.meta.axistags)

        # check whether no new blocks introduced
        mask = np.where(self.labels > 0, 0, 1)
        masked = out * mask
        assert_array_equal(masked, 0 * masked)

        # check whether the interior was labeled 1
        assert np.all(out[:, 22:38, 22:38, 22:38, :] > 0)
        assert np.all(out[:, 62:78, 62:78, 62:78, :] > 0)
Example No. 29
    def testOutput(self):
        graph = Graph()
        data = numpy.random.random((1, 100, 100, 10, 1))
        opProvider = OpArrayPiper(graph=graph)
        opProvider.Input.setValue(data)

        opSubRegion = OpSubRegion(graph=graph)
        opSubRegion.Input.connect(opProvider.Output)

        opSubRegion.Roi.setValue(((0, 20, 30, 5, 0), (1, 30, 50, 8, 1)))

        subData = opSubRegion.Output(start=(0, 5, 10, 1, 0), stop=(1, 10, 20, 3, 1)).wait()
        assert (subData == data[0:1, 25:30, 40:50, 6:8, 0:1]).all()
Example No. 30
    def test_Writer(self):
        # Create the h5 file
        hdf5File = h5py.File(self.testDataH5FileName, "a")
        n5File = z5py.N5File(self.testDataN5FileName)

        opPiper = OpArrayPiper(graph=self.graph)
        opPiper.Input.setValue(self.testData)

        # Force extra metadata onto the output
        opPiper.Output.meta.ideal_blockshape = (1, 1, 0, 0, 1)
        # Pretend the RAM usage will be really high to force lots of tiny blocks
        opPiper.Output.meta.ram_usage_per_requested_pixel = 1000000.0

        h5_opWriter = OpH5N5WriterBigDataset(graph=self.graph)
        n5_opWriter = OpH5N5WriterBigDataset(graph=self.graph)

        # This checks that you can give a preexisting group as the file
        h5_g = hdf5File.create_group("volume")
        n5_g = n5File.create_group("volume")
        h5_opWriter.h5N5File.setValue(h5_g)
        n5_opWriter.h5N5File.setValue(n5_g)
        h5_opWriter.h5N5Path.setValue("data")
        n5_opWriter.h5N5Path.setValue("data")
        h5_opWriter.Image.connect(opPiper.Output)
        n5_opWriter.Image.connect(opPiper.Output)

        # Force the operator to execute by asking for the output (a bool)
        h5_success = h5_opWriter.WriteImage.value
        n5_success = n5_opWriter.WriteImage.value
        assert h5_success
        assert n5_success

        hdf5File.close()
        n5File.close()

        # Check the file.
        hdf5File = h5py.File(self.testDataH5FileName, "r")
        n5File = z5py.N5File(self.testDataN5FileName, "r")
        h5_dataset = hdf5File[self.datasetInternalPath]
        n5_dataset = n5File[self.datasetInternalPath]
        assert h5_dataset.shape == self.dataShape
        assert n5_dataset.shape == self.dataShape
        assert (numpy.all(
            h5_dataset[...] == self.testData.view(numpy.ndarray)[...])).all()
        assert (numpy.all(
            n5_dataset[...] == self.testData.view(numpy.ndarray)[...])).all()
        hdf5File.close()
        n5File.close()
Example No. 31
    def test_Writer(self):

        # Create the h5 file
        hdf5File = h5py.File(self.testDataH5FileName, "a")
        n5File = z5py.N5File(self.testDataN5FileName)

        opPiper = OpArrayPiper(graph=self.graph)
        opPiper.Input.setValue(self.testData)

        h5_opWriter = OpH5N5WriterBigDataset(graph=self.graph)
        n5_opWriter = OpH5N5WriterBigDataset(graph=self.graph)

        # This checks that you can give a preexisting group as the file
        h5_g = hdf5File.create_group("volume")
        n5_g = n5File.create_group("volume")
        h5_opWriter.h5N5File.setValue(h5_g)
        n5_opWriter.h5N5File.setValue(n5_g)
        h5_opWriter.h5N5Path.setValue("data")
        n5_opWriter.h5N5Path.setValue("data")
        h5_opWriter.Image.connect(opPiper.Output)
        n5_opWriter.Image.connect(opPiper.Output)

        # Force the operator to execute by asking for the output (a bool)
        h5_success = h5_opWriter.WriteImage.value
        n5_success = n5_opWriter.WriteImage.value
        assert h5_success
        assert n5_success

        hdf5File.close()
        n5File.close()

        # Check the file.
        hdf5File = h5py.File(self.testDataH5FileName, "r")
        n5File = z5py.N5File(self.testDataN5FileName, "r")
        h5_dataset = hdf5File[self.datasetInternalPath]
        n5_dataset = n5File[self.datasetInternalPath]
        assert h5_dataset.shape == self.dataShape
        assert n5_dataset.shape == self.dataShape
        assert (numpy.all(
            h5_dataset[...] == self.testData.view(numpy.ndarray)[...])).all()
        assert (numpy.all(
            n5_dataset[...] == self.testData.view(numpy.ndarray)[...])).all()
        hdf5File.close()
        n5File.close()
Example No. 32
    def testComplete(self):
        graph = Graph()
        op = OpGraphCut(graph=graph)
        piper = OpArrayPiper(graph=graph)
        piper.Input.setValue(self.fullVolume)
        op.Prediction.connect(piper.Output)

        out = op.CachedOutput[...].wait()
        out = vigra.taggedView(out, axistags=op.Output.meta.axistags)
        assert_array_equal(out.shape, self.fullVolume.shape)

        # check whether no new blocks introduced
        mask = np.where(self.labels > 0, 0, 1)
        masked = out.view(np.ndarray) * mask
        assert_array_equal(masked, 0 * masked)

        # check whether the interior was labeled 1
        assert np.all(out[:, 22:38, 22:38, 22:38, :] > 0)
        assert np.all(out[:, 62:78, 62:78, 62:78, :] > 0)
Example No. 33
    def testIngestData(self):
        """
        The ingestData() function can be used to import an entire slot's
        data into the label array, but copied one block at a time.
        """
        op = self.op
        data = self.data + 5
        opProvider = OpArrayPiper(graph=op.graph)
        opProvider.Input.setValue(data)

        max_label = op.ingestData(opProvider.Output)
        outputData = op.Output[...].wait()
        assert numpy.all(outputData[...] == data)
        assert numpy.all(outputData.mask == data.mask)

        # FIXME: This assertion fails and I don't know why.
        #        I don't think masked arrays are important for user label data, so I'm ignoring this failure.
        #  assert numpy.all(outputData.fill_value == data.fill_value), \
        #     "Unexpected fill_value: {} instead of {}".format(outputData.fill_value, data.fill_value)
        assert max_label == data.max()
Example No. 34
    def test_basic(self):
        opSource = OpArrayPiper(graph=self.graph)
        opSource.Input.setValue(self.testData)

        opData = OpArrayCache(graph=self.graph)
        opData.blockShape.setValue(self.testData.shape)
        opData.Input.connect(opSource.Output)

        filepath = os.path.join(self._tmpdir, 'multipage.tiff')
        logger.debug("writing to: {}".format(filepath))

        opExport = OpExportMultipageTiff(graph=self.graph)
        opExport.Filepath.setValue(filepath)
        opExport.Input.connect(opData.Output)

        # Run the export
        opExport.run_export()

        opReader = OpTiffReader(graph=self.graph)
        try:
            opReader.Filepath.setValue(filepath)

            # Re-order before comparing
            opReorderAxes = OpReorderAxes(graph=self.graph)
            try:
                opReorderAxes.AxisOrder.setValue(self._axisorder)
                opReorderAxes.Input.connect(opReader.Output)

                readData = opReorderAxes.Output[:].wait()
                logger.debug("Expected shape={}".format(self.testData.shape))
                logger.debug("Read shape={}".format(readData.shape))

                assert opReorderAxes.Output.meta.shape == self.testData.shape, \
                    "Exported files were of the wrong shape or number."
                assert (opReorderAxes.Output[:].wait() == self.testData.view( numpy.ndarray )).all(), \
                    "Exported data was not correct"
            finally:
                opReorderAxes.cleanUp()
        finally:
            opReader.cleanUp()
Example No. 35
    def testOutput_masked(self):
        graph = Graph()
        data = numpy.random.random( (1,100,100,10,1) )
        data = numpy.ma.masked_array(data,
                                     mask=numpy.ma.getmaskarray(data),
                                     fill_value=data.dtype.type(numpy.nan),
                                     shrink=False
        )
        data[:, 25] = numpy.ma.masked
        opProvider = OpArrayPiper(graph=graph)
        opProvider.Input.setValue(data)

        opSubRegion = OpSubRegion( graph=graph )
        opSubRegion.Input.connect( opProvider.Output )

        opSubRegion.Roi.setValue( ((0,20,30,5,0), (1,30,50,8,1)) )

        subData = opSubRegion.Output( start=( 0,5,10,1,0 ), stop=( 1,10,20,3,1 ) ).wait()
        assert (subData == data[0:1, 25:30, 40:50, 6:8, 0:1]).all()
        assert (subData.mask == data.mask[0:1, 25:30, 40:50, 6:8, 0:1]).all()
        assert ((subData.fill_value == data.fill_value) |
                (numpy.isnan(subData.fill_value) & numpy.isnan(data.fill_value))).all()
Example No. 36
    def get_provider_slot(self, parent: Operator) -> OutputSlot:
        opReader = OpArrayPiper(parent=parent)
        opReader.Input.setValue(self.preloaded_array)
        return opReader.Output
Example No. 37
class TestOpMaskArray3(object):
    def setUp(self):
        self.graph = Graph()

        self.operator_border = OpMaskArray(graph=self.graph)
        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_border.InputArray.meta.axistags = vigra.AxisTags("txyzc")

        self.operator_identity.Input.connect(self.operator_border.Output)

    def test1(self):
        # Generate a random dataset and see if we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        mask = numpy.zeros(data.shape, dtype=bool)

        # Mask borders of the expected output.
        left_slicing = (mask.ndim - 1) * (slice(None), ) + (slice(None, 1), )
        right_slicing = (mask.ndim - 1) * (slice(None), ) + (slice(-1, None), )
        for i in range(mask.ndim):
            left_slicing = left_slicing[-1:] + left_slicing[:-1]
            right_slicing = right_slicing[-1:] + right_slicing[:-1]

            mask[left_slicing] = True
            mask[right_slicing] = True

        expected_output = numpy.ma.masked_array(data, mask=mask, shrink=False)

        # Provide input read all output.
        self.operator_border.InputArray.setValue(data)
        self.operator_border.InputMask.setValue(mask)
        output = self.operator_identity.Output[None].wait()

        assert ((expected_output == output).all())
        assert (expected_output.mask.shape == output.mask.shape)
        assert ((expected_output.mask == output.mask).all())

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        mask = numpy.zeros(data.shape, dtype=bool)

        # Mask borders of the expected output.
        left_slicing = (mask.ndim - 1) * (slice(None), ) + (slice(None, 1), )
        right_slicing = (mask.ndim - 1) * (slice(None), ) + (slice(-1, None), )
        for i in range(mask.ndim):
            left_slicing = left_slicing[-1:] + left_slicing[:-1]
            right_slicing = right_slicing[-1:] + right_slicing[:-1]

            mask[left_slicing] = True
            mask[right_slicing] = True

        expected_output = numpy.ma.masked_array(data, mask=mask, shrink=False)

        # Create array to store results. Don't keep original data.
        output = expected_output.copy()
        output[:] = 0
        output[:] = numpy.ma.nomask

        # Provide input and grab chunks.
        self.operator_border.InputArray.setValue(data)
        self.operator_border.InputMask.setValue(mask)
        output[:2] = self.operator_identity.Output[:2].wait()
        output[2:] = self.operator_identity.Output[2:].wait()

        assert ((expected_output == output).all())
        assert (expected_output.mask.shape == output.mask.shape)
        assert ((expected_output.mask == output.mask).all())

    def tearDown(self):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
        self.operator_border.InputArray.disconnect()
        self.operator_border.InputMask.disconnect()
        self.operator_border.Output.disconnect()
        self.operator_border.cleanUp()
Example No. 38
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value
    
            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    # Guess the axis order, since one was not provided.
                    axisorders = { 2 : 'yx',
                                   3 : 'zyx',
                                   4 : 'zyxc',
                                   5 : 'tzyxc' }

                    shape = preloaded_array.shape
                    ndim = preloaded_array.ndim            
                    assert ndim != 0, "0-D data is not yet supported"
                    assert ndim != 1, "1-D data is not yet supported"
                    assert ndim <= 5, "No support for data with more than 5 dimensions."
        
                    axisorder = axisorders[ndim]
                    if ndim == 3 and shape[2] <= 4:
                        # Special case: If the 3rd dim is small, assume it's 'c', not 'z'
                        axisorder = 'yxc'
                    preloaded_array = vigra.taggedView(preloaded_array, axisorder)
                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue( preloaded_array )
                providerSlot = opReader.Output
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue( datasetInfo.subvolume_roi )
                opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
            self._opReaders.append(opReader)
            
            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            if 'c' not in providerSlot.meta.getTaggedShape():
                num_channels = 0
            else:
                num_channels = providerSlot.meta.getTaggedShape()['c']
            if num_channels > 1:
                metadata['channel_names'] = ["{}-{}".format(role_name, i) for i in range(num_channels)]
            else:
                metadata['channel_names'] = [role_name]
                 
            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0,255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    # This usually only happens when we copied a DatasetInfo from another lane,
                    # and used it as a 'template' to initialize this lane.
                    # This happens in the BatchProcessingApplet when it attempts to guess the axistags of 
                    # batch images based on the axistags chosen by the user in the interactive images.
                    # If the interactive image tags don't make sense for the batch image, you get this error.
                    raise Exception( "Your dataset's provided axistags ({}) do not have the "
                                     "correct dimensionality for your dataset, which has {} dimensions."
                                     .format( "".join(tag.key for tag in datasetInfo.axistags), len(providerSlot.meta.shape) ) )
                metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi
                
                # FIXME: We are overwriting the axistags metadata to intentionally allow 
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if 
                #        the user redefines the channel axis index.  
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.
            
            opMetadataInjector = OpMetadataInjector( parent=self )
            opMetadataInjector.Input.connect( providerSlot )
            opMetadataInjector.Metadata.setValue( metadata )
            providerSlot = opMetadataInjector.Output
            self._opReaders.append( opMetadataInjector )

            self._NonTransposedImage.connect(providerSlot)
            
            if self.forceAxisOrder:
                # Before we re-order, make sure no non-singleton 
                #  axes would be dropped by the forced order.
                output_order = "".join(self.forceAxisOrder)
                provider_order = "".join(providerSlot.meta.getAxisKeys())
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                dropped_axes = set(provider_order) - set(output_order)
                if any(tagged_provider_shape[a] > 1 for a in dropped_axes):
                    msg = "The axes of your dataset ({}) are not compatible with the axes used by this workflow ({}). Please fix them."\
                          .format(provider_order, output_order)
                    raise DatasetConstraintError("DataSelection", msg)

                op5 = OpReorderAxes(parent=self)
                op5.AxisOrder.setValue(self.forceAxisOrder)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)
            
            # If the channel axis is not last (or is missing),
            #  make sure the axes are re-ordered so that channel is last.
            if providerSlot.meta.axistags.index('c') != len( providerSlot.meta.axistags )-1:
                op5 = OpReorderAxes( parent=self )
                keys = list(providerSlot.meta.getTaggedShape().keys())
                try:
                    # Remove if present.
                    keys.remove('c')
                except ValueError:
                    pass
                # Append
                keys.append('c')
                op5.AxisOrder.setValue( "".join( keys ) )
                op5.Input.connect( providerSlot )
                providerSlot = op5.Output
                self._opReaders.append( op5 )
            
            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)
            
            # Set the image name and usage flag
            self.AllowLabels.setValue( datasetInfo.allowLabels )
            
            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname
            
            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)
        
        except:
            self.internalCleanup()
            raise
Example No. 39
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingH5N5Reader(parent=self)
                opReader.H5N5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    axisorder = get_default_axisordering(preloaded_array.shape)
                    preloaded_array = vigra.taggedView(preloaded_array,
                                                       axisorder)

                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue(preloaded_array)
                providerSlot = opReader.Output
            else:
                if datasetInfo.realDataSource:
                    # Use a normal (filesystem) reader
                    opReader = OpInputDataReader(parent=self)
                    if datasetInfo.subvolume_roi is not None:
                        opReader.SubVolumeRoi.setValue(
                            datasetInfo.subvolume_roi)
                    opReader.WorkingDirectory.setValue(
                        self.WorkingDirectory.value)
                    opReader.SequenceAxis.setValue(datasetInfo.sequenceAxis)
                    opReader.FilePath.setValue(datasetInfo.filePath)
                else:
                    # Use fake reader: allows to run the project in a headless
                    # mode without the raw data
                    opReader = OpZeroDefault(parent=self)
                    opReader.MetaInput.meta = MetaDict(
                        shape=datasetInfo.laneShape,
                        dtype=datasetInfo.laneDtype,
                        drange=datasetInfo.drange,
                        axistags=datasetInfo.axistags)
                    opReader.MetaInput.setValue(
                        numpy.zeros(datasetInfo.laneShape,
                                    dtype=datasetInfo.laneDtype))
                providerSlot = opReader.Output
            self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            if 'c' not in providerSlot.meta.getTaggedShape():
                num_channels = 0
            else:
                num_channels = providerSlot.meta.getTaggedShape()['c']
            if num_channels > 1:
                metadata['channel_names'] = [
                    "{}-{}".format(role_name, i) for i in range(num_channels)
                ]
            else:
                metadata['channel_names'] = [role_name]

            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0, 255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    ts = providerSlot.meta.getTaggedShape()
                    if 'c' in ts and 'c' not in datasetInfo.axistags and len(
                            datasetInfo.axistags) + 1 == len(ts):
                        # provider has no channel axis, but template has => add channel axis to provider
                        # fixme: Optimize the axistag guess in BatchProcessingApplet instead of hoping for the best here
                        metadata['axistags'] = vigra.defaultAxistags(
                            ''.join(datasetInfo.axistags.keys()) + 'c')
                    else:
                        # This usually only happens when we copied a DatasetInfo from another lane,
                        # and used it as a 'template' to initialize this lane.
                        # This happens in the BatchProcessingApplet when it attempts to guess the axistags of
                        # batch images based on the axistags chosen by the user in the interactive images.
                        # If the interactive image tags don't make sense for the batch image, you get this error.
                        raise Exception(
                            "Your dataset's provided axistags ({}) do not have the "
                            "correct dimensionality for your dataset, which has {} dimensions."
                            .format(
                                "".join(tag.key
                                        for tag in datasetInfo.axistags),
                                len(providerSlot.meta.shape)))
                else:
                    metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.original_axistags is not None:
                metadata['original_axistags'] = datasetInfo.original_axistags

            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi

                # FIXME: We are overwriting the axistags metadata to intentionally allow
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if
                #        the user redefines the channel axis index.
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

            opMetadataInjector = OpMetadataInjector(parent=self)
            opMetadataInjector.Input.connect(providerSlot)
            opMetadataInjector.Metadata.setValue(metadata)
            providerSlot = opMetadataInjector.Output
            self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            # make sure that x and y axes are present in the selected axis order
            if 'x' not in providerSlot.meta.axistags or 'y' not in providerSlot.meta.axistags:
                raise DatasetConstraintError(
                    "DataSelection",
                    "Data must always have at least the axes x and y for ilastik to work."
                )

            if self.forceAxisOrder:
                assert isinstance(self.forceAxisOrder, list), \
                    "forceAxisOrder should be a *list* of preferred axis orders"

                # Before we re-order, make sure no non-singleton
                #  axes would be dropped by the forced order.
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                minimal_axes = {k for k, v in tagged_provider_shape.items() if v > 1}
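                # For example, a (hypothetical) provider tagged
                # (t=1, x=100, y=100, z=1, c=3) reduces to the set {'x', 'y', 'c'}.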

                # Pick the shortest of the possible 'forced' orders that
                # still contains all the axes of the original dataset.
                candidate_orders = list(self.forceAxisOrder)
                candidate_orders = [
                    order for order in candidate_orders
                    if minimal_axes.issubset(set(order))
                ]

                if len(candidate_orders) == 0:
                    msg = "The axes of your dataset ({}) are not compatible with any of the allowed"\
                          " axis configurations used by this workflow ({}). Please fix them."\
                          .format(providerSlot.meta.getAxisKeys(), self.forceAxisOrder)
                    raise DatasetConstraintError("DataSelection", msg)

                # Use the shortest of the compatible orders.
                output_order = "".join(min(candidate_orders, key=len))
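                # For example, with a (hypothetical) forceAxisOrder of
                # ['txyzc', 'xyc'] and minimal axes {'x', 'y', 'c'}, the shorter
                # 'xyc' is selected.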
            else:
                # No forced axis order was supplied. Use the original axis order as
                # the output order: the export applet assumes that an OpReorderAxes
                # operator is inserted at the beginning of the chain.
                output_order = "".join(providerSlot.meta.axistags.keys())

            op5 = OpReorderAxes(parent=self)
            op5.AxisOrder.setValue(output_order)
            op5.Input.connect(providerSlot)
            providerSlot = op5.Output
            self._opReaders.append(op5)

            # If the channel axis is missing, add it as the last axis
            if 'c' not in providerSlot.meta.axistags:
                op5 = OpReorderAxes(parent=self)
                keys = providerSlot.meta.getAxisKeys()

                # Append
                keys.append('c')
                op5.AxisOrder.setValue("".join(keys))
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            self.AllowLabels.setValue(datasetInfo.allowLabels)

            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
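
The channel-axis handling above relies on lazyflow's OpReorderAxes to add any missing singleton axes. The following is a minimal, self-contained sketch of that pattern, assuming the Graph, OpArrayPiper and OpReorderAxes operators behave as in the surrounding examples; the import paths, shapes and axis keys used here are assumptions made for illustration only.

import numpy
import vigra

from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper, OpReorderAxes

graph = Graph()

# A tiny 2-D "xy" source, mirroring the setUp pattern of the surrounding examples.
source = OpArrayPiper(graph=graph)
source.Input.meta.axistags = vigra.defaultAxistags("xy")
source.Input.setValue(numpy.zeros((10, 20), dtype=numpy.uint8))

# Re-expose the data as "xyc": the missing channel axis is added as a
# singleton, just like the channel-axis insertion in the setup code above.
reorder = OpReorderAxes(graph=graph)
reorder.AxisOrder.setValue("xyc")
reorder.Input.connect(source.Output)

result = reorder.Output[None].wait()
print(reorder.Output.meta.getAxisKeys())  # expected: ['x', 'y', 'c']
print(result.shape)                       # expected: (10, 20, 1)
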
Exemplo n.º 40
0
class TestOpMaskArray3(object):
    def setUp(self):
        self.graph = Graph()

        self.operator_border = OpMaskArray(graph=self.graph)
        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_border.InputArray.meta.axistags = vigra.AxisTags("txyzc")

        self.operator_identity.Input.connect(self.operator_border.Output)
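        # Reading through the downstream identity operator exercises mask
        # propagation across an operator connection.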

    def test1(self):
        # Generate a random dataset and check that we get the right masking from the operator.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        mask = numpy.zeros(data.shape, dtype=bool)

        # Mask borders of the expected output.
        left_slicing = (mask.ndim - 1) * (slice(None),) + (slice(None, 1),)
        right_slicing = (mask.ndim - 1) * (slice(None),) + (slice(-1, None),)
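        # The two slicings select the first and last element along the final
        # axis; each loop iteration rotates them by one position so that every
        # axis in turn gets its leading and trailing slab masked.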
        for i in range(mask.ndim):
            left_slicing = left_slicing[-1:] + left_slicing[:-1]
            right_slicing = right_slicing[-1:] + right_slicing[:-1]

            mask[left_slicing] = True
            mask[right_slicing] = True

        expected_output = numpy.ma.masked_array(data, mask=mask, shrink=False)

        # Provide the input and read all of the output.
        self.operator_border.InputArray.setValue(data)
        self.operator_border.InputMask.setValue(mask)
        output = self.operator_identity.Output[None].wait()

        assert((expected_output == output).all())
        assert(expected_output.mask.shape == output.mask.shape)
        assert((expected_output.mask == output.mask).all())

    def test2(self):
        # Generate a dataset and grab chunks of it from the operator. The result should be the same as above.
        data = numpy.random.random((4, 5, 6, 7, 3)).astype(numpy.float32)
        mask = numpy.zeros(data.shape, dtype=bool)

        # Mask borders of the expected output.
        left_slicing = (mask.ndim - 1) * (slice(None),) + (slice(None, 1),)
        right_slicing = (mask.ndim - 1) * (slice(None),) + (slice(-1, None),)
        for i in range(mask.ndim):
            left_slicing = left_slicing[-1:] + left_slicing[:-1]
            right_slicing = right_slicing[-1:] + right_slicing[:-1]

            mask[left_slicing] = True
            mask[right_slicing] = True

        expected_output = numpy.ma.masked_array(data, mask=mask, shrink=False)

        # Create an array to store results; clear the copied data and mask so
        # that nothing from expected_output leaks into the comparison.
        output = expected_output.copy()
        output[:] = 0
        output.mask[:] = False

        # Provide input and grab chunks.
        self.operator_border.InputArray.setValue(data)
        self.operator_border.InputMask.setValue(mask)
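        # Read the result in two chunks along the first axis; together they
        # should reassemble the full masked array, including its mask.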
        output[:2] = self.operator_identity.Output[:2].wait()
        output[2:] = self.operator_identity.Output[2:].wait()

        assert((expected_output == output).all())
        assert(expected_output.mask.shape == output.mask.shape)
        assert((expected_output.mask == output.mask).all())

    def tearDown(self):
        # Take down operators
        self.operator_identity.Input.disconnect()
        self.operator_identity.Output.disconnect()
        self.operator_identity.cleanUp()
        self.operator_border.InputArray.disconnect()
        self.operator_border.InputMask.disconnect()
        self.operator_border.Output.disconnect()
        self.operator_border.cleanUp()
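
The border-masking idiom used in both tests above can also be seen in isolation with plain numpy. This is a small illustrative sketch, independent of lazyflow; the 3x4 shape is made up for demonstration.

import numpy

mask = numpy.zeros((3, 4), dtype=bool)

left_slicing = (mask.ndim - 1) * (slice(None),) + (slice(None, 1),)
right_slicing = (mask.ndim - 1) * (slice(None),) + (slice(-1, None),)
for _ in range(mask.ndim):
    # Rotate the slicings so that each axis in turn gets its first and
    # last slab marked.
    left_slicing = left_slicing[-1:] + left_slicing[:-1]
    right_slicing = right_slicing[-1:] + right_slicing[:-1]
    mask[left_slicing] = True
    mask[right_slicing] = True

print(mask.astype(int))
# expected:
# [[1 1 1 1]
#  [1 0 0 1]
#  [1 1 1 1]]
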
Exemplo n.º 41
0
    def setup_method(self, method):
        self.graph = Graph()

        self.operator_identity = OpArrayPiper(graph=self.graph)

        self.operator_identity.Input.meta.axistags = vigra.AxisTags("txyzc")