Example #1
    def test_grid_rag_hdf5_stacked2d(self):
        import nifty.hdf5 as nhdf5
        # self.path, self.shape and self.labels are fixtures of the test
        # class; nrag is nifty.graph.rag, imported at module level
        hidT = nhdf5.createFile(self.path)
        chunkShape = [1, 2, 1]
        array = nhdf5.Hdf5ArrayUInt32(hidT, "data", self.shape, chunkShape)
        array[0:self.shape[0], 0:self.shape[1], 0:self.shape[2]] = self.labels
        self.small_array_test(array, nrag.gridRagStacked2DHdf5)
        nhdf5.closeFile(hidT)
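The fixtures self.path, self.shape and self.labels are set up elsewhere in the test class. As a standalone sketch of the same pattern, with placeholder path, shape and labels (the positional call (labels, numberOfLabels, numberOfThreads) follows Example #5 below):

    # standalone sketch; the path, shape and labels are placeholders
    import numpy
    import nifty.hdf5 as nhdf5
    import nifty.graph.rag as nrag

    shape = [2, 2, 2]                            # (z, y, x)
    labels = numpy.zeros(shape, dtype='uint32')
    labels[1, :, :] = 1                          # one label per slice

    hidT = nhdf5.createFile('/tmp/labels.h5')
    array = nhdf5.Hdf5ArrayUInt32(hidT, "data", shape, [1, 2, 2])
    array[0:shape[0], 0:shape[1], 0:shape[2]] = labels

    rag = nrag.gridRagStacked2DHdf5(array, int(labels.max() + 1), -1)
    print(rag.numberOfNodes, rag.numberOfEdges)  # 2 nodes, 1 between-slice edge
    nhdf5.closeFile(hidT)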
Example #2
    def test_stacked_rag_hdf5_serialize_deserialize(self):
        import nifty.hdf5 as nhdf5
        # same pattern as above, on the larger bigShape/bigLabels fixtures
        hidT = nhdf5.createFile(self.path)
        chunkShape = [1, 2, 1]
        array = nhdf5.Hdf5ArrayUInt32(hidT, "data", self.bigShape, chunkShape)
        array[0:self.bigShape[0], 0:self.bigShape[1],
              0:self.bigShape[2]] = self.bigLabels
        self.serialization_test(array, nrag.gridRagStacked2DHdf5)
        nhdf5.closeFile(hidT)
Example #3
    def test_hdf5_rag2d_large(self):
        import nifty.hdf5 as nhdf5
        # numpy and nrag (nifty.graph.rag) are imported at module level

        shape = [5, 6]
        blockShape = chunkShape = shape
        # FIXME  these shapes cause incorrect edges !!!
        # chunkShape = [3, 2]
        # blockShape = [2, 3]

        hidT = nhdf5.createFile(self.path)
        array = nhdf5.Hdf5ArrayUInt32(hidT, "data", shape, chunkShape)

        self.assertEqual(array.shape[0], shape[0])
        self.assertEqual(array.shape[1], shape[1])

        labels = numpy.array([[0, 0, 0, 0, 1, 1],
                              [0, 2, 2, 0, 1, 3],
                              [0, 3, 3, 3, 3, 3],
                              [0, 3, 4, 5, 5, 5],
                              [0, 0, 4, 6, 6, 6]],
                             dtype='uint32')

        self.assertEqual(labels.shape[0], shape[0])
        self.assertEqual(labels.shape[1], shape[1])

        array[0:shape[0], 0:shape[1]] = labels
        rag = nrag.gridRagHdf5(array,
                               numberOfLabels=int(labels.max() + 1),
                               blockShape=blockShape,
                               numberOfThreads=1)

        shouldEdges = [(0, 1),
                       (0, 2),
                       (0, 3),
                       (0, 4),
                       (1, 3),
                       (2, 3),
                       (3, 4),
                       (3, 5),
                       (4, 5),
                       (4, 6),
                       (5, 6)]

        shouldNotEdges = [(0, 6),
                          (0, 5),
                          (1, 6),
                          (1, 5)]

        self.generic_rag_test(rag=rag,
                              numberOfNodes=labels.max() + 1,
                              shouldEdges=shouldEdges,
                              shouldNotEdges=shouldNotEdges)
        nhdf5.closeFile(hidT)
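generic_rag_test is a helper of the test class and is not shown in these excerpts. A minimal sketch of such a check, assuming rag.uvIds() returns an (E, 2) array of node pairs (as used in Example #5 below) and rag.numberOfNodes gives the node count:

    # hypothetical stand-in for the test class' generic_rag_test helper
    def generic_rag_check(rag, numberOfNodes, shouldEdges, shouldNotEdges):
        assert rag.numberOfNodes == numberOfNodes
        # normalize edges to sorted tuples for order-independent lookup
        uv = set(tuple(sorted(edge)) for edge in rag.uvIds())
        for edge in shouldEdges:
            assert tuple(sorted(edge)) in uv, "missing edge %s" % str(edge)
        for edge in shouldNotEdges:
            assert tuple(sorted(edge)) not in uv, "unexpected edge %s" % str(edge)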
Example #4
    def test_hdf5_rag_3d(self):
        import nifty.hdf5 as nhdf5
        # numpy and nrag (nifty.graph.rag) are imported at module level

        shape = [3, 2, 2]
        chunkShape = [1, 2, 1]
        blockShape = [1, 2, 3]

        hidT = nhdf5.createFile(self.path)
        array = nhdf5.Hdf5ArrayUInt32(hidT, "data", shape, chunkShape)

        self.assertEqual(array.shape[0], shape[0])
        self.assertEqual(array.shape[1], shape[1])
        self.assertEqual(array.shape[2], shape[2])

        labels = [[[0, 1],
                   [0, 0]],
                  [[1, 1],
                   [2, 2]],
                  [[3, 3],
                   [3, 3]]]
        labels = numpy.array(labels, dtype='uint32')

        self.assertEqual(labels.shape[0], shape[0])
        self.assertEqual(labels.shape[1], shape[1])
        self.assertEqual(labels.shape[2], shape[2])

        array[0:shape[0], 0:shape[1], 0:shape[2]] = labels
        rag = nrag.gridRagHdf5(array,
                               numberOfLabels=labels.max() + 1,
                               blockShape=blockShape,
                               numberOfThreads=-1)

        shouldEdges = [(0, 1),
                       (0, 2),
                       (1, 2),
                       (1, 3),
                       (2, 3)]

        shouldNotEdges = [(0, 3)]

        self.generic_rag_test(rag=rag,
                              numberOfNodes=labels.max() + 1,
                              shouldEdges=shouldEdges,
                              shouldNotEdges=shouldNotEdges)
        nhdf5.closeFile(hidT)
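A sanity check that could be added just before the nhdf5.closeFile(hidT) call: read the labels back through the HDF5-backed array with readSubarray(begin, end), which Example #6 below uses the same way:

        # sketch: read the written labels back and compare
        readback = array.readSubarray([0, 0, 0], shape)
        assert (readback == labels).all()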
Example #5
    def test_hdf5_serialization(self):
        import nifty.hdf5 as nh5
        # h5py and nrag (nifty.graph.rag) are imported at module level
        seg = self.make_random_stacked_seg((20, 100, 100))
        self.check_stacked(seg)
        n_labels = seg.max() + 1

        with h5py.File(self.seg_path, 'a') as f:
            f.create_dataset('data', data=seg)

        label_f = nh5.openFile(self.seg_path)
        labels = nh5.Hdf5ArrayUInt32(label_f, 'data')

        rag = nrag.gridRagStacked2DHdf5(labels, n_labels, -1)

        # round-trip: serialize the rag to file and deserialize it again
        nrag.writeStackedRagToHdf5(rag, self.rag_path)
        rag_read = nrag.readStackedRagFromHdf5(labels, n_labels, self.rag_path)

        # the deserialized rag must match the original in all its properties
        self.assertTrue((rag.uvIds() == rag_read.uvIds()).all())
        self.assertEqual(rag.numberOfEdges, rag_read.numberOfEdges)
        self.assertEqual(rag.numberOfNodes, rag_read.numberOfNodes)
        self.assertTrue(
            (rag.minMaxLabelPerSlice() == rag_read.minMaxLabelPerSlice()).all())
        self.assertTrue(
            (rag.numberOfNodesPerSlice() ==
             rag_read.numberOfNodesPerSlice()).all())
        self.assertTrue(
            (rag.numberOfInSliceEdges() ==
             rag_read.numberOfInSliceEdges()).all())
        self.assertTrue(
            (rag.numberOfInBetweenSliceEdges() ==
             rag_read.numberOfInBetweenSliceEdges()).all())
        self.assertTrue(
            (rag.inSliceEdgeOffset() == rag_read.inSliceEdgeOffset()).all())
        self.assertTrue(
            (rag.betweenSliceEdgeOffset() ==
             rag_read.betweenSliceEdgeOffset()).all())
        self.assertEqual(rag.totalNumberOfInSliceEdges,
                         rag_read.totalNumberOfInSliceEdges)
        self.assertEqual(rag.totalNumberOfInBetweenSliceEdges,
                         rag_read.totalNumberOfInBetweenSliceEdges)
        nh5.closeFile(label_f)
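Two consistency checks that could be appended inside this test (a sketch; it assumes the per-slice counts sum to the corresponding totals, which the method names suggest but these excerpts do not confirm):

        # sketch: per-slice bookkeeping should be consistent with the totals
        self.assertEqual(rag.numberOfInSliceEdges().sum(),
                         rag.totalNumberOfInSliceEdges)
        self.assertEqual(rag.totalNumberOfInSliceEdges +
                         rag.totalNumberOfInBetweenSliceEdges,
                         rag.numberOfEdges)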
Example #6
    def run(self):
        # np (numpy), os, vigra, nh5 (nifty.hdf5), nrag (nifty.graph.rag),
        # PipelineParameter, workflow_logger and get_replace_slices are
        # provided by the surrounding pipeline module

        # read the results from the sub solver
        sub_solver = self.input()['sub_solver']
        sub_results = sub_solver.read('sub_results')
        block_begins = sub_solver.read('block_begins')
        block_ends = sub_solver.read('block_ends')
        sub_nodes = sub_solver.read('sub_nodes')
        has_defects = False
        if PipelineParameter().defectPipeline:
            defect_slices_path = self.input()['defect_slices'].path
            defect_slices = vigra.readHDF5(defect_slices_path, 'defect_slices')
            if defect_slices.size:
                has_defects = True

        # get the rag
        rag = self.input()['rag'].read()

        out_path = self.output().path
        if not os.path.exists(out_path):
            os.mkdir(out_path)

        # iterate over the blocks and serialize the sub-block results
        for block_id in range(len(sub_results)):
            # map each global node id in this block to its sub-result value
            sub_result = {
                sub_nodes[block_id][i]: sub_results[block_id][i]
                for i in range(len(sub_nodes[block_id]))
            }

            print("Saving Block-Result for block %i / %i" %
                  (block_id, len(sub_results)))
            block_begin = block_begins[block_id]
            block_end = block_ends[block_id]

            # save the begin and end coordinates of this block for later use
            block_path = os.path.join(out_path,
                                      'block%i_coordinates.h5' % block_id)
            vigra.writeHDF5(block_begin, block_path, 'block_begin')
            vigra.writeHDF5(block_end, block_path, 'block_end')

            # determine the shape of this subblock
            block_shape = block_end - block_begin
            chunk_shape = [
                1, min(512, block_shape[1]),
                min(512, block_shape[2])
            ]

            # save the segmentation for this subblock
            res_path = os.path.join(out_path,
                                    'block%i_segmentation.h5' % block_id)
            res_file = nh5.createFile(res_path)
            out_array = nh5.Hdf5ArrayUInt32(
                res_file,
                'data',
                block_shape.tolist(),
                chunk_shape,
                compression=PipelineParameter().compressionLevel)

            nrag.projectScalarNodeDataInSubBlock(rag, sub_result, out_array,
                                                 block_begins[block_id],
                                                 block_ends[block_id])

            # if there are defective slices in this sub-block, replace them with an adjacent slice
            if has_defects:

                # project the defective slices from global coordinates to sub-block coordinates
                this_defect_slices = defect_slices - block_begin[0]
                this_defect_slices = this_defect_slices[np.logical_and(
                    this_defect_slices > 0,
                    this_defect_slices < block_shape[0])]

                # only replace slices if there are any in the subblock
                if this_defect_slices.size:
                    replace_slice = get_replace_slices(this_defect_slices,
                                                       block_shape)
                    for z in this_defect_slices:
                        replace_z = replace_slice[z]
                        workflow_logger.debug(
                            "SubblockSegmentationWorkflow: block %i replacing defective slice %i with %i"
                            % (block_id, z, replace_z))
                        out_array.writeSubarray(
                            [z, 0, 0],
                            out_array.readSubarray([replace_z, 0, 0], [
                                replace_z + 1, block_shape[1], block_shape[2]
                            ]))

            nh5.closeFile(res_file)
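get_replace_slices is imported from elsewhere in the pipeline and is not shown here. A hypothetical sketch of such a helper, mapping each defective slice to the nearest slice in the block that is not itself defective:

    # hypothetical sketch (not the pipeline's actual implementation);
    # assumes at least one non-defective slice exists in the block
    def get_replace_slices(defect_slices, block_shape):
        defects = set(int(z) for z in defect_slices)
        good = [z for z in range(block_shape[0]) if z not in defects]
        # nearest non-defective slice for each defective one
        return {z: min(good, key=lambda g: abs(g - z)) for z in defects}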