Example #1
File: metrics.py Project: elhuhdron/emdrp
# module-level imports assumed by this snippet (nd is scipy.ndimage in this project)
import numpy as np
import scipy.ndimage as nd
from pyCext import binary_warping

def warping_error( lbl_truth, lbl_proposed, doComps=True, simpleLUT=None, connectivity=1 ):

    warped, classified, nonSimpleTypes, diff, simpleLUT = binary_warping( lbl_truth, lbl_proposed,
        return_nonSimple=True, connectivity=connectivity, simpleLUT=simpleLUT )

    wrp_err = float(diff) / lbl_truth.size

    if doComps:
        lbls, nSplits = nd.measurements.label(classified == nonSimpleTypes['dic']['RESULT_SPLIT'],
            structure=np.ones((3,3,3),dtype=bool))
        nonSimpleTypesOut = np.zeros(lbls.shape, dtype=lbls.dtype)
        nonSimpleTypesOut[lbls > 0] = nonSimpleTypes['dic']['RESULT_SPLIT']
        lbls, nMerges = nd.measurements.label(classified == nonSimpleTypes['dic']['RESULT_MERGE'],
            structure=np.ones((3,3,3),dtype=bool))
        nonSimpleTypesOut[lbls > 0] = nonSimpleTypes['dic']['RESULT_MERGE']
    else:
        nSplits = (classified == nonSimpleTypes['dic']['RESULT_SPLIT']).sum(dtype=np.int64)
        nMerges = (classified == nonSimpleTypes['dic']['RESULT_MERGE']).sum(dtype=np.int64)
        nonSimpleTypesOut = nonSimpleTypes

    return wrp_err, nSplits, nMerges, nonSimpleTypesOut, simpleLUT
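
A minimal usage sketch on toy volumes (an assumption-flagged example: pyCext is the project's compiled extension, and the binary dtype and C-contiguity expectations are guessed from the project's other call sites):

import numpy as np

truth = np.zeros((32, 32, 32), dtype=np.uint8, order='C')
truth[8:24, 8:24, 8:24] = 1
proposed = truth.copy(); proposed[20:24, 8:24, 8:24] = 0  # introduce a small error

err, nSplits, nMerges, nonSimple, lut = warping_error(truth, proposed)
print('warping error %.6f, %d splits, %d merges' % (err, nSplits, nMerges))
# the returned simple-point LUT can be passed back in to speed up later calls
err2, _, _, _, _ = warping_error(truth, proposed, simpleLUT=lut)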
Example #2
    def watershed_cube(self):
        # relies on module-level imports in the project, e.g. numpy as np, scipy.ndimage as nd,
        #   h5py, time, skimage morphology as morph, and binary_warping from pyCext
        writeVerbose = False
        #writeVerbose = self.dpWatershedTypes_verbose
        readVerbose = False
        #readVerbose = self.dpWatershedTypes_verbose

        # load the probability data, allocate as array of volumes instead of 4D ndarray to maintain C-order volumes
        probs = [None]*self.ntypes; bwseeds = [None]*self.nfg_types
        if self.srclabels:
            # this code path is typically not used in favor of the label checker for fully labeled 3d gt components.
            # but, some ground truth (for example, 2d ECS cases) was only labeled with voxel type,
            #   so this is used to create ground truth components from the voxel types.
            loadh5 = emLabels.readLabels(srcfile=self.srclabels, chunk=self.chunk.tolist(), offset=self.offset.tolist(),
                size=self.size.tolist(), data_type='uint16', verbose=writeVerbose)
            self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
            # pre-allocate for srclabels method, labeled areas are set to prob of 1 below
            for i in range(self.ntypes): probs[i] = np.zeros(self.size, dtype=emProbabilities.PROBS_DTYPE, order='C')
            if self.TminSrc < 2:
                # simple method with no "cleaning"
                for i in range(self.ntypes): probs[i][loadh5.data_cube==i] = 1
            else:
                # optionally "clean" labels by removing small bg and fg components for each foreground type
                fgbwlabels = np.zeros(self.size, dtype=bool)
                for i in range(self.nfg_types):
                    # background connected components and threshold
                    comps, nlbls = nd.measurements.label(loadh5.data_cube!=i+1)
                    comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                    # foreground connected components and threshold
                    comps, nlbls = nd.measurements.label(comps==0)
                    comps, sizes = emLabels.thresholdSizes(comps, minSize=self.TminSrc)
                    # keep track of mask for all foreground types
                    bwlabels = (comps > 0); fgbwlabels = np.logical_or(fgbwlabels, bwlabels)
                    probs[i+1][bwlabels] = 1
                # set background type as all areas that are not in foreground types after "cleaning"
                probs[0][np.logical_not(fgbwlabels)] = 1
        else:
            # check if background is in the prob file
            hdf = h5py.File(self.probfile,'r'); has_bg = self.bg_type in hdf; hdf.close()
            for i in range(0 if has_bg else 1, self.ntypes):
                loadh5 = dpLoadh5.readData(srcfile=self.probfile, dataset=self.types[i], chunk=self.chunk.tolist(),
                    offset=self.offset.tolist(), size=self.size.tolist(), data_type=emProbabilities.PROBS_STR_DTYPE,
                    verbose=readVerbose)
                self.datasize = loadh5.datasize; self.chunksize = loadh5.chunksize; self.attrs = loadh5.data_attrs
                probs[i] = loadh5.data_cube; del loadh5
            # if background was not in hdf5 then create it as 1-sum(fg type probs)
            if not has_bg:
                probs[0] = np.ones_like(probs[1])
                for i in range(1,self.ntypes): probs[0] -= probs[i]
                #assert( (probs[0] >= 0).all() ) # comment for speed
                probs[0][probs[0] < 0] = 0 # rectify

        # save some of the parameters as attributes
        self.attrs['types'] = self.types; self.attrs['fg_types'] = self.fg_types
        self.attrs['fg_types_labels'] = self.fg_types_labels

        # save connectivity structure and warping LUT because they are used on each iteration (for speed)
        self.bwconn = nd.morphology.generate_binary_structure(dpLoadh5.ND, self.connectivity)
        self.bwconn2d = self.bwconn[:,:,1]; self.simpleLUT = None

        # load the warpings if warping mode is enabled
        warps = None
        if self.warpfile:
            warps = [None]*self.nwarps
            for i in range(self.nwarps):
                loadh5 = dpLoadh5.readData(srcfile=self.warpfile, dataset=self.warp_datasets[i],
                    chunk=self.chunk.tolist(), offset=self.offset.tolist(), size=self.size.tolist(),
                    verbose=readVerbose)
                warps[i] = loadh5.data_cube; del loadh5

        # xxx - may need to revisit cropping, only intended to be used with warping method.
        if self.docrop: c = self.cropborder; s = self.size  # c and s are only set when cropping is enabled; use them below only under docrop guards

        # optionally apply filters in attempt to fill small background (membrane) probability gaps.
        if self.close_bg > 0:
            # create structuring element
            n = 2*self.close_bg + 1; h = self.close_bg; strel = np.zeros((n,n,n),dtype=bool); strel[h,h,h]=1
            strel = nd.binary_dilation(strel,iterations=self.close_bg)

            # xxx - this was the only thing tried here that helped some but didn't work well against the skeletons
            probs[0] = nd.grey_closing( probs[0], structure=strel )
            for i in range(self.nfg_types): probs[i+1] = nd.grey_opening( probs[i+1], structure=strel )
            # xxx - this gave worse results
            #probs[0] = nd.maximum_filter( probs[0], footprint=strel )
            # xxx - this had almost no effect
            #probs[0] = nd.grey_closing( probs[0], structure=strel )

        # argmax produces the winner-take-all type assignment for each voxel.
        # background type was put first, so voxType of zero is background (membrane).
        voxType = np.concatenate([x.reshape(x.shape + (1,)) for x in probs], axis=3).argmax(axis=3)
        # write out the winning type for each voxel
        # save some params from this watershed run in the attributes
        d = self.attrs.copy(); d['thresholds'] = self.Ts; d['Tmins'] = self.Tmins
        data = voxType.astype(emVoxelType.VOXTYPE_DTYPE)
        if self.docrop: data = data[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]
        emVoxelType.writeVoxType(outfile=self.outlabels, chunk=self.chunk.tolist(),
            offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
            chunksize=self.chunksize.tolist(), verbose=writeVerbose, attrs=d,
            data=data)

        # only allow a voxel to be included in the type of component that had max prob for that voxel.
        # do this by setting the non-winning probabilities to zero.
        for i in range(self.ntypes): probs[i][voxType != i] = 0;

        # create a type mask for each foreground type to select only current voxel type (winner-take-all from network)
        voxTypeSel = [None] * self.nfg_types; voxTypeNotSel =  [None] * self.nfg_types
        for i in range(self.nfg_types):
            voxTypeSel[i] = (voxType == i+1)
            # create an inverted version, only used for complete fill not for warping (which requires C-contiguous),
            #   so apply crop here if cropping enabled
            voxTypeNotSel[i] = np.logical_not(voxTypeSel[i])
            if self.docrop: voxTypeNotSel[i] = voxTypeNotSel[i][c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]

        # need C-contiguous probabilities for binary_warping.
        for i in range(self.nfg_types):
            if not probs[i+1].flags.contiguous or np.isfortran(probs[i+1]):
                probs[i+1] = np.ascontiguousarray(probs[i+1])

        # iteratively apply thresholds, each time only keeping components that have fallen under size Tmin.
        # at last iteration keep all remaining components.
        # do this separately for foreground types.
        for k in range(self.nTmin):
            for i in range(self.nfg_types): bwseeds[i] = np.zeros(self.size, dtype=bool, order='C')
            for i in range(self.nthresh):
                if self.dpWatershedTypes_verbose:
                    print('creating supervoxels at threshold = %.8f with Tmin = %d' % (self.Ts[i], self.Tmins[k]))
                    t = time.time()
                types_labels = [None]*self.nfg_types; types_uclabels = [None]*self.nfg_types;
                if self.skeletonize: types_sklabels = [None]*self.nfg_types
                types_nlabels = np.zeros((self.nfg_types,),dtype=np.int64)
                types_ucnlabels = np.zeros((self.nfg_types,),dtype=np.int64)
                for j in range(self.nfg_types):
                    # run connected components at this threshold on labels
                    labels, nlabels = nd.measurements.label(probs[j+1] > self.Ts[i], self.bwconn)

                    # merge the current thresholded components with the previous seeds to get current bwlabels
                    bwlabels = np.logical_or(labels, bwseeds[j])

                    # take the current components under threshold and merge with the seeds for the next iteration
                    if i < self.nthresh-1:
                        labels, sizes = emLabels.thresholdSizes(labels, minSize=-self.Tmins[k])
                        bwseeds[j] = np.logical_or(labels, bwseeds[j])

                    # this if/elif switch determines the main method for creating the labels.
                    # xxx - make the cropping more efficient, particularly to avoid filling cropped areas
                    if self.method == 'overlap':
                        # definite advantage to this method over other methods, but cost is about 2-3 times slower.
                        # labels are linked per zslice using precalculated slice to slice warpings based on the probs.
                        labels, nlabels = self.label_overlap(bwlabels, voxTypeSel[j], warps)

                        # xxx - add switches to only optionally export the unconnected labels
                        #uclabels = labels; ucnlabels = nlabels;

                        # crop right after the labels are created and stay uncropped from here.
                        # xxx - labels will be wrong unless the method implicitly handled the cropping during labeling.
                        #   currently only the warping method does this; cropping is not needed for the other methods anyway.
                        if self.docrop: labels = labels[c[0]:s[0]-c[0],c[1]:s[1]-c[1],c[2]:s[2]-c[2]]

                        # this method can not create true unconnected 3d labels, but should be unconnected in 2d.
                        # NOTE: currently this only removes 6-connectivity, no matter what connectivity is specified
                        # xxx - some method of removing adjacencies with arbitrary connectivity?
                        uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                    elif self.method == 'skim-ws':
                        # xxx - still trying to evaluate if there is any advantage to this more traditional watershed.
                        #   it does not leave a non-adjacency boundary and is about 1.5 times slower than bwmorph

                        # run connected components on the thresholded labels merged with previous seeds
                        labels, nlabels = nd.measurements.label(bwlabels, self.bwconn)

                        # run a true watershed based on the current foreground probs using current components as markers
                        labels = morph.watershed(probs[j+1], labels, connectivity=self.bwconn, mask=voxTypeSel[j])

                        # remove any adjacencies created during the watershed
                        # NOTE: currently this only removes 6-connectivity, no matter what connectivity is specified
                        # xxx - some method of removing adjacencies with arbitrary connectivity?
                        uclabels, ucnlabels = emLabels.remove_adjacencies(labels)
                    else:
                        if self.method == 'comps-ws' and i>1:
                            # this is an alternative to the traditional watershed that warps out by stepping
                            #   back through the thresholds in reverse order. it has the advantage of non-adjacency.
                            # may help slightly for small supervoxels but did not show much metric improvement
                            #   in terms of large-scale connectivity (against skeletons).
                            # about 4-5 times slower than the regular warping method.

                            # make an unconnected version of bwlabels by warping out but with mask only for this type
                            # everything above current threshold is already labeled, so only need to use gray thresholds
                            #    starting below the current threshold level.
                            bwlabels, diff, self.simpleLUT = binary_warping(bwlabels, np.ones(self.size,dtype=bool),
                                mask=voxTypeSel[j], borderval=False, slow=True, simpleLUT=self.simpleLUT,
                                connectivity=self.connectivity, gray=probs[j+1],
                                grayThresholds=self.Ts[i-1::-1].astype(np.float32, order='C'))
                        else:
                            assert( self.method == 'comps' )     # bad method option
                            # make an unconnected version of bwlabels by warping out but with mask only for this type
                            bwlabels, diff, self.simpleLUT = binary_warping(bwlabels, np.ones(self.size,dtype=bool),
                                mask=voxTypeSel[j], borderval=False, slow=True, simpleLUT=self.simpleLUT,
                                connectivity=self.connectivity)

                        # run connected components on the thresholded labels merged with previous seeds (warped out)
                        uclabels, ucnlabels = nd.measurements.label(bwlabels, self.bwconn);

                        # in this case the normal labels are the same as the unconnected labels because of warping
                        labels = uclabels; nlabels = ucnlabels;

                    # optionally make a skeletonized version of the unconnected labels
                    # xxx - revisit this, currently not being used for anything, started as a method to skeletonize GT
                    if self.skeletonize:
                        # method to skeletonize using max range endpoints only
                        sklabels, sknlabels = emLabels.ucskeletonize(uclabels, mask=voxTypeSel[j],
                            sampling=self.attrs['scale'] if 'scale' in self.attrs else None)
                        assert( sknlabels == ucnlabels )

                    # fill these labels out so that they cover remaining voxels based on voxType.
                    # this uses the bwdist method for finding nearest neighbors, so connectivity can be violated.
                    # this is mitigated by first filling out background using the warping transformation
                    #   (or watershed) above, then this step is only to fill in remaining voxels for the
                    #   current foreground voxType.
                    labels = emLabels.nearest_neighbor_fill(labels, mask=voxTypeNotSel[j],
                        sampling=self.attrs['scale'] if 'scale' in self.attrs else None)

                    # save the components labels generated for this type
                    types_labels[j] = labels.astype(emLabels.LBLS_DTYPE, copy=False);
                    types_uclabels[j] = uclabels.astype(emLabels.LBLS_DTYPE, copy=False);
                    types_nlabels[j] = nlabels if self.fg_types_labels[j] < 0 else 1
                    types_ucnlabels[j] = ucnlabels if self.fg_types_labels[j] < 0 else 1
                    if self.skeletonize: types_sklabels[j] = sklabels.astype(emLabels.LBLS_DTYPE, copy=False)

                # merge the fg component labels. they cannot overlap because voxel type is winner-take-all.
                nlabels = 0; ucnlabels = 0;
                labels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE);
                uclabels = np.zeros(self.size_crop, dtype=emLabels.LBLS_DTYPE);
                if self.skeletonize: sklabels = np.zeros(self.size, dtype=emLabels.LBLS_DTYPE);
                for j in range(self.nfg_types):
                    sel = (types_labels[j] > 0); ucsel = (types_uclabels[j] > 0);
                    if self.skeletonize: sksel = (types_sklabels[j] > 0);
                    if self.fg_types_labels[j] < 0:
                        labels[sel] += (types_labels[j][sel] + nlabels);
                        uclabels[ucsel] += (types_uclabels[j][ucsel] + ucnlabels);
                        if self.skeletonize: sklabels[sksel] += (types_sklabels[j][sksel] + ucnlabels);
                        nlabels += types_nlabels[j]; ucnlabels += types_ucnlabels[j];
                    else:
                        labels[sel] = self.fg_types_labels[j];
                        uclabels[ucsel] = self.fg_types_labels[j];
                        if self.skeletonize: sklabels[sksel] = self.fg_types_labels[j]
                        nlabels += 1; ucnlabels += 1;

                if self.dpWatershedTypes_verbose:
                    print('\tnlabels = %d' % (nlabels,))
                    #print('\tnlabels = %d %d' % (nlabels,labels.max())) # for debug only
                    #assert(nlabels == labels.max()) # sanity check for non-overlapping voxTypeSel, comment for speed
                    print('\tdone in %.4f s' % (time.time() - t,))

                # make a fully-filled out version using bwdist nearest foreground neighbor
                wlabels = emLabels.nearest_neighbor_fill(labels, mask=None,
                    sampling=self.attrs['scale'] if 'scale' in self.attrs else None)

                # write out the results
                if self.nTmin == 1: subgroups = ['%.8f' % (self.Ts[i],)]
                else: subgroups = ['%d' % (self.Tmins[k],), '%.8f' % (self.Ts[i],)]
                d = self.attrs.copy(); d['threshold'] = self.Ts[i];
                d['types_nlabels'] = types_nlabels; d['Tmin'] = self.Tmins[k]
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=labels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['with_background']+subgroups )
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=wlabels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['zero_background']+subgroups )
                d['type_nlabels'] = types_ucnlabels;
                emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                    offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                    chunksize=self.chunksize.tolist(), data=uclabels, verbose=writeVerbose,
                    attrs=d, strbits=self.outlabelsbits, subgroups=['no_adjacencies']+subgroups )
                if self.skeletonize:
                    emLabels.writeLabels(outfile=self.outlabels, chunk=self.chunk.tolist(),
                        offset=self.offset_crop.tolist(), size=self.size_crop.tolist(), datasize=self.datasize.tolist(),
                        chunksize=self.chunksize.tolist(), data=sklabels, verbose=writeVerbose,
                        attrs=d, strbits=self.outlabelsbits, subgroups=['skeletonized']+subgroups )
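
The heart of the loop above is the seeded threshold sweep: at each threshold only the components still under size Tmin are carried forward as seeds, and only the final threshold keeps everything. A standalone sketch of that idea with plain numpy/scipy on a toy volume (thresholds and Tmin are made up; this is not the project's full method, which adds warping/watershed and voxel-type masking on top):

import numpy as np
import scipy.ndimage as nd

def threshold_sweep(prob, thresholds, tmin):
    # components smaller than tmin at each threshold become seeds for the next
    # threshold; at the last threshold all remaining components are kept
    seeds = np.zeros(prob.shape, dtype=bool)
    for i, T in enumerate(thresholds):
        labels, nlabels = nd.label(prob > T)
        bw = np.logical_or(labels > 0, seeds)
        if i == len(thresholds) - 1:
            return nd.label(bw)
        sizes = np.bincount(labels.ravel())[1:]
        seeds = np.logical_or(np.isin(labels, np.flatnonzero(sizes < tmin) + 1), seeds)

prob = nd.gaussian_filter(np.random.default_rng(0).random((40, 40, 40)), 2)
supervoxels, nsupervoxels = threshold_sweep(prob, [0.45, 0.50, 0.55], tmin=64)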
Example #3
    def label_overlap(self, bwlabels, mask, warps=None):
        # this method operates slice by slice.
        # relies on module-level imports, e.g. numpy as np, scipy.ndimage as nd,
        #   scipy interpolate, networkx as nx, and binary_warping from pyCext
        zlabels = np.zeros(self.size, dtype=np.int64)
        nzlabels = 0; prv_labels = None; connections = [None]*(self.size[2]-1)
        s = self.size; s2 = [s[0], s[1], 1]; c = self.cropborder
        if warps:
            x = np.arange(s[0], dtype=warps[0].dtype); y = np.arange(s[1], dtype=warps[0].dtype)
            X = np.meshgrid(x,y, indexing='ij')
        for z in range(self.size[2]):
            # get bwlabels and mask for the current zslice
            cur_bwlabels = bwlabels[:,:,z]; cur_mask = mask[:,:,z]

            # make an unconnected version of bwlabels by warping out but with mask only for this type
            bw = cur_bwlabels[:,:,None].copy(order='C'); msk = cur_mask[:,:,None].copy(order='C')
            bw, diff, self.simpleLUT = binary_warping(bw, np.ones(s2,dtype=bool),
                mask=msk, borderval=False, slow=True, simpleLUT=self.simpleLUT, connectivity=self.connectivity)
            cur_fill_bwlabels = bw[:,:,0]

            # run connected components on the thresholded labels merged with previous seeds (warped out)
            cur_fill_labels, cur_nlabels = nd.measurements.label(cur_fill_bwlabels, self.bwconn2d, output=np.int64)

            # make labels for this zslice unique and add to whole label cube
            sel = (cur_fill_labels > 0); cur_fill_labels[sel] += nzlabels; zlabels[:,:,z] = cur_fill_labels

            # get eroded labels by applying mask for original bwlabels
            cur_labels = cur_fill_labels.copy(); cur_labels[np.logical_not(cur_bwlabels)] = 0

            # warp the previous slice to this slice and connect them
            if z > 0:
                if warps:
                    # apply warping from previous label slice to current slice using nearest neighbor interpolation.
                    cur_warpsx = warps[0][:,:,z-1]; cur_warpsy = warps[1][:,:,z-1]
                    xi = X[0] + cur_warpsx; yi = X[1] + cur_warpsy
                    # remove warps that are out of the size bounds
                    #xi[xi < 0] = 0; xi[xi > s[0]-1] = s[0]-1; yi[yi < 0] = 0; yi[yi > s[1]-1] = s[1]-1
                    f = interpolate.RegularGridInterpolator((x,y), prv_labels, method='nearest',
                        bounds_error=False, fill_value=0)
                    prv_labels = f( np.vstack((xi.ravel(),yi.ravel())).T ).reshape(prv_labels.shape)

                # map the previous warped labels to the current labels based on pixel-by-pixel overlap of eroded labels.
                # only use the xy-cropped area to do the linkage.
                prv_labels_crop = prv_labels; cur_labels_crop = cur_labels
                if self.docrop:
                    prv_labels_crop = prv_labels_crop[c[0]:s[0]-c[0],c[1]:s[1]-c[1]]
                    cur_labels_crop = cur_labels_crop[c[0]:s[0]-c[0],c[1]:s[1]-c[1]]
                tmp = dpWatershedTypes.unique_rows(\
                    np.ascontiguousarray( np.vstack((prv_labels_crop.ravel(),cur_labels_crop.ravel())).T ))
                # remove background connections (any rows with zeros)
                connections[z-1] = tmp[(tmp>0).all(axis=1),:]

            # loop updates for linking current slice to next
            prv_labels = cur_labels; nzlabels += cur_nlabels

        # run graph connected components on graph created from pairwise connections.
        # this graph represents labels that have been linked by the warping between zslices.
        G = nx.Graph(); G.add_edges_from(np.vstack(connections))
        compsG = nx.connected_components(G); nlabels = 0; mapping = np.zeros((nzlabels+1,), dtype=np.int64)
        for nodes in compsG:
            # create mapping from current per-zslice labels to linked labels across zslices
            nlabels += 1; mapping[np.array(tuple(nodes),dtype=np.int64)] = nlabels

        # create the final labels using the mapping built from the graph connected components
        return mapping[zlabels], nlabels
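
The final graph step in isolation: per-slice label ids are nodes, overlap pairs are edges, and each connected component collapses to one 3D id through a lookup array. A sketch (one deliberate deviation, flagged in the comment: ids that never appear in an overlap pair are added as isolated nodes so they survive, whereas the mapping above sends them to background):

import numpy as np
import networkx as nx

def link_slices(zlabels, connections, nzlabels):
    # zlabels: volume of per-slice-unique ids; connections: list of (prev, cur) id pairs
    G = nx.Graph()
    G.add_edges_from(np.vstack(connections))
    G.add_nodes_from(range(1, nzlabels + 1))  # assumption: keep unlinked ids as singletons
    mapping = np.zeros((nzlabels + 1,), dtype=np.int64)
    nlabels = 0
    for nodes in nx.connected_components(G):
        nlabels += 1
        mapping[np.fromiter(nodes, dtype=np.int64)] = nlabels
    return mapping[zlabels], nlabels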
Example #4
    def clean(self):

        # read voxel types first, allows for cavity_fill and get_svox_type to be both specified
        if self.get_svox_type or self.write_voxel_type or self.apply_bg_mask:
            if self.dpCleanLabels_verbose:
                print('Reading supervoxel types')
                t = time.time()

            voxType = emVoxelType.readVoxType(srcfile=self.srcfile,
                                              chunk=self.chunk.tolist(),
                                              offset=self.offset.tolist(),
                                              size=self.size.tolist())
            voxel_type = voxType.data_cube.copy(order='C')
            ntypes = len(voxType.data_attrs['types'])

            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

        # minpath overlay creation is intended to improve proofreading speed by highlighting connected paths
        if self.minpath > 0:
            import skfmm
            if self.minpath_skel:
                from pyCext import binary_warping

            # xxx - allow for multiple minpaths with different labels?
            selmin = (self.data_cube == self.minpath)
            pts, npts = nd.measurements.label(selmin, self.fgbwconn)

            if self.dpCleanLabels_verbose:
                print(
                    'Finding shortest paths for all pairwise combinations of %d points'
                    % (npts, ))
                t = time.time()

            labels = self.data_cube
            sel_ECS, ECS_label = self.getECS(labels)
            labels[sel_ECS] = 0
            selbg = (labels == 0)
            # can't have minpath and ECS label defined the same
            assert (self.minpath != ECS_label)

            # create paths for all pair-wise combinations of points
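            # the sum of the two masked distance maps (d1 + d2 below) is minimal
            #   along a shortest background path between the two point sets, so
            #   thresholding d1 + d2 near its minimum extracts a corridor
            #   containing that path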
            paths = np.zeros(self.size, dtype=np.uint8)
            for i in range(npts):
                for j in range(i + 1, npts):
                    s1 = (pts == i + 1)
                    m = np.ones(self.size, dtype=np.double)
                    m[s1] = 0
                    d1 = skfmm.distance(
                        np.ma.masked_array(
                            nd.distance_transform_edt(
                                m,
                                return_indices=False,
                                return_distances=True,
                                sampling=self.data_attrs['scale']), selbg))

                    s2 = (pts == j + 1)
                    m = np.ones(self.size, dtype=np.double)
                    m[s2] = 0
                    d2 = skfmm.distance(
                        np.ma.masked_array(
                            nd.distance_transform_edt(
                                m,
                                return_indices=False,
                                return_distances=True,
                                sampling=self.data_attrs['scale']), selbg))

                    # xxx - need something like imregionalmin in 3d, could not quickly find an easy solution
                    d = d1 + d2
                    bwlabels = ((d.data < self.minpath_perc * d.min())
                                & ~d.mask)
                    bwlabels[s1 | s2] = 1
                    if self.minpath_skel:
                        # optionally skeletonize keeping original minpath points as anchors
                        bwlabels, diff, simpleLUT = binary_warping(
                            bwlabels.copy(order='C'),
                            np.zeros(self.size, dtype=bool),
                            mask=(~selmin).copy(order='C'),
                            borderval=False,
                            slow=True,
                            connectivity=self.fg_connectivity)
                        # fill back out slightly so more easily viewed in itksnap
                        bwlabels, diff, simpleLUT = binary_warping(
                            bwlabels.copy(order='C'),
                            np.ones(self.size, dtype=bool),
                            borderval=False,
                            slow=True,
                            simpleLUT=simpleLUT,
                            connectivity=self.fg_connectivity,
                            numiters=1)
                    paths[bwlabels] = 1
            self.data_cube = paths

            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

        # smoothing operates on each label one at a time
        if self.smooth:
            if self.dpCleanLabels_verbose:
                print('Smoothing labels object by object')
                t = time.time()

            # threshold sizes to remove empty labels
            self.data_cube, sizes = emLabels.thresholdSizes(self.data_cube,
                                                            minSize=1)

            # exposed smoothing kernel size and contour level as parameters
            smooth_size = self.smooth_size
            #contour_level = self.contour_lvl
            # calculate padding based on smoothing kernel size
            rad = int(1.5 * smooth_size.max())

            sizes = np.array(self.data_cube.shape)
            sz = sizes + 2 * rad
            # create a zero-padded 3d array and embed the label array into it
            image_with_zeros = np.zeros(sz, dtype=self.data_cube.dtype)
            image_with_zeros[rad:-rad, rad:-rad, rad:-rad] = self.data_cube

            image_with_brd = np.lib.pad(self.data_cube,
                                        ((rad, rad), (rad, rad), (rad, rad)),
                                        'edge')

            # do not smooth ECS labels
            sel_ECS, ECS_label = self.getECS(image_with_brd)
            if self.dpCleanLabels_verbose and ECS_label:
                print('\tignoring ECS label %d' % (ECS_label, ))

            # get bounding boxes for each supervoxel in zero padded label volume
            svox_bnd = nd.measurements.find_objects(image_with_zeros)

            # iterate over labels
            nSeeds = len(svox_bnd)
            lbls = np.zeros(sz, dtype=self.data_cube.dtype)
            for j in range(nSeeds):
                if ECS_label and j + 1 == ECS_label: continue

                #if self.dpCleanLabels_verbose:
                #    print('Smoothing label %d / %d' % (j+1,nSeeds)); t = time.time()

                pbnd = tuple(
                    [slice(x.start - rad, x.stop + rad) for x in svox_bnd[j]])
                Lcrpsel = (image_with_brd[pbnd] == j + 1)
                Lcrp = Lcrpsel.astype(np.double)
                # smoothing operation just box filter on cropped binarized label
                Lfilt = nd.filters.uniform_filter(Lcrp,
                                                  size=smooth_size,
                                                  mode='constant')

                # this feature allows for variable contour level depending on if object splits apart
                if len(self.contour_lvl) > 1:
                    # get the original number of components for the object
                    nlabels_orig = nd.measurements.label(
                        Lcrpsel, self.fgbwconn)[1]
                    for c in np.arange(
                            self.contour_lvl[1],
                            self.contour_lvl[0] - self.contour_lvl[2] / 10,
                            -self.contour_lvl[2]):
                        # in case smoothing dropped everything below the contour level, fall back to no smoothing
                        if not (Lfilt > c).any(): Lfilt = Lcrp

                        # check the new number of labels; stop if it is the same as (or less than) the original count
                        csel = (Lfilt > c)
                        nlabels = nd.measurements.label(csel, self.fgbwconn)[1]
                        if nlabels <= nlabels_orig: break
                else:
                    contour_level = self.contour_lvl[0]
                    # in case smoothing dropped everything below the contour level, fall back to no smoothing
                    if not (Lfilt > contour_level).any(): Lfilt = Lcrp
                    csel = (Lfilt > contour_level)

                # assign smoothed output for current label
                lbls[pbnd][csel] = j + 1

            # put ECS labels back
            if ECS_label: lbls[sel_ECS] = ECS_label

            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

            self.data_cube = lbls[rad:-rad, rad:-rad, rad:-rad]

        if self.remove_adjacencies:
            r = self.remove_adjacencies
            assert (r > 1 and r % 2 == 1)  # neighborhood size must be odd and > 1
            labels = self.data_cube.astype(np.uint32, copy=True, order='C')
            sel_ECS, ECS_label = self.getECS(labels)
            labels[sel_ECS] = 0

            if self.dpCleanLabels_verbose:
                print('Removing adjacencies with nbhd %d%s' %
                      (r, ', ignoring ECS label %d' %
                       (ECS_label, ) if ECS_label else ''))
                t = time.time()

            self.data_cube = emLabels.remove_adjacencies_nconn(labels,
                                                               bwconn=np.ones(
                                                                   (r, r, r),
                                                                   dtype=bool))
            if ECS_label: self.data_cube[sel_ECS] = ECS_label

            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

        if self.minsize > 0:
            labels = self.data_cube
            labels, nlabels = self.minsize_scrub(labels, self.minsize,
                                                 self.minsize_fill)
            self.data_cube = labels
            # allow this to work before self.get_svox_type or self.write_voxel_type
            self.data_attrs['types_nlabels'] = [nlabels]

        # NOTE: cavity_fill not intended to work with ECS labeled with single value (ECS components are fine)
        if self.cavity_fill:
            if self.labelwise:
                if self.dpCleanLabels_verbose:
                    print('Removing cavities labelwise using conn %d' %
                          (self.bg_connectivity, ))
                    print('\tGetting bounding boxes')
                    t = time.time()

                # iterate over labels, fill each label within bounding box
                svox_bnd = nd.measurements.find_objects(self.data_cube)
                # remember verbosity and silence it inside the per-label loop below;
                # set verbose unconditionally so the restore below cannot hit a NameError
                verbose = self.dpCleanLabels_verbose
                if verbose:
                    print('\t\tdone in %.4f s' % (time.time() - t))
                    t = time.time()
                    self.dpCleanLabels_verbose = False
                nSeeds = len(svox_bnd)
                lbls = np.zeros(self.size, dtype=self.data_cube.dtype)
                for j in range(nSeeds):
                    csel, selbg, msk = self.cavity_fill_voxels(
                        self.data_cube[svox_bnd[j]] == j + 1, tab=True)
                    lbls[svox_bnd[j]][csel] = j + 1
                del self.data_cube
                self.data_cube = lbls

                if verbose:
                    print('\tdone in %.4f s' % (time.time() - t))
                    self.dpCleanLabels_verbose = True
            else:
                self.data_cube, selbg, msk = self.cavity_fill_voxels(
                    self.data_cube)

            # this prevents any supervoxels as being classified as "membrane".
            # many scripts assume that membrane is labeled as background (label 0).
            if self.get_svox_type or self.write_voxel_type:
                if self.dpCleanLabels_verbose:
                    print('\tRemoving cavities from voxel type')
                    t = time.time()
                if ntypes - 1 > 1:
                    voxel_type = emLabels.nearest_neighbor_fill(
                        voxel_type,
                        mask=selbg,
                        sampling=self.data_attrs['scale'])
                else:
                    voxel_type[msk] = 1
                if self.dpCleanLabels_verbose:
                    print('\t\tdone in %.4f s' % (time.time() - t))
            del msk, selbg

        # NOTE: cavity_fill not intended to work with ECS labeled with single value (ECS components are fine)
        # This method is not foolproof, in one shot it removes all labels below a specified size and then reruns
        #   cavity fill. If a label was not filled then it puts it back. This will not remove any labels that are
        #   in a cavity but connected to background via a bunch of other labels smaller than specified size.
        if self.cavity_fill_minsize > 1:
            if self.dpCleanLabels_verbose:
                print('Removing labels < %d in cavities' %
                      (self.cavity_fill_minsize, ))
            labels_orig = self.data_cube
            data, nlabels = self.minsize_scrub(labels_orig,
                                               self.cavity_fill_minsize,
                                               False,
                                               tab=True,
                                               no_remap=True)

            # do a normal cavity fill after labels smaller than cavity_fill_minsize are removed
            labels, selbg, msk = self.cavity_fill_voxels(data, tab=True)
            del msk, selbg

            if self.dpCleanLabels_verbose:
                print('\tReplacing non-cavity labels')
            sel_not_fill = np.logical_and(labels_orig > 0, labels == 0)
            labels[sel_not_fill] = labels_orig[sel_not_fill]
            del labels_orig, sel_not_fill
            # remove any zero labels (that were removed as cavities)
            self.data_cube, nlabels = self.minsize_scrub(labels,
                                                         1,
                                                         False,
                                                         tab=True)
            del labels
            # allow this to work before self.get_svox_type or self.write_voxel_type
            self.data_attrs['types_nlabels'] = [nlabels]

        if self.relabel:
            labels = self.data_cube
            sel_ECS, ECS_label = self.getECS(labels)
            labels[sel_ECS] = 0

            if self.dpCleanLabels_verbose:
                print('Relabeling fg components with conn %d%s' %
                      (self.fg_connectivity, ', ignoring ECS label %d' %
                       (ECS_label, ) if ECS_label else ''))
                print('\tnlabels = %d, max = %d, before re-label' %
                      (len(np.unique(labels)), labels.max()))
                t = time.time()

            labels, nlabels = nd.measurements.label(labels, self.fgbwconn)

            labels, nlabels = self.setECS(labels, sel_ECS, ECS_label, nlabels)
            self.data_cube = labels
            # allow this to work before self.get_svox_type or self.write_voxel_type
            self.data_attrs['types_nlabels'] = [nlabels]

            if self.dpCleanLabels_verbose:
                print('\tnlabels = %d after re-label' % (nlabels, ))
                print('\tdone in %.4f s' % (time.time() - t))

        # this step re-writes the original background (membrane) mask back to the supervoxels.
        # this is useful if agglomeration was done using the completely watershedded supervoxels.
        if self.apply_bg_mask:
            if self.dpCleanLabels_verbose:
                print('Applying background (membrane) mask to supervoxels')
                t = time.time()
            sel = (voxel_type == 0)
            self.data_cube[sel] = 0
            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

        # this step is always last, as it writes a new voxel_type depending on the cleaning that was done
        if self.get_svox_type or self.write_voxel_type:
            if self.dpCleanLabels_verbose:
                print('Recomputing supervoxel types and re-ordering labels')
                t = time.time()

            # moved this as first step to allow other steps to modify voxel_type
            #voxType = emVoxelType.readVoxType(srcfile=self.srcfile, chunk=self.chunk.tolist(),
            #    offset=self.offset.tolist(), size=self.size.tolist())
            #voxel_type = voxType.data_cube.copy(order='C')
            #ntypes = len(voxType.data_attrs['types'])

            labels = self.data_cube.copy(order='C')
            #nlabels = labels.max(); assert(nlabels == self.data_attrs['types_nlabels'][0])
            nlabels = sum(self.data_attrs['types_nlabels'])
            supervoxel_type, voxel_type = emLabels.type_components(
                labels, voxel_type, nlabels, ntypes)
            assert (supervoxel_type.size == nlabels)
            # reorder labels so that supervoxels are grouped by / in order of supervoxel type
            remap = np.zeros((nlabels + 1, ), dtype=self.data_cube.dtype)
            remap[np.argsort(supervoxel_type) + 1] = np.arange(
                1, nlabels + 1, dtype=self.data_cube.dtype)
            self.data_cube = remap[self.data_cube]
            types_nlabels = [(supervoxel_type == x).sum(dtype=np.int64)
                             for x in range(1, ntypes)]
            # a mismatch here indicates that voxel type does not match the supervoxels
            assert (sum(types_nlabels) == nlabels)
            self.data_attrs['types_nlabels'] = types_nlabels

            if self.write_voxel_type:
                if self.dpCleanLabels_verbose:
                    print('Rewriting voxel type pixel data based on supervoxel types')
                d = voxType.data_attrs.copy()
                #d['types_nlabels'] =
                emVoxelType.writeVoxType(outfile=self.outfile,
                                         chunk=self.chunk.tolist(),
                                         offset=self.offset.tolist(),
                                         size=self.size.tolist(),
                                         datasize=voxType.datasize.tolist(),
                                         chunksize=voxType.chunksize.tolist(),
                                         data=voxel_type.astype(
                                             emVoxelType.VOXTYPE_DTYPE),
                                         attrs=d)

            if self.dpCleanLabels_verbose:
                print('\tdone in %.4f s' % (time.time() - t))

        # this step should not be mixed with other steps
        if self.replace_ECS:
            assert (len(self.data_attrs['types_nlabels']) == 2)
            sel_ECS = (self.data_cube > self.data_attrs['types_nlabels'][0])
            sel_ICS = np.logical_and(
                self.data_cube > 0,
                self.data_cube <= self.data_attrs['types_nlabels'][0])
            self.data_cube[sel_ICS] += self.min_label
            self.data_cube[sel_ECS] = self.ECS_label
            # xxx - probably shouldn't be using this anyways?
            self.data_attrs['types_nlabels'] = self.data_attrs['types_nlabels'][0]
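
The relabeling idiom in the get_svox_type step above (write new ids into a lookup array at the positions given by argsort, then index the volume with it) is compact enough to miss; a standalone sketch with made-up labels and types (kind='stable' is added here only to make the printed result deterministic):

import numpy as np

labels = np.array([[1, 2, 0], [3, 2, 4]])   # toy label image (0 = background)
supervoxel_type = np.array([2, 1, 2, 1])    # hypothetical type of labels 1..4

nlabels = supervoxel_type.size
remap = np.zeros((nlabels + 1,), dtype=labels.dtype)
# argsort orders label ids by type; assigning 1..nlabels at those positions gives
# each old id a new id such that the relabeled ids are grouped by type
remap[np.argsort(supervoxel_type, kind='stable') + 1] = np.arange(1, nlabels + 1, dtype=labels.dtype)
print(remap[labels])  # -> [[3 1 0] [4 1 2]]: type-1 supervoxels now have the lowest ids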
Example #5
import numpy as np
from dpLoadh5 import dpLoadh5
from dpWriteh5 import dpWriteh5
#from emdrp.utils.typesh5 import emLabels, emProbabilities, emVoxelType
#from emdrp.utils.typesh5 import emLabels
from pyCext import binary_warping

overlay_in = '/home/watkinspv/Downloads/K0057_soma_annotation/affine_2Pstack/K0057_D31_soma_seg_overlays_v3.gipl'
data, hdr, info = dpWriteh5.gipl_read_volume(overlay_in)
data = data.reshape(hdr['sizes'][:3][::-1]).transpose((2, 1, 0))
print(data.shape, data.dtype)

# warp all the way down to points for each object. xxx - could be prohibitive for larger volumes
bwlabels, diff, simpleLUT = binary_warping((data > 0).copy(order='C'),
                                           np.zeros(data.shape, dtype=bool),
                                           borderval=False,
                                           slow=True,
                                           connectivity=3)

subs = np.nonzero(bwlabels)
#np.set_printoptions(threshold=np.nan); print(np.transpose(subs))

# get soma information back from original overlay
types = data[subs]

# convert subscripts into the appropriate downsampling space.
# the offset tries to compensate for warping bias from the 9x9x8 rect shape the cells were labeled with
offset = np.array([1, 1, 0])
subs_out = (np.transpose(subs) + offset) * np.array([16, 16, 16]) // np.array([12, 12, 4])
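
A quick sanity check of that transform on a single voxel (the coordinate values are hypothetical; the 16 vs. [12, 12, 4] factors are simply the ratios used in the expression above):

import numpy as np

sub = np.array([100, 200, 50])   # one warped-down point in the overlay space
offset = np.array([1, 1, 0])
out = (sub + offset) * np.array([16, 16, 16]) // np.array([12, 12, 4])
print(out)  # -> [134 268 200]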