Example #1
def _evaluate_alignment_with_sift(im_0, im_1, areas, sigma=1.):
    print(im_0)

    im_0 = imread(im_0)
    im_1 = imread(im_1)

    if im_0.dtype == 'uint16':
        im_0 = (im_0 / (65536 / 255)).astype('uint8')
    if im_1.dtype == 'uint16':
        im_1 = (im_1 / (65536 / 255)).astype('uint8')

    # im_0 = gaussianSmoothing(im_0, sigma)
    # im_1 = gaussianSmoothing(im_1, sigma)

    offset = []

    for area in areas:

        crop_0 = im_0[area]
        crop_1 = im_1[area]

        crop_0 = gaussianSmoothing(crop_0, sigma)
        crop_1 = gaussianSmoothing(crop_1, sigma)

        try:
            # Do the alignment
            sa = sift.LinearAlign(crop_0, devicetype='GPU')
            aligned = sa.align(crop_1, return_all=True, shift_only=True)
            offset.append(aligned['offset'])
            sa = None
            aligned = None

        except TypeError:
            print('Warning: The alignment failed, appending [None, None]')
            offset.append([None, None])

    return offset
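
A minimal usage sketch for the helper above. The file names and crop windows are placeholders, and the snippet assumes that imread, gaussianSmoothing and sift come from skimage.io, vigra.filters and silx respectively (the original imports are not shown).

import numpy as np

# Hypothetical call: align two adjacent sections over two crop windows.
# The paths and slice windows below are placeholders, not values from the original code.
areas = [np.s_[0:1024, 0:1024], np.s_[2048:3072, 2048:3072]]
offsets = _evaluate_alignment_with_sift('section_0000.tif', 'section_0001.tif',
                                        areas, sigma=1.)
print(offsets)  # one offset per crop window, or [None, None] where SIFT alignment failed
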
Example #2
def get_segmentation(predict, pmin=0.5, minMemb=10, minSeg=10, sigMin=6, sigWeights=1, sigSmooth=0.1, cleanCloseSeeds=True,
                     returnSeedsOnly=False, edgeLengths=None, nodeFeatures=None, nodeSizes=None, nodeLabels=None,
                     nodeNumStop=None, beta=0, metric='l1', wardness=0.2, out=None):
    """ Get segmentation through watershed and agglomerative clustering
    :param predict: prediction map
    :return: segmentation map
    """
    #use watershed and save superpixels map
    super_pixels = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, returnSeedsOnly)
    # seeds = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, True)
    # save_h5(seeds, "/home/stamylew/delme/seeds.h5", "data")
    print()
    print("#Nodes in superpixels", len(np.unique(super_pixels)))
    # save_h5(super_pixels, "/home/stamylew/delme/super_pixels.h5", "data")

    #smooth prediction map
    probs = vf.gaussianSmoothing(predict, sigSmooth)
    # save_h5(probs, "/home/stamylew/delme/probs.h5", "data")

    #make grid graph
    grid_graph = vg.gridGraph(super_pixels.shape, False)

    grid_graph_edge_indicator = vg.edgeFeaturesFromImage(grid_graph, probs)
    #make region adjacency graph
    rag = vg.regionAdjacencyGraph(grid_graph, super_pixels)

    #accumulate edge features from grid graph node map
    edge_weights = rag.accumulateEdgeFeatures(grid_graph_edge_indicator)
    edge_weights_tag = "mean of the probabilities"

    #do agglomerative clustering

    labels = vg.agglomerativeClustering(rag, edge_weights, edgeLengths, nodeFeatures, nodeSizes,
            nodeLabels, nodeNumStop, beta, metric, wardness, out)

    #segmentation data
    wsDt_data = np.zeros((8,1))
    wsDt_data[:,0] = (pmin, minMemb, minSeg, sigMin, sigWeights, sigSmooth, cleanCloseSeeds, returnSeedsOnly)
    agglCl_data = edge_weights_tag, str(edgeLengths), str(nodeFeatures), str(nodeSizes), str(nodeLabels), str(nodeNumStop), \
                  str(beta), metric, str(wardness), str(out)

    #project labels back to data
    segmentation = rag.projectLabelsToBaseGraph(labels)
    print "#nodes in segmentation", len(np.unique(segmentation))
    # save_h5(segmentation, "/home/stamylew/delme/segmap.h5", "data", None)
    print "seg", np.unique(segmentation)
    return segmentation, super_pixels, wsDt_data, agglCl_data
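
A usage sketch for get_segmentation, assuming the module-level aliases used above (np for numpy, vf and vg for vigra.filters and vigra.graphs) and the wsDtSegmentation helper are importable. The input here is a synthetic placeholder for a real boundary-probability map.

import numpy as np

# Hypothetical input: a float32 probability volume; default parameters are kept.
prob_map = np.random.rand(64, 64, 64).astype(np.float32)
segmentation, super_pixels, wsDt_data, agglCl_data = get_segmentation(prob_map)
print(len(np.unique(segmentation)), "segments")
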
Example #3
    def process(self, pmaps):
        if self.ws_sigma > 0:
            # clip the smoothing sigma per axis so the Gaussian kernel fits
            # inside pmaps: sigma may be at most (dim - 1) / 3 along each axis
            max_sigma = (np.array(pmaps.shape) - 1) / 3
            ws_sigma = np.minimum(max_sigma,
                                  np.ones(max_sigma.ndim) * self.ws_sigma)
            pmaps = gaussianSmoothing(pmaps, ws_sigma)

        # Itk watershed + size filtering
        itk_pmaps = sitk.GetImageFromArray(pmaps)
        itk_segmentation = sitk.MorphologicalWatershed(itk_pmaps,
                                                       self.ws_threshold,
                                                       markWatershedLine=False,
                                                       fullyConnected=False)
        itk_segmentation = sitk.RelabelComponent(itk_segmentation,
                                                 self.ws_minsize)

        return sitk.GetArrayFromImage(itk_segmentation).astype(np.uint16)
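
process above is a method of a class that is not shown; it only relies on the attributes ws_sigma, ws_threshold and ws_minsize. Below is a standalone sketch of the same smoothing plus ITK watershed pipeline, with all parameter values chosen as arbitrary placeholders and gaussianSmoothing assumed to come from vigra.filters.

import numpy as np
import SimpleITK as sitk
from vigra.filters import gaussianSmoothing

pmaps = np.random.rand(32, 128, 128).astype(np.float32)
sigma = np.minimum((np.array(pmaps.shape) - 1) / 3, 2.0)  # cap sigma per axis
smoothed = gaussianSmoothing(pmaps, sigma)

itk_img = sitk.GetImageFromArray(smoothed)
seg = sitk.MorphologicalWatershed(itk_img, 0.5,
                                  markWatershedLine=False, fullyConnected=False)
seg = sitk.RelabelComponent(seg, 50)  # drop segments smaller than 50 voxels
labels = sitk.GetArrayFromImage(seg).astype(np.uint16)
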
Example #4
def seeded_dt_ws(input_, threshold, seeds, sigma_weights=2., min_size=100,
                 alpha=.9, pixel_pitch=None):

    # threshold the input and compute distance transform
    thresholded = (input_ > threshold).astype('uint32')
    dt = vigra.filters.distanceTransform(thresholded, pixel_pitch=pixel_pitch)

    # normalize and invert distance transform
    dt = 1. - (dt - dt.min()) / dt.max()

    # compute weights from input and distance transform
    if sigma_weights > 0.:
        hmap = alpha * ff.gaussianSmoothing(input_, sigma_weights) + (1. - alpha) * dt
    else:
        hmap = alpha * input_ + (1. - alpha) * dt

    # compute watershed
    ws, max_id = watershed(hmap, seeds, size_filter=min_size)
    return ws, max_id
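
A usage sketch for seeded_dt_ws. The probability map and seed volume are synthetic placeholders; watershed and ff are assumed to be the size-filtering watershed helper and the vigra.filters alias used elsewhere in the source module.

import numpy as np

# Hypothetical inputs: a boundary-probability volume and a seed volume with one
# labelled voxel per object (0 = unseeded). All values are placeholders.
input_ = np.random.rand(32, 256, 256).astype(np.float32)
seeds = np.zeros(input_.shape, dtype='uint32')
seeds[16, 64, 64] = 1
seeds[16, 192, 192] = 2
ws, max_id = seeded_dt_ws(input_, threshold=0.5, seeds=seeds,
                          sigma_weights=2., min_size=100, alpha=.9)
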
Example #5
def triangulation(pts,
                  downsampling=(1, 1, 1),
                  n_closings=0,
                  single_cc=False,
                  decimate_mesh=0,
                  gradient_direction='descent',
                  force_single_cc=False):
    """
    Calculates triangulation of point cloud or dense volume using marching cubes
    by building dense matrix (in case of a point cloud) and applying marching
    cubes.

    Parameters
    ----------
    pts : np.array
        [N, 3] or [N, M, O] (dtype: uint8, bool)
    downsampling : Tuple[int]
        Downsampling factor applied to pts along each axis, e.g. (1, 1, 1)
        or (2, 2, 2).
    n_closings : int
        Number of closings applied before mesh generation
    single_cc : bool
        Returns mesh of biggest connected component only
    decimate_mesh : float
        Percentage of mesh size reduction, i.e. 0.1 will leave 90% of the
        vertices
    gradient_direction : str
        defines orientation of triangle indices. '?' is needed for KNOSSOS
         compatibility. TODO: check compatible index orientation, switched to `descent`, 23April2019
    force_single_cc : bool
        If True, performs dilations until only one foreground CC is present
        and then erodes with the same number of iterations to maintain size.

    Returns
    -------
    array, array, array
        indices [M, 3], vertices [N, 3], normals [N, 3]

    """
    if boundaryDistanceTransform is None:
        raise ImportError(
            '"boundaryDistanceTransform" could not be imported from VIGRA. '
            'Please install vigra, see SyConn documentation.')
    assert type(
        downsampling) == tuple, "Downsampling has to be of type 'tuple'"
    assert (pts.ndim == 2 and pts.shape[1] == 3) or pts.ndim == 3, \
        "Point cloud used for mesh generation has wrong shape."
    if pts.ndim == 2:
        if np.max(pts) <= 1:
            msg = "Currently this function only supports point " \
                  "clouds with coordinates >> 1."
            log_proc.error(msg)
            raise ValueError(msg)
        offset = np.min(pts, axis=0)
        pts -= offset
        pts = (pts / downsampling).astype(np.uint32)
        # add zero boundary around object
        margin = n_closings + 5
        pts += margin
        bb = np.max(pts, axis=0) + margin
        volume = np.zeros(bb, dtype=np.float32)
        volume[pts[:, 0], pts[:, 1], pts[:, 2]] = 1
    else:
        volume = pts
        if np.any(np.array(downsampling) != 1):
            # shrink the dense volume by the downsampling factor, mirroring the
            # `pts / downsampling` applied to point clouds above
            volume = ndimage.zoom(volume, 1. / np.array(downsampling), order=0)
        offset = np.array([0, 0, 0])
    if n_closings > 0:
        volume = binary_closing(volume,
                                iterations=n_closings).astype(np.float32)
        if force_single_cc:
            n_dilations = 0
            while True:
                labeled, nb_cc = ndimage.label(volume)
                # log_proc.debug('Forcing single CC, additional dilations {}, num'
                #                'ber connected components: {}'
                #                ''.format(n_dilations, nb_cc))
                if nb_cc == 1:  # does not count background
                    break
                # pad volume to maintain margin at boundary and correct offset
                volume = np.pad(volume, [(1, 1), (1, 1), (1, 1)],
                                mode='constant',
                                constant_values=0)
                offset -= 1
                volume = binary_dilation(volume,
                                         iterations=1).astype(np.float32)
                n_dilations += 1
    else:
        volume = volume.astype(np.float32)
    if single_cc:
        labeled, nb_cc = ndimage.label(volume)
        cnt = Counter(labeled[labeled != 0])
        l, occ = cnt.most_common(1)[0]
        volume = np.array(labeled == l, dtype=np.float32)
    # InterpixelBoundary, OuterBoundary, InnerBoundary
    dt = boundaryDistanceTransform(volume, boundary="InterpixelBoundary")
    dt[volume == 1] *= -1
    volume = gaussianSmoothing(dt, 1)
    if np.sum(volume < 0) == 0 or np.sum(volume > 0) == 0:  # less smoothing
        volume = gaussianSmoothing(dt, 0.5)
    try:
        verts, ind, norm, _ = measure.marching_cubes_lewiner(
            volume, 0, gradient_direction=gradient_direction)
    except Exception as e:
        raise ValueError(e)
    if pts.ndim == 2:  # undo the margin added around the voxelized point cloud
        verts -= margin
    verts = np.array(verts) * downsampling + offset
    if decimate_mesh > 0:
        if not __vtk_avail__:
            msg = "vtki not installed. Please install vtki.'" \
                  "pip install vtki'."
            log_proc.error(msg)
            raise ImportError(msg)
        # log_proc.warning("'triangulation': Currently mesh-sparsification"
        #                  " may not preserve volume.")
        # add number of vertices in front of every face (required by vtki)
        ind = np.concatenate(
            [np.ones((len(ind), 1)).astype(np.int64) * 3, ind], axis=1)
        mesh = vtki.PolyData(verts, ind.flatten())
        mesh.decimate(decimate_mesh, volume_preservation=True)
        # remove face sizes again
        ind = mesh.faces.reshape((-1, 4))[:, 1:]
        verts = mesh.points
        mo = MeshObject("", ind, verts)
        # compute normals
        norm = mo.normals.reshape((-1, 3))
    return np.array(ind, dtype=int), verts, norm
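
A usage sketch for triangulation on a dense binary volume, assuming the module's own dependencies (vigra, scipy.ndimage, skimage.measure) are installed. The ball-shaped test volume and parameter values are placeholders.

import numpy as np

# Hypothetical input: a small uint8 volume containing a filled ball.
zz, yy, xx = np.mgrid[:64, :64, :64]
ball = ((zz - 32) ** 2 + (yy - 32) ** 2 + (xx - 32) ** 2 < 15 ** 2).astype(np.uint8)
ind, verts, norm = triangulation(ball, downsampling=(1, 1, 1), n_closings=2)
print(ind.shape, verts.shape)  # [M, 3] triangle indices, [N, 3] vertices
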
Example #6
def _gaussian(image, sigma):
    max_sigma = (np.array(image.shape) - 1) / 3
    sigma = np.minimum(max_sigma, np.ones(max_sigma.ndim) * sigma)
    return gaussianSmoothing(image, sigma)
def get_segmentation(predict, pmin=0.5, minMemb=10, minSeg=10, sigMin=6, sigWeights=1, sigSmooth=0.1, cleanCloseSeeds=True,
                     returnSeedsOnly=False, edgeLengths=None, nodeFeatures=None, nodeSizes=None, nodeLabels=None, nodeNumStop=None,
                     beta=0, metric='l1', wardness=0.2, out=None):
    """ Get segmentation through watershed and agglomerative clustering
    :param predict: prediction map
    :return: segmentation map
    """
    #use watershed and save superpixels map
    super_pixels = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, returnSeedsOnly)
    # seeds = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, True)
    # save_h5(seeds, "/home/stamylew/delme/seeds.h5", "data")
    print()
    print("#Nodes in superpixels", len(np.unique(super_pixels)))
    # save_h5(super_pixels, "/home/stamylew/delme/super_pixels.h5", "data")

    #smooth prediction map
    probs = vf.gaussianSmoothing(predict, sigSmooth)
    # save_h5(probs, "/home/stamylew/delme/probs.h5", "data")

    #make grid graph
    grid_graph = vg.gridGraph(super_pixels.shape, False)

    grid_graph_edge_indicator = vg.edgeFeaturesFromImage(grid_graph, probs)
    #make region adjacency graph
    rag = vg.regionAdjacencyGraph(grid_graph, super_pixels)

    #accumulate edge features from grid graph node map
    edge_weights = rag.accumulateEdgeFeatures(grid_graph_edge_indicator)
    edge_weights_tag = "mean of the probabilities"

    #do agglomerative clustering

    def agglomerativeClustering_beststop_th05(graph, edgeWeights=None, edgeStoppers=None,
                                          edgeLengths=None, nodeFeatures=None, nodeSizes=None,
                                          nodeLabels=None, beta=0, metric=None,
                                          wardness=1.0, sameLabelMultiplier=1.0,  out=None):


        # First pass: run the clustering all the way down (nodeNumStop=0); as a side
        # effect the best stopping point is written to /tmp/ac_bestNodenumstop.txt.
        vg.ac(graph, edgeWeights=edgeWeights, edgeStoppers=edgeStoppers,
              edgeLengths=edgeLengths, nodeFeatures=nodeFeatures,
              nodeSizes=nodeSizes, nodeLabels=nodeLabels, nodeNumStop=0,
              beta=beta, wardness=wardness, sameLabelMultiplier=sameLabelMultiplier,
              out=out)

        with open('/tmp/ac_bestNodenumstop.txt', 'r') as f:
            nodeNumStop = f.readline()
        print "best nodeNumStop:", nodeNumStop

        # In experiments, when the ground truth has n nodes the file contains n - 1;
        # the number is probably written to the file too late in hc.hxx. Does this make sense?

        nodeNumStop = int(nodeNumStop)
        nodeNumStop += 1

        ac_res, nodelabel_out_1 = vg.ac(graph, edgeWeights=edgeWeights, edgeStoppers=edgeStoppers,
                                            edgeLengths=edgeLengths, nodeFeatures=nodeFeatures,
                                            nodeSizes=nodeSizes, nodeLabels=nodeLabels, nodeNumStop=nodeNumStop,
                                            beta=beta, wardness=wardness, sameLabelMultiplier=sameLabelMultiplier,
                                            out=out)

        return ac_res, nodeNumStop, nodelabel_out_1

    labels, _, _ = agglomerativeClustering_beststop_th05(rag, edgeWeights=edge_weights, edgeStoppers=edge_weights,
                                          edgeLengths=edgeLengths, nodeFeatures=nodeFeatures, nodeSizes=nodeSizes,
                                          nodeLabels=nodeLabels, beta=beta, metric=metric,
                                          wardness=0.1, sameLabelMultiplier=1.0,  out=out)


    # labels = vg.agglomerativeClustering(rag, edge_weights, edgeLengths, nodeFeatures, nodeSizes,
    #         nodeLabels, nodeNumStop, beta, metric, wardness, out)

    #segmentation data
    wsDt_data = np.zeros((8,1))
    wsDt_data[:,0] = (pmin, minMemb, minSeg, sigMin, sigWeights, sigSmooth, cleanCloseSeeds, returnSeedsOnly)
    agglCl_data = edge_weights_tag, str(edgeLengths), str(nodeFeatures), str(nodeSizes), str(nodeLabels), str(nodeNumStop), str(beta), metric, str(wardness), str(out)

    #project labels back to data
    segmentation = rag.projectLabelsToBaseGraph(labels)
    print "#nodes in segmentation", len(np.unique(segmentation))
    # save_h5(segmentation, "/home/stamylew/delme/segmap.h5", "data", None)

    return segmentation, super_pixels, wsDt_data, agglCl_data