Example #1
    def calc_edge_features(self, feature_matrix=None):
        """Accumulate per-edge features on the region adjacency graph.

        Uses self._data as the feature image when no explicit
        feature_matrix is given.
        """
        # Build a grid graph over the raw data volume
        grid_graph = graphs.gridGraph(self._data.shape)

        # Edge indicators on the grid graph, from the raw data or from a
        # user-supplied feature image
        if feature_matrix is None:
            edge_features = graphs.edgeFeaturesFromImage(grid_graph, self._data)
        else:
            edge_features = graphs.edgeFeaturesFromImage(grid_graph, feature_matrix)

        # Project the grid-graph edge features onto the RAG edges
        self._edge_features = self._rag.accumulateEdgeFeatures(edge_features)
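A minimal usage sketch of the same pipeline outside the wrapper class (the class itself is not shown in the listing; `graphs` is assumed to be `vigra.graphs`, and the toy data and threshold-based oversegmentation are stand-ins):

import numpy as np
import vigra
from vigra import graphs

# Hypothetical 2-D data and a trivial oversegmentation
data = vigra.taggedView(np.random.rand(64, 64).astype(np.float32), 'xy')
superpixels = vigra.analysis.labelImage((data > 0.5).astype(np.float32))

grid_graph = graphs.gridGraph(data.shape)
rag = graphs.regionAdjacencyGraph(grid_graph, superpixels)

# Same three steps as in calc_edge_features above
edge_features = graphs.edgeFeaturesFromImage(grid_graph, data)
rag_edge_features = rag.accumulateEdgeFeatures(edge_features)
print(rag_edge_features.shape)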
Example #2
def get_segmentation(predict, pmin=0.5, minMemb=10, minSeg=10, sigMin=6, sigWeights=1, sigSmooth=0.1,
                     cleanCloseSeeds=True, returnSeedsOnly=False, edgeLengths=None, nodeFeatures=None,
                     nodeSizes=None, nodeLabels=None, nodeNumStop=None, beta=0, metric='l1',
                     wardness=0.2, out=None):
    """ Get segmentation through watershed and agglomerative clustering
    :param predict: prediction map
    :return: segmentation map
    """
    # watershed on the prediction map yields the superpixel map
    super_pixels = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, returnSeedsOnly)
    # seeds = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, True)
    # save_h5(seeds, "/home/stamylew/delme/seeds.h5", "data")
    print()
    print("#Nodes in superpixels", len(np.unique(super_pixels)))
    # save_h5(super_pixels, "/home/stamylew/delme/super_pixels.h5", "data")

    # smooth the prediction map
    probs = vf.gaussianSmoothing(predict, sigSmooth)
    # save_h5(probs, "/home/stamylew/delme/probs.h5", "data")

    # make grid graph
    grid_graph = vg.gridGraph(super_pixels.shape, False)

    grid_graph_edge_indicator = vg.edgeFeaturesFromImage(grid_graph, probs)
    # make region adjacency graph
    rag = vg.regionAdjacencyGraph(grid_graph, super_pixels)

    # accumulate edge features from the grid-graph edge map
    edge_weights = rag.accumulateEdgeFeatures(grid_graph_edge_indicator)
    edge_weights_tag = "mean of the probabilities"

    # agglomerative clustering
    labels = vg.agglomerativeClustering(rag, edge_weights, edgeLengths, nodeFeatures, nodeSizes,
                                        nodeLabels, nodeNumStop, beta, metric, wardness, out)

    # segmentation parameters (for bookkeeping)
    wsDt_data = np.zeros((8, 1))
    wsDt_data[:, 0] = (pmin, minMemb, minSeg, sigMin, sigWeights, sigSmooth, cleanCloseSeeds, returnSeedsOnly)
    agglCl_data = edge_weights_tag, str(edgeLengths), str(nodeFeatures), str(nodeSizes), str(nodeLabels), \
                  str(nodeNumStop), str(beta), metric, str(wardness), str(out)

    # project labels back to the base graph (pixel level)
    segmentation = rag.projectLabelsToBaseGraph(labels)
    print("#nodes in segmentation", len(np.unique(segmentation)))
    # save_h5(segmentation, "/home/stamylew/delme/segmap.h5", "data", None)
    print("seg", np.unique(segmentation))
    return segmentation, super_pixels, wsDt_data, agglCl_data
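A minimal usage sketch, assuming the names the snippet relies on (np = numpy, vf = vigra.filters, vg = vigra.graphs) and that wsDtSegmentation is importable from the same project; the prediction map is a hypothetical stand-in:

import numpy as np

# Hypothetical boundary-probability map, e.g. from a pixel classifier
pred = np.random.rand(64, 64, 64).astype(np.float32)

seg, sp, wsdt_params, aggl_params = get_segmentation(pred, pmin=0.5, sigSmooth=0.1)
print(seg.shape, len(np.unique(seg)))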
Example #3
def shortest_paths(indicator, pairs, bounds=None, hfp=None):

    # Create the grid graph and shortest path objects
    gridgr = graphs.gridGraph(indicator.shape)
    indicator = indicator.astype(np.float32)
    gridgr_edgeind = graphs.edgeFeaturesFromImage(gridgr, indicator)
    instance = graphs.ShortestPathPathDijkstra(gridgr)

    # Initialize paths image
    pathsim = np.zeros(indicator.shape)
    # Initialize list of path coordinates
    paths = []

    for pair in pairs:

        source = pair[0]
        target = pair[1]

        if hfp is not None:
            hfp.logging('Calculating path from {} to {}', source, target)

        targetNode = gridgr.coordinateToNode(target)
        sourceNode = gridgr.coordinateToNode(source)

        instance.run(gridgr_edgeind, sourceNode, target=targetNode)
        path = instance.path(pathType='coordinates')
        if path.any():
            # Do not forget to correct for the offset caused by cropping!
            if bounds is not None:
                paths.append(path + [bounds[0].start, bounds[1].start, bounds[2].start])
            else:
                paths.append(path)

            # Mark the path voxels in the paths image (only when a path was found)
            pathindices = np.swapaxes(path, 0, 1)
            pathsim[pathindices[0], pathindices[1], pathindices[2]] = 1

    return paths, pathsim
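A minimal usage sketch, assuming `graphs` is `vigra.graphs` and `np` is numpy; the cost volume is a hypothetical stand-in (low values are cheap to traverse):

import numpy as np

# Hypothetical 3-D cost volume and one source/target pair
indicator = np.random.rand(32, 32, 32).astype(np.float32)
pairs = [((0, 0, 0), (31, 31, 31))]

paths, paths_image = shortest_paths(indicator, pairs)
print(len(paths), int(paths_image.sum()))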
Example #4
def find_shortest_path(ifp, penaltypower, bounds):

    # Modify distancetransform
    #
    # a) Invert: the lowest values (i.e. the lowest penalty for the shortest path detection) should be at the center of
    #    the current process
    ifp.invert_image(ids='curdisttransf')
    #
    # b) Set all values outside the process to infinity
    ifp.filter_values(ifp.amax('curdisttransf'),
                      type='eq',
                      setto=np.inf,
                      ids='curdisttransf',
                      targetids='curdisttransf_inf')
    #
    # c) Increase the value difference between pixels near the boundaries and pixels central within the processes
    #    This increases the likelihood of the paths to follow the center of processes, thus avoiding short-cuts
    ifp.power(penaltypower, ids='curdisttransf_inf')

    indicator = ifp.get_image('curdisttransf_inf')
    gridgr = graphs.gridGraph(ifp.shape('curlabel'))

    ifp.logging('gridgr.shape = {}'.format(gridgr.shape))

    indicator = indicator.astype(np.float32)
    gridgr_edgeind = graphs.edgeFeaturesFromImage(gridgr, indicator)
    instance = graphs.ShortestPathPathDijkstra(gridgr)

    # Get two local maxima
    indices = np.where(ifp.get_image('curlocmax') == 1)
    coords = list(zip(indices[0], indices[1], indices[2]))
    ifp.logging('Local maxima coordinates: {}'.format(coords))

    ifp.set_data_dict({'pathsim': np.zeros(ifp.get_image('curlocmax').shape)},
                      append=True)

    ifp.logging('len(coords) = {}'.format(len(coords)))
    paths = []
    for i in range(0, len(coords) - 1):

        for j in range(i + 1, len(coords)):

            ifp.logging('---')
            ifp.logging('i = {0}; j = {1}'.format(i, j))

            source = coords[i]
            target = coords[j]

            targetNode = gridgr.coordinateToNode(target)
            sourceNode = gridgr.coordinateToNode(source)

            ifp.logging('Source = {}'.format(source))
            ifp.logging('Target = {}'.format(target))

            instance.run(gridgr_edgeind, sourceNode, target=targetNode)
            path = instance.path(pathType='coordinates')
            # Do not forget to correct for the offset caused by cropping!
            paths.append(path + [bounds[0][0], bounds[1][0], bounds[2][0]])

            pathindices = np.swapaxes(path, 0, 1)
            ifp.get_image('pathsim')[pathindices[0], pathindices[1],
                                     pathindices[2]] = 1

    # ifp.concatenate('disttransf', 'paths', target='paths_over_dist')
    ifp.astype(np.uint8, ('pathsim', 'curlocmax'))
    # ifp.anytask(vigra.filters.multiBinaryDilation, ('paths', 'locmax'), 3)
    ifp.swapaxes(0, 2, ids=('pathsim', 'curlocmax', 'curdisttransf'))
    ifp.anytask(vigra.filters.discDilation, 2, ids=('pathsim', 'curlocmax'))
    ifp.set_data_dict(
        {
            'paths_over_dist':
            np.array([
                ifp.get_image('pathsim'),
                ifp.get_image('curlocmax'),
                ifp.get_image('curdisttransf')
            ])
        },
        append=True)

    return paths
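The three-step cost construction (a, b, c above) can also be written without the `ifp` wrapper. A minimal numpy sketch, assuming `dt` is the distance transform inside the current object (zero outside) and `penaltypower` plays the same role as above:

import numpy as np

def distance_transform_to_path_cost(dt, penaltypower=10):
    # a) invert: object centers get the lowest cost
    cost = dt.max() - dt
    # b) the former background (now the maximum) becomes unreachable
    cost[cost == cost.max()] = np.inf
    # c) amplify the contrast so paths hug the center line
    return cost ** penaltypower

# usage on a hypothetical distance transform
dt = np.random.rand(32, 32, 32).astype(np.float32)
indicator = distance_transform_to_path_cost(dt).astype(np.float32)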
Example #5
def find_shortest_path(ifp):

    # Modify distancetransform
    #
    # a) Invert: the lowest values (i.e. the lowest penalty for the shortest path detection) should be at the center of
    #    the current process
    ifp.invert_image(ids='curdisttransf')
    #
    # b) Set all values outside the process to infinity
    ifp.filter_values(ifp.amax('curdisttransf'),
                      type='eq',
                      setto=np.inf,
                      ids='curdisttransf',
                      targetids='curdisttransf_inf')
    #
    # c) Increase the value difference between pixels near the boundaries and pixels central within the processes
    #    This increases the likelihood of the paths to follow the center of processes, thus avoiding short-cuts
    ifp.power(10, ids='curdisttransf_inf')

    indicator = ifp.get_image('curdisttransf_inf')
    gridgr = graphs.gridGraph(ifp.shape('curlabel'))

    ifp.logging('gridgr.shape = {}'.format(gridgr.shape))

    indicator = indicator.astype(np.float32)
    gridgr_edgeind = graphs.edgeFeaturesFromImage(gridgr, indicator)
    instance = graphs.ShortestPathPathDijkstra(gridgr)

    # Get two local maxima
    indices = np.where(ifp.get_image('locmax') == 1)
    coords = list(zip(indices[0], indices[1], indices[2]))
    ifp.logging('Local maxima coordinates: {}'.format(coords))

    # ifp.deepcopy_entry('locmax', 'paths')
    ifp.set_data_dict({'paths': np.zeros(ifp.get_image('locmax').shape)}, append=True)

    ifp.logging('len(coords) = {}'.format(len(coords)))
    for i in range(0, len(coords) - 1):

        for j in range(i + 1, len(coords)):

            ifp.logging('---')
            ifp.logging('i = {0}; j = {1}'.format(i, j))

            source = coords[i]
            target = coords[j]

            targetNode = gridgr.coordinateToNode(target)
            sourceNode = gridgr.coordinateToNode(source)

            ifp.logging('Source = {}'.format(source))
            ifp.logging('Target = {}'.format(target))

            instance.run(gridgr_edgeind, sourceNode, target=targetNode)
            path = instance.path(pathType='coordinates')

            pathindices = np.swapaxes(path, 0, 1)
            ifp.get_image('paths')[pathindices[0], pathindices[1], pathindices[2]] = 1

    # ifp.concatenate('disttransf', 'paths', target='paths_over_dist')
    ifp.astype(np.uint8, ('paths', 'locmax'))
    # ifp.anytask(vigra.filters.multiBinaryDilation, ('paths', 'locmax'), 3)
    ifp.swapaxes(0, 2, ids=('paths', 'locmax', 'curdisttransf'))
    ifp.anytask(vigra.filters.discDilation, 2, ids=('paths', 'locmax'))
    ifp.set_data_dict({'paths_over_dist': np.array([ifp.get_image('paths'), ifp.get_image('locmax'), ifp.get_image('curdisttransf')])}, append=True)
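The `ifp` object these two functions expect is not shown in the listing. Below is a hypothetical, minimal stand-in inferred purely from the calls above; the real class presumably does more (astype, swapaxes, shape and anytask would follow the same dictionary-of-images pattern):

import numpy as np

class ImageProcessorStub(object):
    """Hypothetical duck-typed stand-in for `ifp`."""

    def __init__(self, images):
        self._images = dict(images)  # name -> ndarray

    def get_image(self, name):
        return self._images[name]

    def set_data_dict(self, data, append=True):
        self._images.update(data)

    def amax(self, name):
        return np.amax(self._images[name])

    def invert_image(self, ids):
        img = self._images[ids]
        self._images[ids] = img.max() - img

    def filter_values(self, value, type='eq', setto=np.inf, ids=None, targetids=None):
        img = self._images[ids].copy()
        if type == 'eq':
            img[img == value] = setto
        self._images[targetids if targetids else ids] = img

    def power(self, exponent, ids):
        self._images[ids] = self._images[ids] ** exponent

    def logging(self, fmt, *args):
        print(fmt.format(*args))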
Example #6
def get_segmentation(predict, pmin=0.5, minMemb=10, minSeg=10, sigMin=6, sigWeights=1, sigSmooth=0.1,
                     cleanCloseSeeds=True, returnSeedsOnly=False, edgeLengths=None, nodeFeatures=None,
                     nodeSizes=None, nodeLabels=None, nodeNumStop=None, beta=0, metric='l1',
                     wardness=0.2, out=None):
    """ Get segmentation through watershed and agglomerative clustering
    :param predict: prediction map
    :return: segmentation map
    """
    # watershed on the prediction map yields the superpixel map
    super_pixels = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, returnSeedsOnly)
    # seeds = wsDtSegmentation(predict, pmin, minMemb, minSeg, sigMin, sigWeights, cleanCloseSeeds, True)
    # save_h5(seeds, "/home/stamylew/delme/seeds.h5", "data")
    print()
    print("#Nodes in superpixels", len(np.unique(super_pixels)))
    # save_h5(super_pixels, "/home/stamylew/delme/super_pixels.h5", "data")

    # smooth the prediction map
    probs = vf.gaussianSmoothing(predict, sigSmooth)
    # save_h5(probs, "/home/stamylew/delme/probs.h5", "data")

    # make grid graph
    grid_graph = vg.gridGraph(super_pixels.shape, False)

    grid_graph_edge_indicator = vg.edgeFeaturesFromImage(grid_graph, probs)
    # make region adjacency graph
    rag = vg.regionAdjacencyGraph(grid_graph, super_pixels)

    # accumulate edge features from the grid-graph edge map
    edge_weights = rag.accumulateEdgeFeatures(grid_graph_edge_indicator)
    edge_weights_tag = "mean of the probabilities"

    # agglomerative clustering with an automatically determined stopping point

    def agglomerativeClustering_beststop_th05(graph, edgeWeights=None, edgeStoppers=None,
                                              edgeLengths=None, nodeFeatures=None, nodeSizes=None,
                                              nodeLabels=None, beta=0, metric=None,
                                              wardness=1.0, sameLabelMultiplier=1.0, out=None):

        # First pass: cluster all the way down (nodeNumStop=0); as a side
        # effect the best stopping point is written to a file.
        vg.ac(graph, edgeWeights=edgeWeights, edgeStoppers=edgeStoppers,
              edgeLengths=edgeLengths, nodeFeatures=nodeFeatures,
              nodeSizes=nodeSizes, nodeLabels=nodeLabels, nodeNumStop=0,
              beta=beta, wardness=wardness, sameLabelMultiplier=sameLabelMultiplier, out=out)

        with open('/tmp/ac_bestNodenumstop.txt', 'r') as f:
            nodeNumStop = f.readline()
        print("best nodeNumStop:", nodeNumStop)

        # In experiments, when the ground truth has n nodes the file contains
        # n - 1; presumably hc.hxx writes the number to the file one merge too
        # late. Compensate with +1 here.
        nodeNumStop = int(nodeNumStop) + 1

        ac_res, nodelabel_out_1 = vg.ac(graph, edgeWeights=edgeWeights, edgeStoppers=edgeStoppers,
                                            edgeLengths=edgeLengths, nodeFeatures=nodeFeatures,
                                            nodeSizes=nodeSizes, nodeLabels=nodeLabels, nodeNumStop=nodeNumStop,
                                            beta=beta, wardness=wardness, sameLabelMultiplier=sameLabelMultiplier,
                                            out=out)

        return ac_res, nodeNumStop, nodelabel_out_1

    labels, _, _ = agglomerativeClustering_beststop_th05(rag, edgeWeights=edge_weights, edgeStoppers=edge_weights,
                                          edgeLengths=edgeLengths, nodeFeatures=nodeFeatures, nodeSizes=nodeSizes,
                                          nodeLabels=nodeLabels, beta=beta, metric=metric,
                                          wardness=0.1, sameLabelMultiplier=1.0,  out=out)


    # labels = vg.agglomerativeClustering(rag, edge_weights, edgeLengths, nodeFeatures, nodeSizes,
    #         nodeLabels, nodeNumStop, beta, metric, wardness, out)

    # segmentation parameters (for bookkeeping)
    wsDt_data = np.zeros((8, 1))
    wsDt_data[:, 0] = (pmin, minMemb, minSeg, sigMin, sigWeights, sigSmooth, cleanCloseSeeds, returnSeedsOnly)
    agglCl_data = edge_weights_tag, str(edgeLengths), str(nodeFeatures), str(nodeSizes), str(nodeLabels), \
                  str(nodeNumStop), str(beta), metric, str(wardness), str(out)

    # project labels back to the base graph (pixel level)
    segmentation = rag.projectLabelsToBaseGraph(labels)
    print("#nodes in segmentation", len(np.unique(segmentation)))
    # save_h5(segmentation, "/home/stamylew/delme/segmap.h5", "data", None)

    return segmentation, super_pixels, wsDt_data, agglCl_data
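The file-based handshake with vg.ac can be isolated into a helper. A small sketch, assuming the same /tmp path used above and the off-by-one behaviour noted in the comment:

def read_best_node_num_stop(path='/tmp/ac_bestNodenumstop.txt', off_by_one=1):
    # The +1 compensates for the apparent off-by-one: hc.hxx seems to
    # record the node count one merge too late.
    with open(path) as f:
        return int(f.readline()) + off_by_one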
Example #7
def getFeatures(rag, img, imgId):

    featureNames = ['1-Feature']
    ############################## Filter ###################################################
    filters = []
    ### Gradient Magnitude ###
    imgLab = vigra.colors.transform_RGB2Lab(img)
    # Upsampled to 2*s-1 (the original comment asks what this buys; presumably
    # filter responses are then also sampled between pixels, i.e. at the
    # grid-graph edge positions)
    imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])
    filters.append(vigra.filters.gaussianGradientMagnitude(imgLabBig, 1.))
    featureNames.append('GradMag1')
    filters.append(vigra.filters.gaussianGradientMagnitude(imgLabBig, 2.))
    featureNames.append('GradMag2')
    filters.append(vigra.filters.gaussianGradientMagnitude(imgLabBig, 5.))
    featureNames.append('GradMag5')

    
    ### Hessian of Gaussian Eigenvalues ###
    sigmahoG = 2.0
    hoG = vigra.filters.hessianOfGaussianEigenvalues(rgb2gray(imgLab), sigmahoG)
    filters.append(hoG[:,:,0])
    featureNames.append('HessGauss1')
    filters.append(hoG[:,:,1])
    featureNames.append('HessGauss2')

    
    ### Laplacian of Gaussian ###
    loG = vigra.filters.laplacianOfGaussian(imgLab)
    loG = loG[:,:,0]  # all three channels of loG are identical here, so keep only the first
    filters.append(loG)
    featureNames.append('LoG')

    ### Canny Filter ###
    scaleCanny = 2.0
    thresholdCanny = 2.0
    markerCanny = 1
    canny = vigra.VigraArray(vigra.analysis.cannyEdgeImage(rgb2gray(img), scaleCanny, 
                                                           thresholdCanny, markerCanny), 
                             dtype=np.float32)
    filters.append(canny)
    featureNames.append('Canny')

    
    ### Structure Tensor Eigenvalues ###
    strucTens = vigra.filters.structureTensorEigenvalues(imgLab, 1.5, 3.0)
    filters.append(strucTens[:,:,0])
    featureNames.append('StrucTensor1')
    filters.append(strucTens[:,:,1])
    featureNames.append('StrucTensor2')
    

    n4 = vigra.impex.readImage('images/edgeDetectors/n4/' + imgId + '.png')
    filters.append(n4)
    featureNames.append('N4')

    dollar = vigra.impex.readImage('images/edgeDetectors/dollar/' + imgId + '.png')
    filters.append(dollar)
    featureNames.append('Dollar')
    
        
        
    ##########################################################################################
    ############# Edge Weights Calculation #############
    featureSpace = np.ones((rag.edgeNum, 1))
    
    
    for filt in filters:
        gridGraphEdgeIndicator = graphs.edgeFeaturesFromImage(rag.baseGraph, filt)
        edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator)
        # edgeWeights /= edgeWeights.max()
        edgeWeights = edgeWeights.reshape(edgeWeights.shape[0], 1)
        featureSpace = np.concatenate((featureSpace, edgeWeights), axis=1)
                
          
    # Edge-length-weighted variants of the two learned edge detectors
    for name in ('N4', 'Dollar'):
        pos = np.where(np.array(featureNames) == name)[0][0]
        edgeWeights = featureSpace[:, pos] * rag.edgeLengths()
        edgeWeights /= edgeWeights.max()
        featureSpace = np.concatenate((featureSpace, edgeWeights.reshape(-1, 1)), axis=1)
        featureNames.append(name + '_EdgeLengthWeighted')




    # Node statistics around the edges for the two learned edge detectors
    for name, det in (('N4', n4), ('Dollar', dollar)):
        rgbDummy = np.array(det)
        rgbDummy = rgbDummy.reshape(rgbDummy.shape[0], rgbDummy.shape[1], 1)
        for radius in (1, 3):
            edgeWeights = getEdgeWeightsFromNodesAround3(rag, rgbDummy, radius,
                                                         variance=True, mean=True, meanRatio=True,
                                                         medianRatio=True, skewness=True, kurtosis=True)
            featureSpace = np.concatenate((featureSpace, edgeWeights), axis=1)
            featureNames.extend('{0}_{1}_{2}'.format(name, stat, radius)
                                for stat in ('Variance', 'Mean', 'MeanRatio',
                                             'MedianRatio', 'Skewness', 'Kurtosis'))

    # The same node statistics on the Lab image, per channel
    for radius in (1, 3):
        edgeWeights = getEdgeWeightsFromNodesAround3(rag, imgLab, radius,
                                                     variance=True, mean=True, meanRatio=True,
                                                     medianRatio=True, skewness=True, kurtosis=True)
        featureSpace = np.concatenate((featureSpace, edgeWeights), axis=1)
        featureNames.extend('{0}_{1}_{2}'.format(stat, radius, ch)
                            for stat in ('Variance', 'Mean', 'MeanRatio',
                                         'MedianRatio', 'Skewness', 'Kurtosis')
                            for ch in 'RGB')
    
    featureSpace = featureSpace.astype(np.float64)

    ### Normalize to [-1, 1] (disabled alternative)
    '''for edgeWeights in featureSpace.transpose():
        if (edgeWeights.min() < 0):
            edgeWeights -= edgeWeights.min()
        maximum = edgeWeights.max() 
        edgeWeights *= 2
        edgeWeights /= maximum
        edgeWeights -= 1
    '''

    ### Normalize to [0, 1] (in place: the transposed rows are views into featureSpace)
    for edgeWeights in featureSpace.transpose():
        if edgeWeights.min() < 0:
            edgeWeights -= edgeWeights.min()
        edgeWeights /= edgeWeights.max()
    
    return featureSpace, featureNames
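The final rescaling mutates featureSpace through the transposed views and would divide by zero on a constant column. A standalone sketch of the same idea (a variant that always shifts and guards constant columns; the function name is hypothetical):

import numpy as np

def normalize_columns_01(feature_space):
    # Shift every column to start at 0, then divide by its maximum;
    # constant columns would divide by zero, so guard them.
    fs = feature_space.astype(np.float64).copy()
    fs -= fs.min(axis=0)
    col_max = fs.max(axis=0)
    col_max[col_max == 0] = 1.0
    return fs / col_max

# usage on a hypothetical (edges x features) matrix
X = np.random.randn(100, 5)
X01 = normalize_columns_01(X)
assert X01.min() >= 0.0 and X01.max() <= 1.0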