Example #1
import skimage.color
from matplotlib import pyplot as plt
from skimage import data
from skimage.filters import sobel
from skimage.segmentation import slic
from skimage.future import graph


def hierarchical_merging_of_region_boundary_rags_example():
    img = data.coffee()

    edges = sobel(skimage.color.rgb2gray(img))

    labels = slic(img, compactness=30, n_segments=400)
    g = graph.rag_boundary(labels, edges)

    graph.show_rag(labels, g, img)
    plt.title('Initial RAG')

    labels2 = graph.merge_hierarchical(labels,
                                       g,
                                       thresh=0.08,
                                       rag_copy=False,
                                       in_place_merge=True,
                                       merge_func=merge_boundary,
                                       weight_func=weight_boundary)

    graph.show_rag(labels, g, img)
    plt.title('RAG after hierarchical merging')

    plt.figure()
    out = skimage.color.label2rgb(labels2, img, kind='avg')
    plt.imshow(out)
    plt.title('Final segmentation')

    plt.show()
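Note: this example (like several below) passes `merge_func=merge_boundary` and `weight_func=weight_boundary` to `merge_hierarchical` without defining them. They are the callback pair from the scikit-image region-boundary RAG gallery example, reproduced in full in Example #23; a minimal sketch for reference:

def weight_boundary(graph, src, dst, n):
    """Count-weighted average of the boundary weights that `src` and `dst`
    each share with their common neighbor `n` (as in Example #23)."""
    default = {'weight': 0.0, 'count': 0}
    count_src = graph[src].get(n, default)['count']
    count_dst = graph[dst].get(n, default)['count']
    weight_src = graph[src].get(n, default)['weight']
    weight_dst = graph[dst].get(n, default)['weight']
    count = count_src + count_dst
    return {
        'count': count,
        'weight': (count_src * weight_src + count_dst * weight_dst) / count
    }


def merge_boundary(graph, src, dst):
    """Called before two nodes are merged; no extra bookkeeping is needed here."""
    pass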
Example #2
    def _quantify_2d(cls, tile, img_seg, nz, feature_calculators, **kwargs):
        feature_values = []
        for z in range(nz):
            # Calculate properties of masked+labeled cell components
            cell_props = measure.regionprops(img_seg[z][CytometerBase.CELL_MASK_CHANNEL], cache=False)
            nucleus_props = measure.regionprops(img_seg[z][CytometerBase.NUCLEUS_MASK_CHANNEL], cache=False)
            if len(cell_props) != len(nucleus_props):
                raise ValueError(
                    'Expecting cell and nucleus properties to have same length (nucleus props = {}, cell props = {})'
                    .format(len(nucleus_props), len(cell_props))
                )

            # Compute RAG for cells if necessary
            graph = None
            if kwargs.get('cell_graph'):
                labels = img_seg[z][CytometerBase.CELL_MASK_CHANNEL]

                # rag_boundary fails on all zero label matrices so default to empty graph if that is the case
                # see: https://github.com/scikit-image/scikit-image/blob/master/skimage/future/graph/rag.py#L386
                if np.count_nonzero(labels) > 0:
                    graph = label_graph.rag_boundary(labels, np.ones(labels.shape))
                else:
                    graph = label_graph.RAG()

            # Loop through each detected cell and compute features
            for i in range(len(cell_props)):
                props = ObjectProperties(cell=cell_props[i], nucleus=nucleus_props[i])

                # Run each feature calculator and add results in order
                feature_values.append([
                    v for fc in feature_calculators
                    for v in fc.get_feature_values(tile, img_seg, graph, props, z)
                ])

        return feature_values
Example #3
def show_segmen_rag(array,
                    pathlibpath,
                    numSegments,
                    threshold,
                    cedges=177,
                    compactness=0.1,
                    sigma=5,
                    convert2lab=False):
    print('Obtaining superstructures')
    segments = slic(array,
                    compactness=compactness,
                    n_segments=numSegments,
                    sigma=sigma,
                    multichannel=False,
                    convert2lab=convert2lab)
    print('Number of SV: ', len(np.unique(segments)))
    segments += 1
    edges = filters.sobel(array)
    print('Obtaining RAG')
    rag = graph.rag_boundary(segments, edges, connectivity=1)
    segments2 = graph.merge_hierarchical(segments,
                                         rag,
                                         thresh=threshold,
                                         in_place_merge=True,
                                         rag_copy=False,
                                         merge_func=merge_boundary,
                                         weight_func=weight_boundary)
    print('Final Number of SV: ', len(np.unique(segments2)))

    graph.show_rag(segments, rag, array, edge_cmap='viridis')
    save_yorno(array, segments, pathlibpath, cedges)
Example #4
def get_patches(img, compactness=30, n_segments=200, rag_thresh=0.08):
    """Get list of patches from image found with SLIC."""
    patches = []
    img_lab = color.rgb2lab(img)
    edges = filters.sobel(color.rgb2gray(img))
    labels = segmentation.slic(img_lab, convert2lab=False,
                               compactness=compactness, n_segments=n_segments)
    g = graph.rag_boundary(labels, edges)
    segmented = graph.merge_hierarchical(labels, g, thresh=rag_thresh,
                                         rag_copy=False,
                                         in_place_merge=True,
                                         merge_func=merge_boundary,
                                         weight_func=weight_boundary)

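    # Group flat pixel indices by merged label: argsort orders the indices by
    # label, np.unique(..., return_index=True) marks where each label's run
    # starts, and np.split slices the sorted indices into one chunk per label.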
    idxs_sorted = np.argsort(segmented.reshape((segmented.size)))
    _, idxs_start = np.unique(
        segmented.reshape((segmented.size))[idxs_sorted],
        return_index=True)
    idxs_for_values = np.split(idxs_sorted, idxs_start[1:])
    for idxs in idxs_for_values:
        rows, cols = np.unravel_index(idxs, img.shape[:2])
        data = img[rows, cols]
        lab = np.mean(img_lab[rows, cols], axis=0)
        patches.append((rows, cols, data, lab))
    return patches
Example #5
    def compute(self, image, n_region, mask=None):
        """
        Parameters
        ----------
        image: numpy ndarray
            image array to be represented as regions
        n_region : int
            the number of regions to be generated
        mask : numpy ndarray, optional
            mask image giving the region of interest
        Returns
        -------
        regions : a list of regions
        """

        gray_image = image.copy()
        if mask is not None:
            gray_image[mask > 0] = 0

        # Normalize the image to the 0~1 range
        gray_image = (gray_image - gray_image.min()) / (gray_image.max() - gray_image.min())
        labels = segmentation.slic(gray_image.astype(float),
                                   n_segments=n_region,
                                   slic_zero=True, sigma=2,
                                   multichannel=False,
                                   enforce_connectivity=True)
        # edge_map = filters.sobel(gray_image)  # just for 2-D image
        edge_map = filters.laplace(gray_image)

        # Given an image's initial segmentation and its edge map this method constructs the
        # corresponding Region Adjacency Graph (RAG). Each node in the RAG represents a set of
        # pixels within the image with the same label in labels. The weight between two adjacent
        # regions is the average value in edge_map along their boundary.
        rag = graph.rag_boundary(labels, edge_map)

        regions = []
        n_labels = labels.max() + 1
        for r in np.arange(n_labels):
            # get vox_pos
            position = np.transpose(np.nonzero(labels == r))

            # get vox_feat
            vox_feat = np.zeros((position.shape[0], 1))
            n_D = position.shape[1]
            for i in range(position.shape[0]):
                if n_D == 2:
                    vox_feat[i][0] = image[position[i][0], position[i][1]]
                elif n_D == 3:
                    vox_feat[i][0] = image[position[i][0], position[i][1], position[i][2]]
                else:
                    raise RuntimeError("We just consider 2_D and 3_D images at present!")

            regions.append(Region(position, vox_feat=vox_feat, r_id=r))

        for r in range(n_labels):
            for r_key in rag.edge[r].keys():  # networkx 1.x API; use rag.adj[r] on networkx >= 2
                regions[r].add_neighbor(regions[r_key])

        self.regions = regions
        self.image_shape = image.shape
        return regions
Example #6
def generate_neighbor_matrix(superpixel_map, sequence):
    '''
    Convert the input superpixel image into a matrix with shape
    (number_of_superpixels, number_of_superpixels) that indicates which
    superpixels are neighbors of each other.
    "1" --> the two superpixels are neighbors.
    "0" --> the two superpixels are not neighbors.

    Args:
        superpixel_map: (Width, Height)
        sequence: Segmentation values corresponding to labels, excluding the background.

    Return:
        Adjacency matrix.
    '''

    pseudo_edge_map = np.ones(np.shape(superpixel_map))
    rag = graph.rag_boundary(superpixel_map, pseudo_edge_map, connectivity=2)
    num_super = rag.number_of_nodes()
    neighbor_matrix = np.zeros((num_super, num_super))

    nodes_list = rag.nodes()
    for i in nodes_list:

        if i not in sequence:
            continue

        for neighbor in rag.neighbors_iter(i):  # networkx 1.x API; use rag.neighbors(i) on networkx >= 2
            neighbor_not_background = np.intersect1d(neighbor, sequence)
            neighbor_matrix[i, neighbor_not_background] = 1

    return neighbor_matrix
Example #7
def get_groups(original):
    """
    Finds a segmentation of the image by taking an oversegmentation produced by the Priority-Flood watershed and
    progressively reducing it with a boundary region adjacency graph.

    :param original: Original RGB image to segment
    :return: Segmented image. label = 0 represents an edge, label = -1 represents a pruned area
    """
    original = gaussian(original, sigma=1.5, multichannel=True)
    original = downscale_to(original, area_limit=2e5)
    g = original[:, :, 1]

    def weight_boundary(RAG, src, dst, n):
        default = {'weight': 0.0, 'count': 0}
        count_src = RAG[src].get(n, default)['count']
        count_dst = RAG[dst].get(n, default)['count']
        weight_src = RAG[src].get(n, default)['weight']
        weight_dst = RAG[dst].get(n, default)['weight']
        count = count_src + count_dst
        return {
            'count': count,
            'weight': (count_src * weight_src + count_dst * weight_dst) / count
        }

    greyscale = rgb2gray(original)
    gradient = np.hypot(sobel(greyscale, axis=0), sobel(greyscale, axis=1))
    segmentation1 = watershed(gradient, markers=400, mask=greyscale > 0.3)
    RAG = graph.rag_boundary(segmentation1, gradient)
    segmentation2 = graph.merge_hierarchical(segmentation1,
                                             RAG,
                                             thresh=5e-3,
                                             rag_copy=False,
                                             in_place_merge=True,
                                             merge_func=lambda *args: None,
                                             weight_func=weight_boundary)
    segmentation2[greyscale < 0.3] = -1
    segmentation2 = prune(segmentation2,
                          abs_threshold=g.size / 1000,
                          default=-1)
    counts, lo, hi = [], [], []
    for label in set(segmentation2[segmentation2 >= 0]):
        interior = distance_transform_edt(segmentation2 == label) >= 1.5
        if np.sum(interior) > 0:  # skip empty interiors before taking percentiles
            counts.append(np.sum(interior))
            lo.append(np.percentile(gradient[interior], q=70))
            hi.append(np.percentile(gradient[interior], q=90))

    edges = canny(greyscale,
                  low_threshold=np.average(lo, weights=counts),
                  high_threshold=np.average(hi, weights=counts))
    edges = binary_dilation(edges, disk(2))
    edges = binary_closing(edges, disk(5))
    edges = remove_small_objects(edges, g.size / 1000)
    edges = edges[1:-1, 1:-1]
    edges = np.pad(edges, pad_width=1, mode='constant', constant_values=1)
    groups = im_label(edges, background=1, connectivity=1)
    groups = prune(groups, abs_threshold=g.size / 1000)
    groups[greyscale <
           0.15] = -2  # Ignore black areas due to mechanical vignetting
    return groups
Example #8
 def boundaryRAG(cls, data, labels, sigma):
     edges = sobel(data)
     mat = np.copy(labels) + 1 if np.min(labels) == 0 else np.copy(labels)
     g = graph.rag_boundary(mat, edges, connectivity=2)
     props = regionprops(mat)
     labelsToIndex = cls.labelsToPropsIndex(props)
     cls.addData(g, data.shape, sigma, props, labelsToIndex)
     return g
Example #9
 def process(self, data):
     with Timer('superpixelnode_2'):
         im_l, im_r, rgb, labels = data
         edges = filters.sobel(color.rgb2gray(rgb))
         g = graph.rag_boundary(labels, edges)
         labels2 = graph.merge_hierarchical(labels,
                                            g,
                                            thresh=0.02,
                                            rag_copy=False,
                                            in_place_merge=True,
                                            merge_func=merge_boundary,
                                            weight_func=weight_boundary)
     return im_l, im_r, labels2.astype(np.uint8)
Example #10
 def get_segments(self, frame):
     rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
     labels = slic(rgb, self.n_segments)
     edges = filters.sobel(color.rgb2gray(rgb))
     g = graph.rag_boundary(labels, edges)
     labels2 = graph.merge_hierarchical(labels,
                                        g,
                                        thresh=0.02,
                                        rag_copy=False,
                                        in_place_merge=True,
                                        merge_func=merge_boundary,
                                        weight_func=weight_boundary)
     return labels2.astype(np.uint8)
Example #11
def merge_hier_boundary(labels, image, thresh=0.03, show_rag=False):
    """
    Merges the given labels using a RAG based on boundaries.

    Parameters
    ----------
    labels: ndarray
    image: ndarray
    thresh: float
    show_rag: bool
    
    Returns
    -------
    rag: RAG
    labels: ndarray
        Merged labels.
    """
    edges = filters.sobel(color.rgb2gray(image))
    rag = graph.rag_boundary(labels, edges)
    rag_copy = False
    if show_rag:
        rag_copy = True
        fig, ax = plt.subplots(1, 2, figsize=(10, 10))

    labels0 = labels  # keep the pre-merge labels so the initial RAG can still be drawn
    labels = graph.merge_hierarchical(labels,
                                      rag,
                                      thresh=thresh,
                                      rag_copy=rag_copy,
                                      in_place_merge=True,
                                      merge_func=merge_boundary,
                                      weight_func=weight_boundary)
    if show_rag:
        graph.show_rag(labels0, rag, image, ax=ax[0])
        ax[0].set_title('Initial RAG')
        graph.show_rag(labels, graph.rag_boundary(labels, edges), image, ax=ax[1])
        ax[1].set_title('Final RAG')

    return rag, labels
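A hedged usage sketch for the helper above; it assumes the `merge_boundary`/`weight_boundary` callbacks (Example #23) and the `graph`, `filters` and `color` imports used throughout these examples are in scope, and `data.coffee()` is only a stand-in image:

import numpy as np
from skimage import data, segmentation

img = data.coffee()
initial_labels = segmentation.slic(img, compactness=30, n_segments=400, start_label=1)
rag, merged_labels = merge_hier_boundary(initial_labels, img, thresh=0.03)
print(np.unique(initial_labels).size, '->', np.unique(merged_labels).size)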
Example #12
def region_boundry(image):
    
    #image = imageGlobal

    labels = segmentation.slic(image, compactness=30, n_segments=400)
    edges = filters.sobel(image)
    edges_rgb = color.gray2rgb(edges)

    g = graph.rag_boundary(labels, edges)
    lc = graph.show_rag(labels, g, edges_rgb, img_cmap=None, edge_cmap='viridis',
                        edge_width=1.2)

    plt.colorbar(lc, fraction=0.03)
    io.show()
Example #13
    def _get_neighbors_from_label_image(self,
                                        suffix,
                                        iterations=1,
                                        save_neighbors=True,
                                        **kwargs):
        '''
        From the label image, find all neighbors.
        Assumes that the labels on the image are the same as in the feature table.
        On the label image, 0 is background and object numbers start at 1.
        '''

        labels = np.unique(
            self.feature_table[self._column_objectnumber].values)
        custom_image_label = self.image_label.copy()
        custom_image_label[~np.isin(custom_image_label, labels)] = 0

        if self._debug:
            print(
                "_get_neighbors_from_label_image\n#labels = {}; #objects = {}, starting label id = {}, iterations={}"
                .format(len(labels), self.n, min(labels), iterations))

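        # Pixels where the 3x3 morphological gradient of the label image is
        # nonzero sit on a boundary between two labels (or label/background).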
        all_borders = (morphological_gradient(custom_image_label, size=3) > 0)
        label_rag = graph.rag_boundary(custom_image_label,
                                       all_borders.astype(float),
                                       connectivity=iterations)
        label_rag.remove_node(0)

        adj_mat = np.array(nx.adjacency_matrix(label_rag).todense())
        sum_neighbors = np.sum(adj_mat, axis=0)

        self.adjacency_matrix[iterations] = [adj_mat]

        if self._debug:
            print("_get_neighbors_from_label_image", suffix)
            print("_get_neighbors_from_label_image", len(sum_neighbors),
                  len(labels), self.n)

        self.feature_table.loc[
            self.feature_table[self._column_objectnumber].isin(labels),
            'NumberNeighbors_{}'.format(suffix)] = sum_neighbors

        if save_neighbors:
            index_for_series = self.feature_table.index[self.feature_table[
                self._column_objectnumber].isin(labels)]
            self.feature_table.loc[:, 'neighbors_{}'.format(
                suffix)] = self.feature_table[self._column_objectnumber].apply(
                    lambda v: list(label_rag.neighbors(v)))  # list() so an iterator is not stored (networkx >= 2)
Example #14
def test_rag_boundary():
    labels = np.zeros((16, 16), dtype='uint8')
    edge_map = np.zeros_like(labels, dtype=float)

    edge_map[8, :] = 0.5
    edge_map[:, 8] = 1.0

    labels[:8, :8] = 1
    labels[:8, 8:] = 2
    labels[8:, :8] = 3
    labels[8:, 8:] = 4

    g = graph.rag_boundary(labels, edge_map, connectivity=1)
    assert set(g.nodes()) == set([1, 2, 3, 4])
    assert set(g.edges()) == set([(1, 2), (1, 3), (2, 4), (3, 4)])
    assert g[1][3]['weight'] == 0.25
    assert g[2][4]['weight'] == 0.34375
    assert g[1][3]['count'] == 16
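For reference, the asserted values follow directly from the layout above, since `rag_boundary` averages `edge_map` over the pixels on both sides of each shared boundary (16 pixels per boundary here):

# Boundary (1, 3): rows 7 and 8 over cols 0-7; the 8 row-7 pixels are 0.0
# and the 8 row-8 pixels are 0.5.
w_13 = (8 * 0.0 + 8 * 0.5) / 16            # 0.25, count = 16
# Boundary (2, 4): rows 7 and 8 over cols 8-15; pixels (7, 8) and (8, 8)
# are 1.0 (the column assignment overwrites the 0.5) and the remaining
# 7 row-8 pixels are 0.5.
w_24 = (2 * 1.0 + 7 * 0.5 + 7 * 0.0) / 16  # 0.34375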
Example #16
def get_rag(gray_image, labels):
    """
    Get the region adjacency graph from the labeled image and the corresponding edge map generated by
    applying a Sobel filter to the greyscale image

    :param gray_image: The greyscale image corresponding to the labeled image
    :type gray_image: numpy.ndarray
    :param labels: a labeled segmented image, where the pixels in the image are labeled with index of the \
    segmented regions.
    :type labels: numpy.ndarray
    :return: The region adjacency graph; \
    see https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_rag_boundary.html for more \
    references
    :rtype: skimage.future.graph.RAG
    """
    # Get rag of the labeled image
    edge_map = sobel(gray_image)
    rag = graph.rag_boundary(labels, edge_map)
    return rag
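A hedged usage sketch (it assumes the module imports `sobel` and `graph` as the function body implies; `data.coffee()` is only an illustrative input):

from skimage import data, color, segmentation

img = data.coffee()
gray = color.rgb2gray(img)
labels = segmentation.slic(img, compactness=30, n_segments=400, start_label=1)
rag = get_rag(gray, labels)
print(rag.number_of_nodes(), 'regions,', rag.number_of_edges(), 'adjacencies')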
Example #17
    def get_segments(self, frame):

        if not self.initialized:
            self.rgb = np.empty_like(frame)
            self.lab = np.empty_like(frame)
            self.initialized = True

        #with Timer('super-labels1'):
        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, dst=self.rgb)
        labels = slic(self.rgb, self.n_segments)
        #with Timer('super-labels2'):
        edges = filters.sobel(color.rgb2gray(self.rgb))
        g = graph.rag_boundary(labels, edges)
        labels2 = graph.merge_hierarchical(labels,
                                           g,
                                           thresh=0.02,
                                           rag_copy=False,
                                           in_place_merge=True,
                                           merge_func=merge_boundary,
                                           weight_func=weight_boundary)
        return labels2.astype(np.uint8)
Example #18
File: merge.py, Project: qq2898/rio-segment
def rag_merge_threshold(edges, labels, threshold, size_pen):
    '''
    Merge adjacent segments using region adjacency graph based on strength of
    edge between them.
    '''

    # create region adjacency graph using the edge info to produce
    # edge weights between the nodes.
    click.echo('creating Region Adjacency Graph')
    rag = graph.rag_boundary(labels, edges)

    # calculate pixel counts for each node
    lab, counts = np.unique(labels.ravel(), return_counts=True)
    click.echo('starting with {} segments'.format(lab.max()))
    counts = (counts / counts.max()) * size_pen
    for i, n in enumerate(lab):
        rag.node[n].update({'pixels': counts[i]})

    # update initial edge weights to weight by mean size of nodes
    # (rag.node, rag.edge and edges_iter() are networkx 1.x APIs; on
    # networkx >= 2 use rag.nodes[n], rag[n1][n2] and rag.edges())
    for n1, n2 in rag.edges_iter():
        total_pix = rag.node[n1]['pixels'] + rag.node[n2]['pixels']
        rag.edge[n1][n2]['weight'] += total_pix
        rag.edge[n2][n1]['weight'] += total_pix

    # calculate a value from the threshold percentile of edge weights.
    edge_weights = [x[2]['weight'] for x in rag.edges(data=True)]
    t = np.percentile(edge_weights, threshold)

    # merge adjacent labels iteratively if their edge weight is below the
    # required threshold.
    click.echo('merging segments with edge weights below {} percentile'.format(
            threshold))
    refined_labels = graph.merge_hierarchical(
            labels, rag, t, rag_copy=True, in_place_merge=True,
            merge_func=merge_nodes, weight_func=update_edge_weights)
    refined_labels, *_ = segmentation.relabel_sequential(refined_labels + 1,
                                                         offset=1)
    click.echo('merged into {} segments'.format(refined_labels.max()))
    return refined_labels
Example #19
def region_boundry(img):
    gimg = color.rgb2gray(img)

    fig, ax = plt.subplots(nrows=1)

    labels = segmentation.slic(img, compactness=30, n_segments=400)
    edges = filters.sobel(gimg)
    edges_rgb = color.gray2rgb(edges)

    g = graph.rag_boundary(labels, edges)
    lc = graph.show_rag(labels,
                        g,
                        edges_rgb,
                        img_cmap=None,
                        edge_cmap='viridis',
                        edge_width=1.2)

    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    ax.set_title("Original Image")

    plt.colorbar(lc, fraction=0.03)

    plt.tight_layout()
    plt.show()
Example #20
 def _ncutb_seg(self, data_list):
     """
     This function use bounday instead of color to formulate the RAG
     Use the ncut method to segment the image
     [image, slic_label/[slic_param], [ncut_param]]
     """
     img = data_list[0]
     param = data_list[1]
     param_cut = data_list[2]
     if param_cut is None:
         threshold = 0.2
     else:
         threshold = param_cut[0]
     # Check if the param is the super pixel label or the num of super pixel
     # to be segmented
     try:
         num = int(param[0])
         # super pixel seg
         label1 = segmentation.slic(img,
                                    compactness=10,
                                    n_segments=num,
                                    max_iter=100,
                                    slic_zero=True)
     except:
         label1 = param
     # N-Cut
     # Edge detection
     edge = filters.sobel(color.rgb2gray(img))
     # Smooth the edge map
     edge = filters.gaussian(edge, 1)
     edge = filters.gaussian(edge, 1)
     # Reverse the energy map
     ne = edge.max() - edge
     rag = graph.rag_boundary(label1, ne)
     label2 = graph.cut_normalized(label1, rag, thresh=threshold)
     return label2
Example #21
def make_network(mask, draw=False, save_loc=None):
    orig, image_labels, edge_map = get_img_labels_edge_map(mask)
    g = graph.rag_boundary(image_labels, edge_map)
    g.remove_node(0)
    if draw:
        fig, ax = plt.subplots(2, 2, figsize=(15, 15), dpi=200)
        ax = ax.ravel()
        ax[0].imshow(orig, cmap='gray')
        ax[2].imshow(label2rgb(image_labels, image=orig))
        ax[1].imshow(edge_map)
        lc = graph.show_rag(image_labels,
                            g,
                            edge_map,
                            ax=ax[3],
                            edge_width=5,
                            edge_cmap='Blues')

        fig.colorbar(lc, fraction=0.03, ax=ax[3])
        pos = {}
        for idx in list(g.nodes):
            pos[idx] = (np.array(g.nodes[idx]['centroid'])[::-1])
        nx.draw(g, pos, ax=ax[3])
        for a in ax:
            a.grid('off')
        fig.tight_layout()

        if save_loc is not None:
            fig.savefig(save_loc)
    else:
        # because if we don't draw then these features aren't added to the graph
        props = regionprops(image_labels)
        for (n, data), region, idx in zip(g.nodes(data=True), props,
                                          range(len(props))):
            data['centroid'] = tuple(map(int, region['centroid']))
            data['uid'] = idx
    return g
Example #22
elif OVER_SEG == "quick":
    segments = segmentation.quickshift(gray2rgb(img),
                                       kernel_size=3,
                                       max_dist=6,
                                       ratio=0.5)
elif OVER_SEG == "water":
    gradient = sobel(rgb2gray(img))
    segments = segmentation.watershed(gradient, markers=400, compactness=0.001)
    segments -= 1
else:
    raise ValueError(OVER_SEG)

# Region Adjacency Graph
if ALGO == "hier":
    edges = filters.sobel(color.rgb2gray(img))
    g = graph.rag_boundary(segments, edges)
    labels = graph.merge_hierarchical(segments,
                                      g,
                                      thresh=0.08,
                                      rag_copy=True,
                                      in_place_merge=True,
                                      merge_func=merge_boundary,
                                      weight_func=weight_boundary)
elif ALGO == "ncut":
    g = graph.rag_mean_color(img, segments, mode='similarity')
    labels = graph.cut_normalized(segments,
                                  g,
                                  thresh=0.0002,
                                  num_cuts=20,
                                  in_place=False)
elif ALGO == "thresh":
Example #23
def img_to_nodes(img, mask):
    def weight_boundary(graph, src, dst, n):
        """
        Handle merging of nodes of a region boundary region adjacency graph.

        This function computes the `"weight"` and the count `"count"`
        attributes of the edge between `n` and the node formed after
        merging `src` and `dst`.


        Parameters
        ----------
        graph : RAG
            The graph under consideration.
        src, dst : int
            The vertices in `graph` to be merged.
        n : int
            A neighbor of `src` or `dst` or both.

        Returns
        -------
        data : dict
            A dictionary with the "weight" and "count" attributes to be
            assigned for the merged node.

        """
        default = {'weight': 0.0, 'count': 0}

        count_src = graph[src].get(n, default)['count']
        count_dst = graph[dst].get(n, default)['count']

        weight_src = graph[src].get(n, default)['weight']
        weight_dst = graph[dst].get(n, default)['weight']

        count = count_src + count_dst
        return {
            'count': count,
            'weight': (count_src * weight_src + count_dst * weight_dst) / count
        }

    def merge_boundary(graph, src, dst):
        """Call back called before merging 2 nodes.

        In this case we don't need to do any computation here.
        """
        pass

    def separate_regions(labels, graph):
        indices = {}
        for x in range(labels.shape[0]):
            for y in range(labels.shape[1]):
                id = labels[x][y]
                if id not in indices:
                    indices[id] = []
                indices[id].append((x, y))
        nodes = {}
        for key, value in indices.items():
            nodes[key] = Node(key, graph, np.array(value))
        for n in nodes.values():
            n.update_neighbours(nodes, labels.size)
        return nodes

    def relabel(labels):
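        # Rank labels by frequency: the most frequent label is renumbered to 0,
        # the next most frequent to 1, and so on.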
        idx, counts = np.unique(labels, return_counts=True)
        idx = idx[counts.argsort()]
        idx = idx[::-1]
        ch = np.zeros_like(idx)
        ch[idx] = np.arange(idx.size)
        labels = ch[labels]
        return labels

    edges = edg(img)
    labels = me.label(mask)
    g = graph.rag_boundary(labels, edges)
    labels = graph.merge_hierarchical(labels,
                                      g,
                                      thresh=0.2,
                                      rag_copy=False,
                                      in_place_merge=True,
                                      merge_func=merge_boundary,
                                      weight_func=weight_boundary)
    labels = relabel(labels)
    labels = mp.dilation(labels)
    labels = mp.area_closing(labels, 10)
    labels = mp.erosion(labels)
    g = graph.rag_boundary(labels, edges)
    nodes = separate_regions(labels, g)

    return labels, nodes
Example #24
File: dist.py, Project: jmorys/EC_rainbow

flat = np.nanargmin(voro_tensor, axis=0)

flat = np.ma.MaskedArray(flat, mask)

plt.figure(dpi=800)
plt.imshow(flat)
io.show()


edges = filters.sobel(flat)
edges_rgb = color.gray2rgb(edges)
from skimage.future import graph
plt.figure(dpi=800)
g = graph.rag_boundary(flat, edges, connectivity=1)
g.remove_node(0)

lc = graph.show_rag(flat, g, edges_rgb, img_cmap=None, edge_cmap='viridis',
                    edge_width=1.2)

plt.colorbar(lc, fraction=0.03)
plt.show()

for i in range(1, len(uni)):
    print([uni[x] for x in g.neighbors(i)])

# plt.figure(dpi=500)
# plt.scatter(cols[rpos, 0], cols[rpos,1])
# plt.grid(True)
# plt.show()
Example #25
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()
    ######################
    # Generate the tfRecord data #
    ######################
    datareader,Volshape,rindex,spmap,labelOrg,imgOrg=EvalSample_Gnerate(FLAGS.dataset_dir, 'Glabel.nrrd')
    Patchsize=len(rindex)
    ######################
    # Select the dataset #
    ######################

    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir,file_pattern=FLAGS.file_name,Datasize=Patchsize)

    ####################
    # Select the model #
    ####################
    #num_classes=2

    with tf.Graph().as_default():
        network_fn = nets_factory.get_network_fn(
                FLAGS.model_name,
                num_classes=(dataset.num_classes - FLAGS.labels_offset),
                is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################

    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    # label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
                 preprocessing_name,
                 is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

    images, labels = tf.train.batch(
             [image, label],
             batch_size=FLAGS.batch_size,
             num_threads=FLAGS.num_preprocessing_threads,
             capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################

    logits, end_points = network_fn(images)
    probabilities = tf.nn.softmax(logits)
    pred = tf.argmax(logits, dimension=1)
    # if FLAGS.moving_average_decay:
    #   variable_averages = tf.train.ExponentialMovingAverage(
    #       FLAGS.moving_average_decay, tf_global_step)
    #   variables_to_restore = variable_averages.variables_to_restore(
    #       slim.get_model_variables())
    #   variables_to_restore[tf_global_step.op.name] = tf_global_step
    # else:
    #   variables_to_restore = slim.get_variables_to_restore()

    # #predictions = tf.argmax(logits, 1)
    # labels = tf.squeeze(labels)
    #
    # # Define the metrics:
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    #     'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
    #     'Recall@5': slim.metrics.streaming_recall_at_k(
    #         logits, labels, 5),
    # })
    #
    # # Print the summaries to screen.
    # summary_ops = []
    # for name, value in names_to_values.items():
    #   summary_name = 'eval/%s' % name
    #   op = tf.scalar_summary(summary_name, value, collections=[])
    #   op = tf.Print(op, [value], summary_name)
    #   tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
    #   summary_ops.append(op)
    # # TODO(sguada) use num_epochs=1
    # if FLAGS.max_num_batches:
    #   num_batches = FLAGS.max_num_batches
    # else:
    #   # This ensures that we make a single pass over all of the data.
    #   num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
            checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)
    init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_path),
                slim.get_model_variables())  # variable name???

    #Volshape=(50,61,61)
    imgshape=spmap.shape
    segResult=np.zeros(imgshape)
    groundtruth=np.zeros(imgshape)
    segPromap=np.zeros(imgshape)
    segPromapEdge=np.zeros(imgshape)
    PreMap=[]
    labellist=[]
    seglist=[]
    conv1list=[]
    conv2list=[]
    imgorglist=[]
    fclist=[]
    with tf.Session() as sess:
            # Load weights
            init_fn(sess)
           # sess.run(images.initializer, feed_dict)
            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            num_iter = int(math.ceil(dataset.num_samples/float(FLAGS.batch_size)))
            step = 0

            try:
                while step < num_iter and not coord.should_stop():
                    # Run evaluation steps or whatever
                    segmentation,log,pmap,labelmap,imgpre,conv1,conv2,fc3= sess.run([pred,logits,probabilities,labels,images,end_points['conv1'],end_points['conv3'],end_points['fc3']])
                    step+=1
                    PreMap.extend(pmap)
                    conv1list.extend(conv1)
                    conv2list.extend(conv2)
                    fclist.extend(fc3)
                    imgorglist.extend(imgpre)
                    seglist.append(segmentation)
                    labellist.append(labelmap)
                    print('The No. %d/%d calculation' % (step, num_iter))
                    #Miaimshow.subplots(Pro_imgs, num=step+2, cols=8)
            except tf.errors.OutOfRangeError:
                print('Done evaluation -- epoch limit reached')
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
            PreMap = np.array(PreMap)
            np.save(os.path.join(FLAGS.dataset_dir,'liverspProbmap.npy'), PreMap)
            # PreMap=np.squeeze(PreMap,axis=1)

           # PreMap_flat = PreMap.ravel()
           # PreMap_flat=np.divide((PreMap_flat - np.amin(PreMap_flat)) * 255, (np.amax(PreMap_flat) - np.amin(PreMap_flat)))
            m = 0
            for i in range(len(rindex)):
                segResult[spmap==rindex[i]]=np.array(seglist).ravel()[i]
                segPromap[spmap==rindex[i]]=PreMap[i,1]
                segPromapEdge[spmap==rindex[i]]=PreMap[i,2]
                groundtruth[spmap==rindex[i]]=np.array(labellist).ravel()[i]

            coord.join(threads)

            fig,ax= plt.subplots(nrows=2,ncols=3)

            from skimage.segmentation import mark_boundaries
            ax[0,0].set_title('Segmentation with superpixel map')
            ax[0,0].imshow(mark_boundaries(segResult, spmap))
            ax[0,1].set_title('Segmentation with ground truth map')
            ax[0,1].imshow(segResult)
            ax[0,1].imshow(labelOrg,alpha=0.5,cmap='jet')
            ax[0,2].set_title('Reading label')
            ax[0,2].imshow(groundtruth)

            ax[1,0].set_title('liver Probabilities map')
            ax[1,0].imshow(segPromap)
            ax[1,1].set_title('edge Probabilities map')
            ax[1,1].imshow(segPromapEdge)
            ax[1,2].set_title('liver +edge Probabilities map')
            ax[1,2].imshow(segPromapEdge+segPromap)

            segthpro=segPromapEdge+segPromap
            segthpro[segthpro<0.8]=0

    from skimage.segmentation import active_contour
    from skimage.measure import find_contours
    from skimage.filters import gaussian
    from skimage import morphology

    #edg=sobel(segResult.astype(int))
    segmorp=morphology.remove_small_objects(segthpro.astype(bool),5000)
    segopen=morphology.opening(segmorp,morphology.disk(3))
    segclose=morphology.closing(segopen,morphology.disk(15))
    fig,ax=plt.subplots(1,3)
    ax=ax.ravel()
    ax[0].imshow(segmorp)
    ax[0].set_title('Removed the small objects')
    ax[1].imshow(segopen)
    ax[1].set_title('After open operation')
    ax[2].imshow(segclose)
    ax[2].imshow(labelOrg,alpha=0.5,cmap='jet')
    ax[2].set_title('After close operation')
    plt.axis('off')
    from MiaUtils import Miametrics as metric
    mt=metric.MiaMetrics(logger)
    dsc=mt.DSCMetric(segclose,labelOrg.astype(bool))
    print('The dice similarity coefficient score is {}'.format(dsc))

    voe=mt.VOEMetric(segclose,labelOrg.astype(bool))
    print('The Volume overlap Error score is {}'.format(voe))

    rvd=mt.RVDMetric(segclose,labelOrg.astype(bool))
    print('The Relative volume difference score is {}'.format(rvd))

    from medpy.metric.binary import hd
    from medpy.metric.binary import asd
    from medpy.metric.binary import  obj_fpr
    from medpy.metric.binary import  obj_tpr
    Asd=asd(segclose,labelOrg.astype(bool))
    print('The Asd score is {}'.format(Asd))

    HD=hd(segclose,labelOrg.astype(bool))
    print('The Hausdorff Distance score is {}'.format(HD))
###************************************************************************
    #### superpixel-graph cuts method computation
#######********************************************************************
    from skimage import segmentation, color,filters
    from skimage.future import graph
    img=DataNormalize(imgOrg)/255
    img=np.dstack((np.dstack((img, img)), img))
    labels1 = segmentation.slic(img, compactness=5, n_segments=2000,sigma=1)
    #labels1=spmap
    out1 = color.label2rgb(labels1, img, kind='avg')
    edge_map = filters.sobel(color.rgb2gray(img))
    g = graph.rag_boundary(labels1, edge_map)
    #g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img, kind='avg')

    fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
    ax[0].imshow(out1)
    ax[1].imshow(out2)
    for a in ax:
       a.axis('off')
    plt.tight_layout()
Example #26
#array = np.transpose(array, (1, 0, 2))
print(array.shape)
print('Obtaining superstructures')
segments = slic(array,
                compactness=compactness,
                n_segments=numSegments,
                multichannel=False,
                convert2lab=convert2lab)
print('Number of SV: ', len(np.unique(segments)))
segments += 1
array = array.astype('int64')
mag = ndi.generic_gradient_magnitude(array, ndi.sobel, float)
mag *= 255.0 / np.max(mag)  # normalize (Q&D)

print('Obtaining RAG')
rag = graph.rag_boundary(segments, mag, connectivity=1)
print("Merging RAG's segments")
segments2 = graph.merge_hierarchical(segments,
                                     rag,
                                     253,
                                     in_place_merge=True,
                                     rag_copy=False,
                                     merge_func=ji.merge_boundary,
                                     weight_func=ji.weight_boundary)
print('Final Number of SV: ', len(np.unique(segments2)))

# 110
#graph.show_rag(segments, rag, array, edge_cmap= 'viridis')
destino_guardar = Path(
    'C:/Users/Juan Ignacio/Documents/Movistar Cloud/TFM/Prueba_Feima_multiframe'
)
Example #27
Construct a region boundary RAG with the ``rag_boundary`` function. The
function :py:func:`skimage.future.graph.rag_boundary` takes an
``edge_map`` argument, which gives the significance of a feature (such as
edges) being present at each pixel. In a region boundary RAG, the edge weight
between two regions is the average value of the corresponding pixels in
``edge_map`` along their shared boundary.

"""
from skimage.future import graph
from skimage import data, segmentation, color, filters, io
from matplotlib import pyplot as plt

img = data.coffee()
gimg = color.rgb2gray(img)

labels = segmentation.slic(img, compactness=30, n_segments=400, start_label=1)
edges = filters.sobel(gimg)
edges_rgb = color.gray2rgb(edges)

g = graph.rag_boundary(labels, edges)
lc = graph.show_rag(labels,
                    g,
                    edges_rgb,
                    img_cmap=None,
                    edge_cmap='viridis',
                    edge_width=1.2)

plt.colorbar(lc, fraction=0.03)
io.show()
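Each edge of the boundary RAG stores the averaged ``edge_map`` value as ``'weight'`` and the number of boundary pixels it was averaged over as ``'count'`` (see the test in Example #14); a small inspection sketch, continuing from the snippet above and picking an arbitrary edge:

u, v = next(iter(g.edges()))
print(u, v, g[u][v]['weight'], g[u][v]['count'])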
Example #28
)
imagen1 = "prueba_slic_100_pequena.tif"
imagen2 = 'prueba_slic_100.tif'
ruta1 = directorio / imagen2

prueba1 = ji.read_tiff(ruta1)
numSegments = 2200000  ## 1/30 #19000 prueba2d #100000 slic100 peque
#superp = slic(prueba1, n_segments = numSegments,compactness= 0.1, multichannel= False, convert2lab= False)
superp += 1
print('Number of SV: ', len(np.unique(superp)))

prueba1 = prueba1.astype('int64')  # Precision
mag = ndi.generic_gradient_magnitude(prueba1, ndi.sobel,
                                     float)  # Float for rag_boundary
#mag *= 255.0 / np.max(mag)  # normalize (Q&D)
ragb = graph.rag_boundary(superp, mag, connectivity=1)
# CLUST "CORTAR"
umbralc = 1000
superp_co = graph.cut_threshold(superp, ragb, umbralc)
print('Intermediate Number of SV: ', len(np.unique(superp_co)))
superp_co += 1
# CLUST "UNIR"
umbralb = 3500
ragb2 = graph.rag_boundary(superp_co, mag, connectivity=1)
superp_un = graph.merge_hierarchical(superp_co,
                                     ragb2,
                                     umbralb,
                                     in_place_merge=True,
                                     rag_copy=False,
                                     merge_func=ji.merge_boundary,
                                     weight_func=ji.weight_boundary)
Example #29
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr  7 10:13:38 2017

@author: luiz
"""

from skimage.future import graph
from skimage import data, segmentation, color, filters, io
from matplotlib import pyplot as plt


img = data.coffee()
gimg = color.rgb2gray(img)

labels = segmentation.slic(img, compactness=30, n_segments=400)
edges = filters.sobel(gimg)
edges_rgb = color.gray2rgb(edges)

g = graph.rag_boundary(labels, edges)
lc = graph.show_rag(labels, g, edges_rgb, img_cmap=None, edge_cmap='viridis',
                    edge_width=1.2)

plt.colorbar(lc, fraction=0.03)
io.show()
Example #30
def process(path, N_SEGM, THRESH, ADAP):

    im = Image.open(path)  #'/home/olusiak/Obrazy/rois/41136_001.png-2.jpg')
    #ran=1

    #im_arr2 = np.fromstring(im.tobytes(), dtype=np.uint8)
    #im_arr2 = im_arr2.reshape((im.size[1], im.size[0], 3))
    #for x in range(len(im_arr2)):
    #    for y in range(len(im_arr2[0])):
    #        im_arr2[x][y][2]=0#=(im_arr2[0],255,255)
    #        im_arr2[x][y][1]=0
    #plt.imshow(im_arr2)
    #plt.show()

    im_arr = np.frombuffer(im.tobytes(), dtype=np.uint8).copy()  # frombuffer replaces the deprecated np.fromstring; copy() keeps it writable
    im_arr = im_arr.reshape((im.size[1], im.size[0], 3))

    gray = cv2.cvtColor(im_arr, cv2.COLOR_BGR2GRAY)

    if ADAP:
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY_INV, 55, 20)

    else:
        data = dict()
        for i in gray.ravel():
            if i in data:  # dict.has_key() was removed in Python 3
                data[i] += 1
            else:
                data[i] = 1
        # if data.has_key(255):
        max = 0
        id = None
        for k in data.keys():
            # print 'k ',k
            if data[k] >= max and k < 240 and k > 130:  # 50:#240:
                id = k
                max = data[k]
        # for k in data.keys():
        #    print 'k ',k,' ',data[k]
        # print 'key: ',id,' - ',max
        id -= THRESH  # 35
        ret, thresh = cv2.threshold(
            gray, id, 255, cv2.ADAPTIVE_THRESH_MEAN_C + cv2.THRESH_BINARY_INV
        )  #+cv2.THRESH_OTSU)#cv2.ADAPTIVE_THRESH_MEAN_C+

    #plt.matshow(thresh,cmap='gray')
    #plt.show()

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    opening2 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    #plt.imshow(opening)
    #plt.show()
    for i in range(opening.shape[0]):
        for j in range(opening.shape[1]):
            if opening[i][j] == 0:
                im_arr[i][j] = (0, 0, 0)

    #plt.imshow(im_arr)
    #plt.show()
    #plt.matshow(opening)
    #plt.show()
    #plt.imshow(im_arr)
    #plt.show()

    #im = Image.fromarray(im_arr)
    #im.thumbnail((im.size[0] / ran, im.size[1] / ran), Image.ANTIALIAS)
    #im_arr = np.fromstring(im.tobytes(), dtype=np.uint8)
    #im_arr = im_arr.reshape((im.size[1], im.size[0], 3))

    #im = Image.fromarray(opening)
    #im.thumbnail((im.size[0] / ran, im.size[1] / ran), Image.ANTIALIAS)
    #opening = np.fromstring(im.tobytes(), dtype=np.uint8)
    #opening = opening.reshape((im.size[1], im.size[0]))

    img2 = im_arr

    edges = filters.sobel(color.rgb2gray(img2))
    labels = segmentation.slic(img2, compactness=30,
                               n_segments=N_SEGM)  #30 2000
    #g = graph.rag_mean_color(img2, labels)#added

    g = graph.rag_boundary(labels, edges)  #first

    #graph.show_rag(labels, g, img)
    #plt.title('Initial RAG')

    labels2 = graph.merge_hierarchical(
        labels,
        g,
        thresh=0.98,
        rag_copy=False,  # 0.08
        in_place_merge=True,
        merge_func=merge_boundary,
        weight_func=weight_boundary)

    #final_labels = graph.cut_threshold(labels, g, 29)#added
    #final_label_rgb = color.label2rgb(final_labels, img2, colors=cols, kind='overlay')#added
    #labels2=final_label_rgb#added
    #plt.imshow(final_label_rgb)
    #plt.show()
    #graph.show_rag(labels, g, im)
    #plt.title('RAG after hierarchical merging')

    #plt.figure()

    #ret, opening = cv2.threshold(opening,0,255,cv2.THRESH_OTSU)#cv2.ADAPTIVE_THRESH_MEAN_C+

    #out = color.label2rgb(labels2, img2, kind='avg')
    s = set()
    for row in labels2:
        for e in row:
            s.add(e)
    #print 'sss ',len(s)
    cols = list()
    c = 0
    cp = len(s) + 5
    for r in range(0, 256, 1):
        for r2 in range(0, 256, 1):
            for r3 in range(0, 256, 1):
                cols.append((r, r2, r3))
                cp -= 1
                if cp == 0:
                    break

            if cp == 0:
                break
        if cp == 0:
            break
    # print 'cols', len(cols)

    shuffle(cols)

    img2 = np.zeros_like(img2)
    img2[:, :, 0] = opening2
    img2[:, :, 1] = opening2
    img2[:, :, 2] = opening2
    out = color.label2rgb(labels2, img2, colors=cols, kind='overlay', alpha=1)

    for i in range(img2.shape[0]):
        for j in range(img2.shape[1]):
            if img2[i][j][0] == 0 and img2[i][j][1] == 0 and img2[i][j][
                    2] == 0:  #,0,0]:
                out[i][j][0] = 0
                out[i][j][1] = 0
                out[i][j][2] = 0

    #print 'OUT'
    #plt.imshow(out)
    #plt.show()

    #plt.imshow(out)
    #plt.show()

    #xx = set()

    #for i in range(out.shape[0]):  # fx.shape[0]):
    #    for j in range(out.shape[1]):  # fx.shape[1]):


#            s = (out[i, j][0], out[i, j][1], out[i, j][2])

#            xx.add(s)

    out = np.uint8(out)
    im = Image.fromarray(out)

    #plt.imshow(out)
    #plt.show()
    return im
Example #31
for l in files:
    s = "pic/Expressionism/"
    s += str(l)
    print(s)
    f.write(s + '\t')
    img = io.imread(s)

    img = cv.GaussianBlur(img, (11, 11), 0)
    labels = segmentation.slic(img, compactness=30, n_segments=150)
    labels = labels + 1
    regions = regionprops(labels)
    edge_map = filters.sobel(color.rgb2gray(img))
    label_rgb = color.label2rgb(labels, img, kind='avg')
    label_rgb = segmentation.mark_boundaries(label_rgb, labels, (0, 1, 1))
    edge_map = filters.sobel(color.rgb2gray(img))
    rag = graph.rag_boundary(labels, edge_map)
    for region in regions:
        rag.node[region['label']]['centroid'] = region['centroid']
        rag.node[region['label']]['color'] = label_rgb[region['centroid']]

    red = np.asarray([1, 0, 0])
    green = np.asarray([0, 1, 0])
    blue = np.asarray([0, 0, 1])
    yellow = np.asarray([1, 1, 0])
    purple = np.asarray([0.5, 0, 0.5])
    orange = np.asarray([1, 0.5, 0])
    palette = [red, green, blue, yellow, purple, orange]

    harmony = 0
    disharmony = 0
    tr = 0.4
Example #32
def create_graph(floormap, return_dist=False, room_coordinates=False):
    """
    Segment the floormap into rooms and create a Region Adjacency Graph for the level.
    Many settings for decorating the graph are possible, by default the simplest form is returned.
    :param floormap: Binary image representing the level floor
    :param return_dist: If true, also returns the distance map of each point to the closest wall.
    :param room_coordinates: If true, each graph node will contain the room vertices and information about the walls
    :return: (Roommap, Graph) if return_dist is False, (Roommap, Graph, dist) otherwise
    """
    # Ensuring that floormap is always a boolean array
    floormap = floormap.astype(bool)  # np.bool was removed from NumPy
    #floormap = rescale(floormap, 2)
    dist = ndi.distance_transform_edt(floormap)
    threshold = int(dist.max())
    optimal_threshold = 0
    number_of_centers = 0
    # Finding room center and finding the optimal threshold (the one that maximizes the number of rooms)
    for i in range(int(dist.max()), int(dist.min()) - 1, -1):
        local_max = peak_local_max(dist,
                                   threshold_abs=threshold - i,
                                   indices=False,
                                   labels=floormap,
                                   min_distance=3)
        markers = ndi.label(local_max)[0]
        if markers.max() > number_of_centers:
            optimal_threshold = threshold - i
            number_of_centers = markers.max()

    # Computing roommap with the optimal threshold
    local_max = peak_local_max(dist,
                               min_distance=3,
                               indices=False,
                               labels=floormap,
                               threshold_abs=optimal_threshold)
    markers = ndi.label(local_max)[0]
    roommap = watershed(-dist, markers, mask=floormap)

    room_RAG_boundaries = skg.rag_boundary(
        roommap, filters.sobel(color.rgb2gray(roommap)))
    if room_coordinates:
        # For each floor...
        floors = label(floormap)
        for floor_id in range(max(1, floors.min()),
                              floors.max() +
                              1):  # Skipping label 0 (background)
            # Building the wall list for floor boundaries
            # Here the map is upsampled by a factor 2 before finding the contours, then coordinates are divided by two.
            # This is for avoiding "X" shaped connections between rooms due to how find_contours work
            floor_contour = find_contours(resize(
                floors == floor_id, (floors.shape[0] * 2, floors.shape[1] * 2),
                order=0),
                                          0.5,
                                          positive_orientation='low')[0] / 2
            walls_vertices = [tuple(v) for v in floor_contour]
            floor_boundaries = tuple(vertices_to_segment_list(walls_vertices))
            # Map of rooms belonging to current floor
            rooms = roommap * (floors == floor_id)
            for room_id in range(max(1, rooms.min()),
                                 rooms.max() +
                                 1):  # Skipping label 0 (background)
                if room_id not in rooms:
                    # Some room id may be in another floor, if they are enumerated horizontally
                    continue
                # Here the map is upsampled by a factor 2 before finding the contours, then coordinates are divided by two.
                # This is for avoiding "X" shaped connections between rooms due to how find_contours work
                room_contour = find_contours(resize(
                    rooms == room_id, (rooms.shape[0] * 2, rooms.shape[1] * 2),
                    order=0),
                                             0.5,
                                             fully_connected='high',
                                             positive_orientation='low')[0] / 2
                rooms_vertices = [tuple(v) for v in room_contour]
                room_boundaries = tuple(
                    vertices_to_segment_list(rooms_vertices))

                room_RAG_boundaries.node[room_id]['walls'] = list()
                for segment in room_boundaries:
                    leads_to = 0 if segment in floor_boundaries else None  # We cannot yet know the edges for rooms other than the background
                    room_RAG_boundaries.node[room_id]['walls'].append(
                        (segment, leads_to))

            # Here we still miss the relation between boundary and edges.
            # Second pass
            for room_id in range(max(1, rooms.min()), rooms.max() + 1):
                if room_id not in rooms:
                    # Some room id may be in another floor, if they are enumerated horizontally
                    continue
                boundaries_current = {
                    wall
                    for wall in room_RAG_boundaries.node[room_id]['walls']
                    if wall[1] is None
                }
                for neigh in room_RAG_boundaries.adj[room_id]:
                    if neigh == 0:
                        continue
                    # Finding the neighbour boundaries. We must consider both directions for each vertex
                    boundaries_neigh = {
                        wall
                        for wall in room_RAG_boundaries.node[neigh]['walls']
                        if wall[1] is None
                    }
                    boundaries_neigh_reverse = {
                        _reverse_wall(wall)
                        for wall in room_RAG_boundaries.node[neigh]['walls']
                        if wall[1] is None
                    }

                    common_segments = boundaries_current.intersection(
                        boundaries_neigh)
                    common_segments_reversed = boundaries_current.intersection(
                        boundaries_neigh_reverse)
                    # Marking the boundary in the two nodes with the destination node
                    # Each node will contain the list
                    for cs in common_segments:
                        i_current = room_RAG_boundaries.node[room_id][
                            'walls'].index(cs)
                        i_neighbour = room_RAG_boundaries.node[neigh][
                            'walls'].index(cs)
                        room_RAG_boundaries.node[room_id]['walls'][
                            i_current] = (cs[0], neigh)
                        room_RAG_boundaries.node[neigh]['walls'][
                            i_neighbour] = (cs[0], room_id)
                    # Same thing in the case of reversed segments
                    for cs in common_segments_reversed:
                        rev_cs = _reverse_wall(cs)
                        i_current = room_RAG_boundaries.node[room_id][
                            'walls'].index(cs)
                        i_neighbour = room_RAG_boundaries.node[neigh][
                            'walls'].index(rev_cs)
                        room_RAG_boundaries.node[room_id]['walls'][
                            i_current] = (cs[0], neigh)
                        room_RAG_boundaries.node[neigh]['walls'][
                            i_neighbour] = (rev_cs[0], room_id)

    if return_dist:
        return roommap, room_RAG_boundaries, dist
    return roommap, room_RAG_boundaries
Example #33
    def quantify(self, tile, img_seg, channel_names=None,
                 include_cell_intensity=True,
                 include_nucleus_intensity=False,
                 include_cell_graph=False,
                 spot_count_channels=None,
                 spot_count_params=None):
        ncyc, nz, _, nh, nw = tile.shape

        # Move cycles and channels to last axes (in that order)
        tile = np.moveaxis(tile, 0, -1)
        tile = np.moveaxis(tile, 1, -1)

        # Collapse tile to ZHWC (instead of cycles and channels being separate)
        tile = np.reshape(tile, (nz, nh, nw, -1))
        nch = tile.shape[-1]

        # Generate default channel names list if necessary
        if channel_names is None:
            channel_names = ['{:03d}'.format(i) for i in range(nch)]

        if nch != len(channel_names):
            raise ValueError(
                'Tile has {} channels but given channel name list has {} (they should be equal); '
                'channel names given = {}, tile shape = {}'
                .format(nch, len(channel_names), channel_names, tile.shape)
            )

        # Configure features to be calculated based on provided flags
        feature_calculators = [BasicCellFeatures()]
        if include_cell_intensity:
            feature_calculators.append(IntensityFeatures(nch, channel_names, COMP_CELL))
        if include_nucleus_intensity:
            feature_calculators.append(IntensityFeatures(nch, channel_names, COMP_NUCLEUS))
        if include_cell_graph:
            feature_calculators.append(GraphFeatures())
        if spot_count_channels is not None:
            indexes = [channel_names.index(c) for c in spot_count_channels]
            params = spot_count_params or {}
            feature_calculators.append(SpotFeatures(indexes, spot_count_channels, **params))

        # Compute list of resulting feature names (values will be added in this order)
        feature_names = [v for fc in feature_calculators for v in fc.get_feature_names()]

        feature_values = []
        for z in range(nz):
            # Calculate properties of masked+labeled cell components
            cell_props = measure.regionprops(img_seg[z][CELL_CHANNEL], cache=False)
            nucleus_props = measure.regionprops(img_seg[z][NUCLEUS_CHANNEL], cache=False)
            if len(cell_props) != len(nucleus_props):
                raise ValueError(
                    'Expecting cell and nucleus properties to have same length (nucleus props = {}, cell props = {})'
                    .format(len(nucleus_props), len(cell_props))
                )

            # Compute RAG for cells if necessary
            graph = None
            if include_cell_graph:
                labels = img_seg[z][CELL_CHANNEL]

                # rag_boundary fails on all zero label matrices so default to empty graph if that is the case
                # see: https://github.com/scikit-image/scikit-image/blob/master/skimage/future/graph/rag.py#L386
                if np.count_nonzero(labels) > 0:
                    graph = label_graph.rag_boundary(labels, np.ones(labels.shape))
                else:
                    graph = label_graph.RAG()

            # Loop through each detected cell and compute features
            for i in range(len(cell_props)):
                props = ObjectProperties(cell=cell_props[i], nucleus=nucleus_props[i])

                # Run each feature calculator and add results in order
                feature_values.append([
                    v for fc in feature_calculators
                    for v in fc.get_feature_values(tile, img_seg, graph, props, z)
                ])

        return pd.DataFrame(feature_values, columns=feature_names)
Example #34
    if not in_place:
        self.remove_node(dst)

    return new

graph.RAG.merge_nodes = merge_nodes   


# <markdowncell>
# Now we can make a RAG that will be mergeable:



# <codecell>

g = graph.rag_boundary(ws, edges)   


# <markdowncell>
# g is now a *graph* in which each region is a node, and each node links to that
# region's neighbors. The edges hold properties about the boundary between
# the corresponding regions:



# <codecell>

plt.imshow(ws == 45)
print(g[45])