Example #1
def test_rag_error():
    img = np.zeros((10, 10, 3), dtype='uint8')
    labels = np.zeros((10, 10), dtype='uint8')
    labels[:5, :] = 0
    labels[5:, :] = 1
    with testing.raises(ValueError):
        graph.rag_mean_color(img, labels, 2, 'non existent mode')
Example #2
def mergeSuperpixels(
        superpixels, rgb_frame, sat_frame, depth_frame,
        rgb_thresh=55, sat_thresh=0.20, depth_thresh=25):

    if rgb_thresh >= 0:
        rgb_rag = graph.rag_mean_color(rgb_frame, superpixels)
        sp_merged_rgb = graph.cut_threshold(superpixels, rgb_rag, rgb_thresh)
        sp_joined = sp_merged_rgb

    if sat_thresh >= 0:
        sat_rag = graph.rag_mean_color(sat_frame, superpixels)
        sp_merged_sat = graph.cut_threshold(superpixels, sat_rag, sat_thresh)
        if rgb_thresh >= 0:
            sp_joined = segmentation.join_segmentations(
                sp_joined, sp_merged_sat)

    if depth_thresh >= 0:
        depth_rag = graph.rag_mean_color(depth_frame, superpixels)
        sp_merged_depth = graph.cut_threshold(
            superpixels, depth_rag, depth_thresh)
        if sat_thresh >= 0:
            sp_joined = segmentation.join_segmentations(
                sp_joined, sp_merged_depth)

    return sp_joined
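A minimal usage sketch for the helper above (assumptions: the surrounding module imports graph and segmentation from scikit-image, and the three frames are aligned arrays of the same height and width; the synthetic frames below are placeholders):

import numpy as np
from skimage import segmentation

# placeholder frames standing in for aligned RGB, saturation and depth views of one scene
rgb_frame = np.random.randint(0, 255, (120, 160, 3), dtype=np.uint8)
sat_frame = np.random.rand(120, 160).astype(np.float32)
depth_frame = np.random.randint(0, 255, (120, 160), dtype=np.uint8)

# oversegment the RGB frame, then merge per modality and join the results
superpixels = segmentation.slic(rgb_frame, n_segments=200, compactness=10, start_label=1)
sp_joined = mergeSuperpixels(superpixels, rgb_frame, sat_frame, depth_frame,
                             rgb_thresh=55, sat_thresh=0.20, depth_thresh=25)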
Example #3
def test_rag_error():
    img = np.zeros((10, 10, 3), dtype='uint8')
    labels = np.zeros((10, 10), dtype='uint8')
    labels[:5, :] = 0
    labels[5:, :] = 1
    with testing.raises(ValueError):
        graph.rag_mean_color(img, labels,
                             2, 'non existent mode')
Example #4
    def get_rag_labels_signal(self):
        """Main working horse.

        Calculates the RAG, its corresponding labels and
        graph signal.
        """

        # first we create an initial RAG based on the k-means
        # segmentation
        rag = graph.rag_mean_color(self.image_tensor, self.segmentation_labels)

        labels = graph.cut_threshold(self.segmentation_labels, rag, thresh=20)

        # create rag from these new labels
        rag = graph.rag_mean_color(self.image_tensor, labels)

        # add 1 so labels start at 1
        labels = labels + 1

        # We will add a few more node attributes
        properties = measure.regionprops(labels)

        # we will create the graph signal now as well
        # so we only have to iterate through all the regions once.
        X = []

        # properties is a list of region measurements, one per label;
        # because the node ids are 0-based and the labels were shifted by 1,
        # properties[node] gives the region belonging to that node.
        for region in rag.nodes:
            idx = region

            # get corresponding properties
            props = properties[idx]

            # get centroid and normalise it
            centroid_x, centroid_y = props.centroid
            centroid_x = centroid_x / self.width
            centroid_y = centroid_y / self.height

            # get orientation of region
            orientation = props.orientation

            # update node
            rag.nodes[idx]["centroid"] = [centroid_x, centroid_y]
            rag.nodes[idx]["orientation"] = orientation

            # turn all the node attributes into an ordered array
            # and append it as a row to the graph signal
            X.append(attributes2array(rag.nodes[idx]))

        # stack X rows to create one array
        X = np.stack(X)

        return (rag, labels, X)
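The loop above relies on attributes2array, which is not shown in this excerpt; a rough sketch of what such a helper might do, assuming it simply flattens a fixed subset of the node attributes into one feature row (the attribute order chosen here is hypothetical):

import numpy as np


def attributes2array(node_attrs):
    # hypothetical helper: flatten selected RAG node attributes into a 1-D feature row
    feats = list(np.ravel(node_attrs['mean color']))  # per-channel mean color set by rag_mean_color
    feats.extend(node_attrs['centroid'])              # normalised centroid added above
    feats.append(node_attrs['orientation'])           # region orientation added above
    return np.asarray(feats, dtype=float)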
Example #5
def clustering_RAG(img, op="disc", test=False):

    to_plot = []

    img_red = img[:, :, 0]

    if test:
        to_plot.append(("red_chan", img))

    (ancho, alto) = img_red.shape

    rr, cc = ellipse(ancho / 2, alto / 2, (ancho / 2), (alto / 2))
    mask_background = np.zeros((ancho, alto)) > 0
    mask_background[rr, cc] = 1
    """ Clustering k-means type """
    if op == "disc":
        labels1 = segmentation.slic(img,
                                    mask=mask_background,
                                    n_segments=250,
                                    compactness=15,
                                    sigma=1,
                                    start_label=1)
        out1 = color.label2rgb(labels1, img)

        if test:
            to_plot.append(("Cluster1", out1))

        g = graph.rag_mean_color(img, labels1, mode='similarity')
        labels2 = graph.cut_normalized(labels1, g)

    if op == "cup":
        labels1 = segmentation.slic(img,
                                    mask=mask_background,
                                    n_segments=100,
                                    compactness=10,
                                    sigma=1,
                                    start_label=1)
        out1 = color.label2rgb(labels1, img)

        if test:
            to_plot.append(("Cluster2", out1))

        g = graph.rag_mean_color(img, labels1, mode='similarity')
        labels2 = graph.cut_threshold(labels1, g, 500)

    out2 = color.label2rgb(labels2, img)

    if test:
        to_plot.append(("RAGs", out2))

    if test:
        vi.plot_multy(to_plot, 1, 3, 'K-means + RAGs')
Example #6
def generate_texture(img,
                     slic_n=200,
                     slic_compactness=25,
                     rag_from_binary=False,
                     edge_threshold=1e-4,
                     verbose=False):
    img = img_as_float(img)

    if rag_from_binary:
        if verbose:
            print('Applying initial thresholding...', flush=True)
        grey_img = color.rgb2grey(img)
        binary_mask = (grey_img > threshold_li(grey_img)) * 1.0

    if verbose:
        print('Constructing superpixels...', flush=True)
    segments = slic(img, n_segments=slic_n, compactness=slic_compactness)
    if verbose:
        out_avg = color.label2rgb(segments, img, kind='avg')
        io.imshow(out_avg)
        io.show()

    if verbose:
        print('Building RAG...', flush=True)
    if rag_from_binary:
        RAG = graph.rag_mean_color(binary_mask, segments, mode='similarity')
    else:
        RAG = graph.rag_mean_color(img, segments, mode='similarity')
    if verbose:
        graph.show_rag(segments, RAG, img, border_color=(1, 0.7, 0))
        io.show()

    foreground_mask = find_foreground(img,
                                      segments,
                                      RAG,
                                      eps=edge_threshold,
                                      verbose=verbose)

    if foreground_mask.any():
        vertex, side = fit_square(foreground_mask, verbose=verbose)
        texture_patch = img[vertex[0]:vertex[0] + side + 1,
                            vertex[1]:vertex[1] + side + 1]
        if verbose:
            io.imshow(texture_patch)
            io.show()
        return texture_patch
    else:
        print('Texture can\'t be generated, try different parameters')
        return np.array([])
Example #7
    def graphNormalizedCuts(self, img, labels, thresh=0.5, num_cuts=100, plot=True, save=0):
        g = graph.rag_mean_color(img, labels)
        new_labels = graph.cut_normalized(labels, g, thresh, num_cuts)
        if plot:
            plotLibs().dispImg(color.label2rgb(new_labels, img, kind='avg'), save=save, title='Ncut Label rgb Image')
            plotLibs().plotBoundaries(img, new_labels, save=save, title='Ncut Boundary Images')
        return new_labels
Example #8
    def process(self):
        """initialise values for image processing"""
        data_list = []
        n_seg = 50
        compact = 1
        file_names = self.raw_file_names
        masks = [
            'MRI-selection-masks/' + file
            for file in listdir('MRI-selection-masks/')
        ]
        for scan, mask in zip(file_names, masks):
            img = Image.open(scan)
            msk = Image.open(mask)

            scan_array = np.array(img)
            mask_array = np.array(msk)

            scan_segments = 1 + segmentation.slic(
                scan_array, compactness=1, n_segments=100)
            g = graph.rag_mean_color(scan_array, scan_segments)
            """Initialise values for data model construction"""
            edges = []
            kp_value = []
            kp_pos = []
            mask = []
            """Create edges in the graph"""
            for start in g.adj._atlas:
                for stop in list(g.adj._atlas[start].keys()):
                    edges.append([start, stop])
            regions = regionprops(scan_segments,
                                  intensity_image=rgb2gray(scan_array))
            """Collect kp positions and values"""
            for props in regions:
                cy, cx = props.weighted_centroid
                if (isnan(cy)) or (isnan(cx)):
                    cy, cx = props.centroid
                kp_pos.append([cy, cx])
                kp_value.append(scan_array[int(round(cy)), int(round(cx))])
                mask_value = mask_array[int(round(cy)), int(round(cx))]
                if mask_value > 0:
                    mask.append(1)
                else:
                    mask.append(0)
            """Format values to fit into the data_model"""
            keypoint_pos = torch.tensor(kp_pos)
            keypoint_val = torch.tensor(kp_value, dtype=torch.float32)
            y = torch.tensor(mask, dtype=torch.long)
            train_mask, test_mask, val_mask = keypoint_function.generate_random_masks(
                len(mask))
            """Create data object from image data"""
            data = Data(x=keypoint_val,
                        edge_index=edges,
                        pos=keypoint_pos,
                        y=y,
                        train_mask=train_mask,
                        test_mask=test_mask,
                        val_mask=val_mask)
            data_list.append(data)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example #9
    def meanColorRAG(cls, data, labels, sigma):
        mat = np.copy(labels) + 1 if np.min(labels) == 0 else np.copy(labels)
        g = graph.rag_mean_color(data, mat, connectivity=2)
        props = regionprops(mat)
        labelsToIndex = cls.labelsToPropsIndex(props)
        cls.addData(g, data.shape, sigma, props, labelsToIndex)
        return g
Example #10
def create_annotation_by_changing_colors(path_to_image: str) -> str:
    """
    Creates an annotation by replacing each color of the original image with the closest (by Euclidean distance) color from a list of annotation colors
    :param path_to_image: Path to original image
    :return: Path to created annotation
    """
    # Read image
    image = cv2.imread(path_to_image)
    # Perform image segmentation
    labels = sgm.slic(image, n_segments=400, compactness=30)
    # Use average color for segments
    segmented_image = clr.label2rgb(labels, image, kind="avg")
    # Now let's smooth the differences even further by cutting with a threshold
    labels = grh.cut_threshold(labels,
                               grh.rag_mean_color(segmented_image, labels),
                               thresh=29)
    # And apply new labels
    segmented_image = clr.label2rgb(labels, segmented_image, kind="avg")
    # Prepare K-means cluster (use 4 clusters since annotation should have 4 colors)
    model = cls.KMeans(n_clusters=4)
    pixels = segmented_image.reshape(image.shape[0] * image.shape[1], 3)
    # Cluster pixels of segmented image
    model.fit(pixels)
    # Create an annotation
    annotation = np.ravel(ANNOTATION_COLORS[model.labels_]).reshape(
        image.shape).astype(np.uint8)
    image_file_name = os.path.splitext(os.path.basename(path_to_image))[-2]
    path_to_annotation = "%s%s" % (os.path.join(os.path.dirname(path_to_image),
                                                "%s_sem" % image_file_name),
                                   os.path.splitext(
                                       os.path.basename(path_to_image))[-1])
    # Save annotation
    cv2.imwrite(path_to_annotation, annotation)
    # Return a path to an annotation file
    return path_to_annotation
Example #11
def imagedesprictor(image):

    labels = segmentation.slic(image, compactness=30, n_segments=400)
    g = graph.rag_mean_color(image, labels)
    u1 = g.edges()
    a = 0

    for x, y in u1:
        u = g.get_edge_data(x, y)
        a = a + u['weight']

    max1 = max(y for x, y in u1)
    max2 = max(x for x, y in u1)
    max3 = max(max1, max2)
    b1 = np.zeros((max3 + 1))
    l = len(u1)

    for i in range(max3 + 1):
        b = 0
        Ai = g.edges(i)
        for x, y in Ai:
            u = g.get_edge_data(x, y)
            b = b + u['weight']
        b1[i] = b / a

    array1 = np.zeros((l, 2))
    i = 0
    for x, y in u1:
        array1[i, 0] = b1[x]
        array1[i, 1] = b1[y]
        i = i + 1

    return array1
Example #12
def RAG(im, rag_filename='colorNet.png'):
    #labels , node_labels , cluster_centers , _= mean_shift(im);
    labels, node_labels, cluster_centers, _ = k_means(im)
    #print node_labels
    g = graph.rag_mean_color(im, labels, mode='distance')
    display(g, 'Region Adjacency Graph', node_labels, cluster_centers,
            rag_filename)
Example #13
    def plotRagwithColorMaps(self, img, labels):
        g = graph.rag_mean_color(img, labels)
        fig, ax = plt.subplots(nrows=2,
                               sharex=True,
                               sharey=True,
                               figsize=(6, 8))

        ax[0].set_title('RAG drawn with default settings')
        lc = graph.show_rag(labels, g, img, ax=ax[0])
        # specify the fraction of the plot area that will be used to draw the colorbar
        fig.colorbar(lc, fraction=0.03, ax=ax[0])

        ax[1].set_title('RAG drawn with grayscale image and viridis colormap')
        lc = graph.show_rag(labels,
                            g,
                            img,
                            img_cmap='gray',
                            edge_cmap='viridis',
                            ax=ax[1])
        fig.colorbar(lc, fraction=0.03, ax=ax[1])

        for a in ax:
            a.axis('off')

        plt.tight_layout()
        plt.show()
Example #14
def probabilitygraph(rgb):
    #  print rgb.shape
    rgb = rgb[0]
    #  print bottom[0][:,:,0].shape
    #  rgb[0] = (rgb[0]-np.min(rgb[0]))/(np.max(rgb[0])-np.min(rgb[0]))
    #  rgb[1] = (rgb[1]-np.min(rgb[1]))/(np.max(rgb[1])-np.min(rgb[1]))
    #  rgb[2] = (rgb[2]-np.min(rgb[2]))/(np.max(rgb[2])-np.min(rgb[2]))
    #  bottom[0][:,:,0] = preprocessing.normalize(bottom[0][:,:,0])
    #  bottom[0][:,:,1] = preprocessing.normalize(bottom[0][:,:,1])
    #  bottom[0][:,:,2] = preprocessing.normalize(bottom[0][:,:,2])
    img = rgb #bottom[0].data
    #  print "img", img.shape
    #  print img
    img = np.transpose(img, (1, 2, 0))
    img = np.array(img, dtype=float)  # np.float was removed in recent NumPy; use the builtin float
    #  print img

    #  print img.shape
    labels1 = segmentation.slic(img, compactness=30, n_segments=400)
    #  print "labels1",labels1
    #  out1 = color.label2rgb(labels1, img, kind='avg')
    #  print labels1.shape
    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img, kind='avg')
    #  print "out2,",out2
    return out2
Example #15
def superpixel(cv2_img,
               n_segments=1111,
               compactness=10,
               normalized_cut=False,
               debug=True):
    """
    parameters like 1111 and 10 are selected by my intuition
    :param cv2_img:  cv2.imread('../example_images/2007_000039.jpg')
    :param debug:    print debug info
    :return: labels: [h,w] numpy array, unique_ids: [0, 1, ..., superpixel_n - 1]
    """

    # labels = segmentation.slic(cv2_img, n_segments=1111, compactness=10)
    labels = segmentation.slic(cv2_img,
                               n_segments=n_segments,
                               compactness=compactness)

    if normalized_cut:
        g = graph.rag_mean_color(cv2_img, labels, mode='similarity')
        labels = graph.cut_normalized(labels, g)

    unique_ids = np.unique(labels)

    logger.debug('this image has {} unique superpixels'.format(
        len(unique_ids)))

    return labels, unique_ids
Example #16
def segFelzenszwalb(sub_templateg,
                    ragthreshold=35,
                    scale=100,
                    sigma=0,
                    min_size=3):

    #print('Segmenting...')
    #sub_templateg = border_remove(sub_templateg)

    #s = timer()
    segmentos = felzenszwalb(sub_templateg,
                             scale=scale,
                             sigma=sigma,
                             min_size=min_size)
    n_zeros = np.count_nonzero(segmentos)
    if n_zeros > 0:
        #print('Computing RAG...')
        rag = graph.rag_mean_color(sub_templateg, segmentos, mode='distance')
        new_labels = graph.cut_threshold(segmentos, rag, ragthreshold)
        segmentos = clear_border(new_labels)
    #e = timer()
    #print('Segmentation Done! ', round((e - s) / 60, 3), ' min')
    # print('tempo Felzenszwalb', len(np.unique(segmentos)))

    print('Felzenszwalb Segmentation for giants')
    plt.figure('Felzenszwalb Segmentation for giants', figsize=(10, 8))
    plt.imshow(mark_boundaries(sub_templateg, segmentos, color=(1, 0, 0)))
    plt.axis('off')
    plt.show()

    return segmentos
Example #17
def superpixel_extraction(image_i):
    """
    Extracts regions from image and refines segmentation with graph cut
    :param image_i: input image of shape (N, M, 3)
    :return: label matrix of shape (N, M, 1)
    """
    image = image_i.copy()
    image = image[:, :, ::-1]
    edges, top_left, bottom_right = perform_canny_edge_detection(image=image)
    grabbed_object = perform_grabcut_segmentation(image=image, top_left_point=top_left,
                                                  bottom_right_point=bottom_right)

    frontal_face, frontal_face_bbox = find_frontal_face(image=image, classifier_path='data/face.xml')
    if frontal_face is None:
        frontal_face, frontal_face_bbox = find_frontal_face(image=image,
                                                            classifier_path='data/profile.xml')
    if frontal_face is not None:
        skin_mask, hair_mask = get_subtraction_masks_for_skin_and_hair(image=image, face=frontal_face)
        result = grabbed_object.copy()
        result[skin_mask == 255] = [255, 255, 255]
        result[hair_mask == 255] = [255, 255, 255]
        x, y, w, h = frontal_face_bbox
        top_left = list(top_left)
        top_left[1] += h
        result = perform_grabcut_segmentation(image=result.copy(), top_left_point=top_left,
                                              bottom_right_point=bottom_right, iterations=1)
    else:
        result = grabbed_object.copy()
    result = result[:, :, ::-1]

    labels = segmentation.slic(image=result, n_segments=200, convert2lab=True, max_iter=100, min_size_factor=0.01,
                               max_size_factor=3, compactness=100)
    # labels = segmentation.quickshift(image, ratio=1, sigma=0.8, max_dist=20, convert2lab=True, kernel_size=10)
    rag = graph.rag_mean_color(image=color.rgb2lab(result), labels=labels)
    return graph.cut_threshold(labels=labels, rag=rag, thresh=3.5)
Example #18
    def normalized_graphcut(self, s_labels):
        start_time = time.time()
        _graph = graph.rag_mean_color(self.q_cur_frame, s_labels, mode="similarity")
        labels = graph.cut_normalized(s_labels, _graph)
        # self.c_frame = color.label2rgb(labels, self.q_cur_frame, kind='avg')
        cv2.imwrite("oucut.png", self.s_frame)
        print("Graph N Cut(preprocess) : ", time.time() - start_time)
Example #19
    def compute_embeddings(self):
        """
        Compute the RAG and embeddings from the initial presegmentation
        """

        # computing RAG
        self._RAG = graph.rag_mean_color(self._image_lab,
                                         self._presegmentation,
                                         connectivity=2,
                                         mode='similarity',
                                         sigma=self._sigma)

        # computing embeddings
        Gn2v = nv.Graph(self._RAG, False, 2, .5)
        Gn2v.preprocess_transition_probs()
        walks = Gn2v.simulate_walks(20, 20)
        walks = [list(map(str, walk)) for walk in walks]
        model = Word2Vec(walks,
                         size=16,
                         window=5,
                         min_count=0,
                         sg=1,
                         workers=4,
                         iter=1)

        representation = model.wv
        nodes = self._RAG.nodes()
        self._embeddings = [
            representation.get_vector(str(node)).tolist() for node in nodes
        ]
Example #20
    def _merge(self, thr_pixels=750, thr=0.65):
        """
        (Private) Procedure that merges while possible: first pixels, then similarity.
        This is Algorithm 2 from GeSt: a new image segmentation technique based on graph embedding.

        :param thr_pixels:
            The threshold size for merging.
        :param thr:
            The threshold for merging. This value depends on the distance considered.
        """

        if (self._segmentation_merged is None):
            self._segmentation_merged = copy(self._segmentation)
        # initial computation, will be maintained during algorithm
        self._merged_RAG = graph.rag_mean_color(self._image_lab,
                                                self._segmentation_merged,
                                                connectivity=2,
                                                mode='similarity',
                                                sigma=self._sigma)

        while (True):
            regions = measure.regionprops(self._segmentation_merged)
            merged = self._similarity_merge(regions, thr)
            if (merged):
                continue
            merged = self._pixels_merge(regions, thr_pixels)
            if (merged):
                continue
            break

        _, self._segmentation_merged = unique(self._segmentation_merged,
                                              return_inverse=1)
        self._segmentation_merged = (1 + self._segmentation_merged).reshape(
            self._presegmentation.shape)
Example #21
def quantizeWithoutPos(im, img_base_name, output_dir):
    labels1 = run_k_means(im, compactness=30, n_segments=400)
    g = graph.rag_mean_color(im, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)

    img_name = os.path.splitext(img_base_name)[0]
    save_as_mat_file(labels2, img_name, output_dir)
Example #22
    def _ncut_seg(self, data_list):
        """
        Use the ncut method to segment the image
        [image, slic_label/[slic_param], [ncut_param]]
        """
        img = data_list[0]
        param = data_list[1]
        param_cut = data_list[2]
        if param_cut is None:
            threshold = 0.001
        else:
            threshold = param_cut[0]
        # Check if the param is the super pixel label or the num of super pixel
        # to be segmented
        try:
            num = int(param[0])
            # super pixel seg
            label1 = segmentation.slic(img,
                                       compactness=10,
                                       n_segments=num,
                                       slic_zero=True)
        except:
            label1 = param
        # N-Cut
        g = graph.rag_mean_color(img, label1, mode='similarity')
        try:
            label2 = graph.cut_normalized(label1, g, thresh=threshold)
        except:
            log.error(
                '\033[01;31mERROR\033[0m: Unknown error in cut_normalized \
function.')
            label2 = np.zeros(label1.shape).astype('int')
        return label2
Example #23
    def preview(self, ips, para):
        lab = self.app.get_img(para['lab']).img
        connect = ['4-connected', '8-connected'].index(para['connect']) + 1
        g = graph.rag_mean_color(ips.snap, lab, connect, para['mode'],
                                 para['sigma'])
        lab = graph.cut_normalized(lab, g, para['thresh'], para['num'])
        ips.img[:] = color.label2rgb(lab, ips.snap, kind='avg')
Example #24
def mejorar_pluma(array, n_segments=2000):
    """Function to improve the ash footprint shape with RAG filtering.

    Parameters
    --------------------------------------------------
    array :
       2D array image with nonzero values for all the pixels above 0.
    n_segments :
       Approximate number of SLIC segments used for the RAG filtering. The value
       of 2000 was selected by empirical observation; more segments give a finer
       rearrangement of the values.

    Returns
    --------------------------------------------------
    Filtered array with the dilated and closed object.
    """
    # equalize the histogram, keeping only the negative values
    arreq = muestra_banda(array).reshape(array.shape)
    arreq = arreq.astype(int)
    labels1 = segmentation.slic(arreq, compactness=30, n_segments=n_segments)
    out1 = color.label2rgb(labels1, arreq, kind='avg')
    gra = graph.rag_mean_color(arreq, labels1)
    labels2 = graph.cut_threshold(labels1, gra, 29)
    out2 = color.label2rgb(labels2, arreq, kind='avg')
    fig, axe = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(6, 8))
    axe[0].imshow(out1)
    ma = axe[1].imshow(out2)
    fig.colorbar(ma)
    for a in axe:
        a.axis('off')
    plt.tight_layout()
    fig.show()
    return fig, out1, out2
Example #25
def test_rag_hierarchical():
    img = np.zeros((8, 8, 3), dtype='uint8')
    labels = np.zeros((8, 8), dtype='uint8')

    img[:, :, :] = 31
    labels[:, :] = 1

    img[0:4, 0:4, :] = 10, 10, 10
    labels[0:4, 0:4] = 2

    img[4:, 0:4, :] = 20, 20, 20
    labels[4:, 0:4] = 3

    g = graph.rag_mean_color(img, labels)
    g2 = g.copy()
    thresh = 20  # more than 11*sqrt(3) but less than 16*sqrt(3)

    result = merge_hierarchical_mean_color(labels, g, thresh)
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))

    result = merge_hierarchical_mean_color(labels, g2, thresh,
                                           in_place_merge=True)
    assert(np.all(result[:, :4] == result[0, 0]))
    assert(np.all(result[:, 4:] == result[-1, -1]))

    result = graph.cut_threshold(labels, g, thresh)
    assert np.all(result == result[0, 0])
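The test above calls merge_hierarchical_mean_color, which is not part of scikit-image and is not shown in the excerpt. A minimal sketch of such a helper, assuming it simply wraps graph.merge_hierarchical with the usual mean-color callbacks:

import numpy as np
from skimage.future import graph


def _weight_mean_color(g, src, dst, n):
    # edge weight = Euclidean distance between the mean colors of `dst` and `n`
    diff = g.nodes[dst]['mean color'] - g.nodes[n]['mean color']
    return {'weight': np.linalg.norm(diff)}


def _merge_mean_color(g, src, dst):
    # fold the color totals of `src` into `dst` and recompute the mean
    g.nodes[dst]['total color'] += g.nodes[src]['total color']
    g.nodes[dst]['pixel count'] += g.nodes[src]['pixel count']
    g.nodes[dst]['mean color'] = (g.nodes[dst]['total color'] /
                                  g.nodes[dst]['pixel count'])


def merge_hierarchical_mean_color(labels, rag, thresh, in_place_merge=False):
    return graph.merge_hierarchical(labels, rag, thresh,
                                    rag_copy=not in_place_merge,
                                    in_place_merge=in_place_merge,
                                    merge_func=_merge_mean_color,
                                    weight_func=_weight_mean_color)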
Example #26
    def segment(self,
                compactness=50,
                n_segments=100,
                connectivity=1,
                sigma=500,
                num_cuts=200):
        """
        Function responsible for image segmentation
        """
        if self.segments is None:
            # Make segmentation slice
            labels1 = segmentation.slic(self.image,
                                        compactness=compactness,
                                        n_segments=n_segments)

            # Convert slice into rgb image based on avg
            out1 = color.label2rgb(labels1, self.image, kind='avg')

            # Complete segmentation using graph cut
            g = graph.rag_mean_color(self.image,
                                     labels1,
                                     mode='similarity',
                                     connectivity=connectivity,
                                     sigma=sigma)
            labels = graph.cut_normalized(labels1, g, num_cuts=num_cuts)

            image_segmentation = color.label2rgb(labels,
                                                 self.image,
                                                 kind='avg')
            self._set_boxes(labels)
Example #27
    def set(image, phase):

        # get Cv image
        image = pol.getCV(baseimage, win=9)

        # get the labels through watershed
        labels = im_watershed(image)

        # Generate the RAG
        g = graph.rag_mean_color(image, labels)

        # set attributes of all nodes
        g = set_attr(baseimage, phase, labels, g0)

        # get neighbors
        #g.neighbors(3)

        # merge nodes i, j; optionally specify a weight function
        # g.merge_nodes(4, 5, weight_func=???)

        # to access properties of node n use g.nodes[n]
        # to reset the weight between neighbors i, j: g[i].get(j)['weight'] = K
        Freq = 2500  # Set Frequency To 2500 Hertz
        Dur = 1000  # Set Duration To 1000 == 1 second
        winsound.Beep(Freq, Dur)

        return g, labels
Example #28
    def merge_clusters(self, segmented_image, threshold=5):
        """Merge tiny superpixel clusters.
        Superpixel segmentations result in oversegmented images. Based on graph
        theoretic tools, similar clusters are merged.

        Parameters
        ----------
        segmented_image : ndarray
            Label image, output of a segmentation.
        threshold : float, optional
            Regions connected by edges with smaller weights are combined.

        Returns
        -------
        merged_superpixels : ndarray
            The new labelled array.
        """

        if self.__stored_graph is None:
            # Region Adjacency Graph (RAG) not yet determined -> compute it
            g = graph.rag_mean_color(self.original_image, segmented_image)
            self.__stored_graph = g
        else:
            g = self.__stored_graph
        merged_superpixels = graph.cut_threshold(segmented_image, g, threshold, in_place=False)
        if self.__interactive_mode:
            io.imshow(color.label2rgb(merged_superpixels, self.original_image, kind='avg'))
            io.show()
            print('Tiny clusters merged. '
                  'Number of segments: {0}'.format(np.amax(merged_superpixels)))
        return merged_superpixels
Example #29
def test_rag_hierarchical():
    img = np.zeros((8, 8, 3), dtype='uint8')
    labels = np.zeros((8, 8), dtype='uint8')

    img[:, :, :] = 31
    labels[:, :] = 1

    img[0:4, 0:4, :] = 10, 10, 10
    labels[0:4, 0:4] = 2

    img[4:, 0:4, :] = 20, 20, 20
    labels[4:, 0:4] = 3

    g = graph.rag_mean_color(img, labels)
    g2 = g.copy()
    thresh = 20  # more than 11*sqrt(3) but less than 16*sqrt(3)

    result = merge_hierarchical_mean_color(labels, g, thresh)
    assert (np.all(result[:, :4] == result[0, 0]))
    assert (np.all(result[:, 4:] == result[-1, -1]))

    result = merge_hierarchical_mean_color(labels,
                                           g2,
                                           thresh,
                                           in_place_merge=True)
    assert (np.all(result[:, :4] == result[0, 0]))
    assert (np.all(result[:, 4:] == result[-1, -1]))

    result = graph.cut_threshold(labels, g, thresh)
    assert np.all(result == result[0, 0])
Example #30
def before_method(img_path,
                  sp_met='felzenszwalb',
                  graph_met='syn_met',
                  admm_met='admm',
                  num_cuts=3,
                  dist_hist=False,
                  lambda_coff=False,
                  n_iter=1000):
    m_img = imread(img_path)

    # superpixels method
    if sp_met == 'felzenszwalb':
        segments = felzenszwalb(m_img, scale=10, sigma=0.5, min_size=100)
    elif sp_met == 'slic':
        segments = slic(m_img, compactness=30, n_segments=400)
    else:
        warnings.warn("Warning Message: no superpixels method parameter")

    # generate graph matrix
    if graph_met == 'lib_met':
        g = graph.rag_mean_color(m_img, segments)
        w = nx.to_scipy_sparse_matrix(g, format='csc')
        entries = w.sum(axis=0)
        d = sparse.dia_matrix((entries, 0), shape=w.shape).tocsc()
        m = w.shape[0]
        d2 = d.copy()
        d2.data = np.reciprocal(np.sqrt(d2.data, out=d2.data), out=d2.data)
        matrix = d2 * (d - w) * d2

        # matrix eigen-decomposition, scipy.sparse.linalg
        vals, vectors = scipy.sparse.linalg.eigsh(matrix,
                                                  which='SM',
                                                  k=min(100, m - 2))
        vals, vectors = np.real(vals), np.real(vectors)
        order = np.argsort(vals)
        index1, index2, index3 = order[0], order[1], order[2]
        ev1, ev, ev3 = vectors[:, index1], vectors[:, index2], vectors[:, index3]

    elif graph_met == 'syn_met':
        ev = syn_graph_met(m_img,
                           segments,
                           lambda_coff=lambda_coff,
                           dist_hist=dist_hist)

    else:
        warnings.warn('Warning Message: graph_met argument missing')

    if admm_met == 'admm':
        sp_label = admm(n_vector=ev, n_iter=n_iter, num_cuts=num_cuts)

    elif admm_met == 'density_admm':
        sp_label = relation_density_admm(n_vector=ev, num_cuts=num_cuts)

    else:
        warnings.warn('Warning Message: admm_met argument missing')

    p_label, labels = pixels_label(m_img, segments, sp_label)
    return p_label
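For reference, the matrix assembled in the 'lib_met' branch above is the symmetric normalized graph Laplacian of the RAG, L_sym = D^{-1/2} (D - W) D^{-1/2}, where W is the weight matrix obtained from rag_mean_color and D is the diagonal degree matrix; the eigenvectors for its smallest eigenvalues (computed with eigsh(..., which='SM')) give the spectral embedding that is then passed to the ADMM step.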
Example #31
    def preview(self, ips, para):
        print(para)
        lab = ImageManager.get(para['lab']).img
        connect = ['4-connected', '8-connected'].index(para['connect']) + 1
        g = graph.rag_mean_color(ips.snap, lab, connect, para['mode'],
                                 para['sigma'])
        lab = graph.cut_threshold(lab, g, para['thresh'])
        ips.img[:] = color.label2rgb(lab, ips.snap, kind='avg')
Example #32
def graph_maker(image, labels):
    def _weight_mean_color(graph, src, dst, n):
        diff = graph.node[dst]['mean color'] - graph.node[n]['mean color']
        diff = np.linalg.norm(diff)
        return {'weight': diff}

    def merge_mean_color(graph, src, dst):
        graph.node[dst]['total color'] += graph.node[src]['total color']
        graph.node[dst]['pixel count'] += graph.node[src]['pixel count']
        graph.node[dst]['mean color'] = (graph.node[dst]['total color'] /
                                         graph.node[dst]['pixel count'])

    def translate_graph(nodes_data, edges_data):
        N = len(nodes_data)  # number of nodes
        a = {n[0]: i for i, n in enumerate(nodes_data)}
        g2 = rag.RAG()
        # generating the edges with the new numeration
        for e in edges_data:
            g2.add_edge(a[e[0]], a[e[1]], weight=e[2]['weight'])

        # reassigning the values to each node
        for n, values in nodes_data:
            n2 = a[n]
            for k, v in values.items():
                g2.node[n2][k] = v
        return g2

    g = graph.rag_mean_color(image, labels)

    offset = 1
    # create a map array
    map_array = np.arange(labels.max() + 1)
    for n, d in g.nodes(data=True):
        for label in d['labels']:
            map_array[label] = offset
        offset += 1

    # compute centroids to the nodes
    g_labels = map_array[labels]
    regions = regionprops(g_labels)
    for (n, data), region in zip(g.nodes(data=True), regions):
        data['centroid'] = region['centroid']
    if g.number_of_nodes() < 75:
        print('warning: number of nodes is less than 75, ',
              g.number_of_nodes())
    while g.number_of_nodes() > 75:
        indices = list(range(g.number_of_edges()))
        shuffle(indices)
        edges_data = list(g.edges(data=True))
        edges = [edges_data[index] for index in indices]
        edges = sorted(edges, key=lambda t: t[2].get('weight', 1))
        src, dst = edges[0][0], edges[0][1]
        merge_mean_color(g, src, dst)
        g.merge_nodes(src, dst, weight_func=_weight_mean_color)

    nodes_data = g.nodes(data=True)
    edges_data = list(g.edges(data=True))
    return translate_graph(nodes_data, edges_data)
Example #33
def getBinaryCostByWaggoner(nextOriginalImage,
                            lastHumanLabeledRGB,
                            lastLabeled,
                            type="intensityImage",
                            edgeOriginalImage=np.zeros((1, 1)),
                            neighborLength=9,
                            infiniteCost=100):
    """
    Function:Obtain the Binary Cost of this layer to be segmented with waggoner's method
        Input:nextOriginalImage:Last layer's original image
              lasthumanLabeledRGB:RGB image labeled by human of last layer's image
              lastLabeled: Labeled image of last layer's image
              type:The type of image need to handled("intensityImage" or "edgeImage")(optional,default "intensityImage")
              edgeOriginalImage:Border image, useful when type == "edgeImage"
              neighborLength: Neighbor boundary length, (optional, must be singular, default is 9)
        Ouput: binaryCost,m * 3(m presents the number of edges, the first column is the beginning of edge, the second column is the end of the edge, the third column is the edge weight)
    """

    # Convert the image to a grayscale image if it is a color image
    inputImageGray = np.zeros(nextOriginalImage.shape)
    if nextOriginalImage.ndim == 3:
        inputImageGray = cv.cvtColor(nextOriginalImage, cv.COLOR_BGR2GRAY)
    elif nextOriginalImage.ndim == 2:
        inputImageGray = nextOriginalImage

    # Initialize edges (m*3), each row corresponds to one edge,
    # the first two values are for the start and end of the edge, and the last corresponding the weight of edge
    inds = np.arange(inputImageGray.size).reshape(inputImageGray.shape)
    horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
    vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
    edges = np.zeros((horz.shape[0] + vert.shape[0], 3))
    edges[:, :-1] = np.vstack([horz, vert]).astype(np.int32)

    # Build area adjacency map
    rag = graph.rag_mean_color(lastHumanLabeledRGB, lastLabeled)
    for index in range(0, edges.shape[0]):
        startPoint = edges[index][0].astype(np.int32)
        endPoint = edges[index][1].astype(np.int32)
        startRowIndex, startColIndex = pointIndexToCoordinate(
            inputImageGray.shape[0], inputImageGray.shape[1], startPoint)
        endRowIndex, endColIndex = pointIndexToCoordinate(
            inputImageGray.shape[0], inputImageGray.shape[1], endPoint)

        if type == "intensityImage":
            edges[index][2] = gFunc(inputImageGray[startRowIndex,
                                                   startColIndex],
                                    inputImageGray[endRowIndex, endColIndex],
                                    imageType=type,
                                    m=1)
        elif type == "edgeImage":
            edges[index][2] = gFunc(edgeOriginalImage[startRowIndex,
                                                      startColIndex],
                                    edgeOriginalImage[endRowIndex,
                                                      endColIndex],
                                    imageType=type,
                                    m=1)

    return edges.astype(np.int32)
Example #34
def normalized_cut(img, ncut=10):
    '''
    segment image by normalized_cut
    '''
    labels1 = segmentation.slic(img, convert2lab=True, compactness=40, n_segments=400)
    # out1 = color.label2rgb(labels1, img, kind='avg')

    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g, num_cuts=ncut)
    # out2 = color.label2rgb(labels2, img, kind='avg')

    return labels2
Example #35
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges. """

    img = np.zeros((100, 100, 3), dtype='uint8')

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2

    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)

    assert new_labels.max() == 0
Example #36
    def get_idxs(self):

        cell_types, cell_ids = (self.data_fields['CellType'],
                                self.data_fields['CellId'])
        e_idx = []
        v_idx = []
        for mcs, cell_type, cell_id in zip(self.step_values,
                                           cell_types, cell_ids):
            rag = graph.rag_mean_color(cell_type, cell_id)
            direct = [(mcs, s, t) for s, t in rag.edges()]
            flipped = [(mcs, t, s) for s, t in rag.edges()]
            e_idx.extend(direct + flipped)
            v_idx.extend([(mcs, cell_id) for cell_id in rag.nodes()])

        self.e_idx = pd.MultiIndex.from_tuples(e_idx,
                                               names=['t', 'srce', 'trgt'])
        self.v_idx = pd.MultiIndex.from_tuples(v_idx,
                                               names=['t', 'cell'])
Example #37
def norm_grap_cut(image, max_edge=10000000, max_rec=4, compactness=2,
                  nrSupPix=2000):
    """Normalized graph cut wrapper for 2D numpy arrays.

    Parameters
    ----------
        image: np.ndarray (2D)
            Volume histogram.
        max_edge: float
            The maximum possible value of an edge in the RAG. This corresponds
            to an edge between identical regions. This is used to put self
            edges in the RAG.
        compactness: float
            From skimage slic_superpixels.py slic function:
            Balances color proximity and space proximity. Higher values give
            more weight to space proximity, making superpixel shapes more
            square/cubic. This parameter depends strongly on image contrast and
            on the shapes of objects in the image.
        nrSupPix: int, positive
            The (approximate) number of superpixels in the region adjacency
            graph.

    Returns
    -------
        labels2, labels1: np.ndarray (2D)
            Segmented volume histogram mask image. Each label has a unique
            identifier.

    """
    # scale for uint8 conversion
    image = np.round(255 / image.max() * image)
    image = image.astype('uint8')

    # scikit implementation expects rgb format (shape: NxMx3)
    image = np.tile(image, (3, 1, 1))
    image = np.transpose(image, (1, 2, 0))

    labels1 = slic(image, compactness=compactness, n_segments=nrSupPix,
                   sigma=2)
    # region adjacency graph (rag)
    g = graph.rag_mean_color(image, labels1, mode='similarity_and_proximity')
    labels2 = graph.cut_normalized(labels1, g, max_edge=max_edge,
                                   num_cuts=1000, max_rec=max_rec)
    return labels2, labels1
Example #38
def test_threshold_cut():

    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3

    rag = graph.rag_mean_color(img, labels)
    new_labels = graph.cut_threshold(labels, rag, 10, in_place=False)
    # Two labels
    assert new_labels.max() == 1

    new_labels = graph.cut_threshold(labels, rag, 10)
    # Two labels
    assert new_labels.max() == 1
Example #39
def main():
    img = misc.imread("wheat.png")

    # labels1 = segmentation.slic(img, compactness=100, n_segments=9)
    labels1 = segmentation.slic(img, compactness=50, n_segments=4)
    out1 = color.label2rgb(labels1, img, kind='overlay')
    print(labels1.shape)

    g = graph.rag_mean_color(img, labels1)
    labels2 = graph.cut_threshold(labels1, g, 29)
    out2 = color.label2rgb(labels2, img, kind='overlay')

    # get roi
    # logicalIndex = (labels2 != 1)
    # gray = rgb2gray(img);
    # gray[logicalIndex] = 0;


    plt.figure()
    io.imshow(out1)
    plt.figure()
    io.imshow(out2)
    io.show()
Example #40
def test_cut_normalized():

    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3

    rag = graph.rag_mean_color(img, labels, mode='similarity')

    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Two labels
    assert new_labels.max() == 1

    new_labels = graph.cut_normalized(labels, rag)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    assert new_labels.max() == 1
Example #41
img[:,:,0] = lbp

#more from: https://vcansimplify.wordpress.com/2014/07/06/scikit-image-rag-introduction/
#slic segmentation
labels1 = slic(img, n_segments=100, compactness=5)
labels1 = labels1 + 1
regions1 = regionprops(labels1)

labels1_rgb = color.label2rgb(labels1, img, kind='avg')
show_img(labels1_rgb)

label1_rgb = segmentation.mark_boundaries(labels1_rgb, labels1, (0, 0, 0))
show_img(label1_rgb)

# RAG graph for the first segmentation
rag = rag_mean_color(img, labels1)
for region in regions1:
    rag.node[region['label']]['centroid'] = region['centroid']

#labels1 = cut_threshold(segments_slic, g)

edges_drawn_all = display_edges(label1_rgb, rag, 20 )

show_img(edges_drawn_all)

labels2 = cut_normalized(labels1, rag)
labels2 = labels2 + 1
#regions2 = regionprops(labels2)

labels2_rgb = color.label2rgb(labels2, img, kind='avg')
show_img(labels2_rgb)
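The snippet above uses display_edges and show_img, which come from the linked blog post rather than from scikit-image; a rough sketch of such helpers, assuming each RAG node already carries the 'centroid' attribute set in the loop above:

from matplotlib import pyplot as plt
from skimage import draw


def show_img(img):
    plt.figure(figsize=(8, 8))
    plt.imshow(img)
    plt.axis('off')
    plt.show()


def display_edges(image, g, threshold):
    # draw a line between the centroids of every pair of regions whose RAG edge
    # weight is below `threshold`, and mark every centroid with a small yellow disk
    image = image.copy()
    for n1, n2 in g.edges():
        r1, c1 = map(int, g.nodes[n1]['centroid'])
        r2, c2 = map(int, g.nodes[n2]['centroid'])
        if g[n1][n2]['weight'] < threshold:
            rr, cc = draw.line(r1, c1, r2, c2)
            image[rr, cc] = 0, 1, 0
        rr, cc = draw.disk((r1, c1), 2)
        image[rr, cc] = 1, 1, 0
    return image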
Example #42
def merge_mean_color(graph, src, dst):
    """Callback called before merging two nodes of a mean color distance graph.

    This method computes the mean color of `dst`.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    """
    graph.node[dst]['total color'] += graph.node[src]['total color']
    graph.node[dst]['pixel count'] += graph.node[src]['pixel count']
    graph.node[dst]['mean color'] = (graph.node[dst]['total color'] /
                                     graph.node[dst]['pixel count'])


img = data.coffee()
labels = segmentation.slic(img, compactness=30, n_segments=400)
g = graph.rag_mean_color(img, labels)

labels2 = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False,
                                   in_place_merge=True,
                                   merge_func=merge_mean_color,
                                   weight_func=_weight_mean_color)

g2 = graph.rag_mean_color(img, labels2)

out = color.label2rgb(labels2, img, kind='avg')
out = segmentation.mark_boundaries(out, labels2, (0, 0, 0))
io.imshow(out)
io.show()
Example #43
for image_name in image_names:
    print("Processing image:", image_name)
    image = io.imread(image_name + ".jpg")
    ms_output = {}
    for b in bandwidths:
        for r in radius:
            eps = 1
            new_image = mean_shift(image, radius=r, bandwidth=b, eps=eps)
            ms_output[b] = new_image
            io.imsave(image_name + "_ms_local_b" + str(b) +
                                            "_r" + str(r) +
                                            "_e" + str(eps) + "_floodfill.jpg", new_image)

    print("Computing N-Cut ...")
    label_slic = segmentation.slic(image, compactness=20, n_segments=600)
    mean = graph.rag_mean_color(image, label_slic, mode='similarity')
    label_ncut = graph.cut_normalized(label_slic, mean)
    ncut_output = color.label2rgb(label_ncut, image, kind='avg')
    io.imsave(image_name + "_ncut.jpg", ncut_output)

    # plt.figure().suptitle('Original')
    # io.imshow(image)
    #
    # for b in bandwidths:
    #     plt.figure().suptitle('Result of Mean Shift - Bandwidth = ' + str(b))
    #     io.imshow(ms_output[b])
    #
    # plt.figure().suptitle('Result of N-Cut')
    # io.imshow(ncut_output)
    # io.show()
Example #44
    for edge in g.edges_iter():
        n1, n2 = edge
        r1, c1 = map(int, g.node[n1]['centroid'])
        r2, c2 = map(int, g.node[n2]['centroid'])

        n_green = np.array([0, 1, 0])
        n_red = np.array([1, 0, 0])

        line = draw.line(r1, c1, r2, c2)
        circle = draw.circle(r1, c1, 2)
        norm_weight = (g[n1][n2]['weight']-min_weight)/(max_weight-min_weight)

        image[line] = norm_weight*n_red + (1-norm_weight)*n_green
        image[circle] = 1, 1, 0   #the center of the node
    return image


if __name__ == '__main__':
    img = data.coffee()
    # img = data.camera()

    labels1 = segmentation.slic(img, compactness=30, n_segments=120000)
    out1 = color.label2rgb(labels1, img, kind='avg')
    show_img(out1)

    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    print(labels2)
    out2 = color.label2rgb(labels2, img, kind='avg')

    show_img(out2)
Example #45
"""
This example constructs a Region Adjacency Graph (RAG) and merges regions
which are similar in color. We construct a RAG and define edges as the
difference in mean color. We then join regions with similar mean color.
"""

from skimage import data, io, segmentation, color
from skimage.future import graph
from matplotlib import pyplot as plt


img = data.coffee()

labels1 = segmentation.slic(img, compactness=30, n_segments=400)
out1 = color.label2rgb(labels1, img, kind='avg')

g = graph.rag_mean_color(img, labels1)
labels2 = graph.cut_threshold(labels1, g, 29)
out2 = color.label2rgb(labels2, img, kind='avg')

fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True,
                       figsize=(6, 8))

ax[0].imshow(out1)
ax[1].imshow(out2)

for a in ax:
    a.axis('off')

plt.tight_layout()
Example #46
        line = draw.line(r1, c1, r2, c2)
        circle = draw.circle(r1, c1, 2)
        norm_weight = (g[n1][n2]['weight']-min_weight)/(max_weight-min_weight)

        image[line] = norm_weight*n_red + (1-norm_weight)*n_green
        image[circle] = 1, 1, 0   #the center of the node
    return image

if __name__=='__main__':
    # '/home/auroua/workspace/PycharmProjects/data/label_rgb.png'
    demo_image = io.imread('/home/auroua/workspace/PycharmProjects/data/label_rgb.png')
    # show_img(demo_image)

    # cluster the image with k-means-style SLIC into about 100 segments
    labels = segmentation.slic(demo_image, compactness=30, n_segments=100)
    print(labels)
    labels = labels + 1
    # gather information about the image regions via regionprops
    regions = regionprops(labels)

    label_rgb = color.label2rgb(labels, demo_image, kind='avg')
    # the RGB tuple (0, 1, 1) sets the color of the boundaries
    label_rgb = segmentation.mark_boundaries(label_rgb, labels, (0, 1, 1))

    # compute the similarity between adjacent nodes; 'similarity' here weighs not only
    # color similarity but also spatial similarity (region adjacency graph)
    rag = graph.rag_mean_color(demo_image, labels, mode='similarity')
    for region in regions:
        rag.node[region['label']]['centroid'] = region['centroid']

    label_rgb = display_edges(label_rgb, rag)
    show_img(label_rgb)