def mergeSuperpixels(
        superpixels, rgb_frame, sat_frame, depth_frame,
        rgb_thresh=55, sat_thresh=0.20, depth_thresh=25):
    """Merge superpixels on colour, saturation and depth similarity; a
    negative threshold disables the corresponding modality."""

    sp_joined = None

    if rgb_thresh >= 0:
        rgb_rag = graph.rag_mean_color(rgb_frame, superpixels)
        sp_joined = graph.cut_threshold(superpixels, rgb_rag, rgb_thresh)

    if sat_thresh >= 0:
        sat_rag = graph.rag_mean_color(sat_frame, superpixels)
        sp_merged_sat = graph.cut_threshold(superpixels, sat_rag, sat_thresh)
        sp_joined = (sp_merged_sat if sp_joined is None else
                     segmentation.join_segmentations(sp_joined, sp_merged_sat))

    if depth_thresh >= 0:
        depth_rag = graph.rag_mean_color(depth_frame, superpixels)
        sp_merged_depth = graph.cut_threshold(
            superpixels, depth_rag, depth_thresh)
        sp_joined = (sp_merged_depth if sp_joined is None else
                     segmentation.join_segmentations(sp_joined, sp_merged_depth))

    # if every modality is disabled, fall back to the original superpixels
    return superpixels if sp_joined is None else sp_joined
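
A minimal sketch of how this merger might be called, on randomly generated frames. The saturation and depth frames are stacked to three channels here so that rag_mean_color treats them like colour images; all inputs are made up, and graph lives in skimage.graph on recent releases (skimage.future.graph on older ones):

import numpy as np
from skimage import color, segmentation
from skimage import graph  # skimage.future.graph on older releases

# made-up, mutually aligned input frames
rgb_frame = (np.random.rand(120, 160, 3) * 255).astype(np.uint8)
sat_frame = np.dstack([color.rgb2hsv(rgb_frame)[..., 1]] * 3)
depth_frame = np.dstack([np.random.rand(120, 160) * 255] * 3)

superpixels = segmentation.slic(rgb_frame, n_segments=300, start_label=1)
merged = mergeSuperpixels(superpixels, rgb_frame, sat_frame, depth_frame)
print(len(np.unique(superpixels)), '->', len(np.unique(merged)))
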
def split(superpixels, annotation):
    """
    Split superpixels that straddle regions with different annotation labels.
    """
    # channel 0 of the annotation is a 0/255 mask; channels 1 and 2 carry the label maps
    superpixels = sg.join_segmentations(
        superpixels, annotation[:, :, 0] // 255 * annotation[:, :, 1])
    superpixels = sg.join_segmentations(
        superpixels, annotation[:, :, 0] // 255 * annotation[:, :, 2])
    superpixels = enforce_connectivity(superpixels)
    superpixels, _, _ = sg.relabel_sequential(superpixels)
    return superpixels + 1
Example #3
def main():
    opts = docopt.docopt(__doc__)
    datadir = opts['<datadir>']
    outputdir = os.path.join(datadir, 'superpixel')

    if not os.path.isdir(outputdir):
        os.makedirs(outputdir)

    for input_fn in glob.glob(os.path.join(datadir, 'input', '*.JPG')):
        input_base = os.path.join(outputdir,
            os.path.splitext(os.path.basename(input_fn))[0])
        if os.path.isfile(input_base + '.npz'):
            print('Skipping since {} exists'.format(input_base))
            continue

        print('Input: ' + input_fn)
        input_im = imageio.imread(input_fn)

        print('Converting to LAB colorspace')
        lab_im = skimcolor.rgb2lab(input_im)

        labels = np.zeros(input_im.shape[:2], dtype=int)  # integer labels for join_segmentations

        print('Segmenting (watershed)...')
        labels = skimseg.join_segmentations(labels, ws_segment(lab_im[..., 0]))

        print('Segmenting (slic)...')
        # Set number of segments so each segment is roughly seg_size*seg_size in area
        seg_size = 128
        n_segments = 1 + int(input_im.shape[0] * input_im.shape[1] /
            (seg_size*seg_size))
        labels = skimseg.join_segmentations(labels,
            skimseg.slic(lab_im, n_segments=n_segments, sigma=1,
                compactness=0.1, multichannel=True, convert2lab=False,
                slic_zero=True)
        )

        print('Enforcing connectivity')
        # Enforce connectivity. This is important otherwise superpixels may be
        # spread over image.
        labels = skimmeas.label(labels)

        print('Saving output...')

        # Write visualisation
        imageio.imwrite(input_base + '-visualisation.jpg',
            skimseg.mark_boundaries(input_im, labels))

        # Write output
        np.savez_compressed(input_base + '.npz', labels=labels)
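
The saved archive can later be read back with numpy; the file name below is only an illustrative placeholder:

import numpy as np

# hypothetical file name; any of the .npz archives written above would do
with np.load('superpixel/IMG_0001.npz') as data:
    labels = data['labels']
print(labels.shape)
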
Example #4
def tune_parameters(img, bg_mask):
    """
    Try different segmentation hyperparameters in order to find the best ones.
    :param img: The data to be segmented.
    :param bg_mask: The background mask
    :return: Show a figure presenting the result.
    """
    segments_images = []
    scales = [35, 36, 37]
    sigma = 0
    min_sizes = [8, 9, 10]
    titles = []
    for scale in scales:
        for min_size in min_sizes:
            current_segment = felzenszwalb(img,
                                           scale=scale,
                                           sigma=sigma,
                                           min_size=min_size)
            segments_images.append(current_segment)
            titles.append('scale={}, min_size={}, num_segments={}'.format(
                scale, min_size, len(np.unique(current_segment))))
    total_size = len(scales) * len(min_sizes)
    cols = int(np.floor(np.sqrt(total_size)))
    rows = int(np.ceil(np.sqrt(total_size)))
    fig, ax = plt.subplots(rows, cols)
    for i, seg in enumerate(segments_images):
        segments = join_segmentations(bg_mask, seg)
        r = np.mod(i, rows)
        c = int(np.floor(i / rows))
        ax[r, c].imshow(segments)
        ax[r, c].axis('off')
        ax[r, c].set_title(titles[i], {'fontsize': 9})
    plt.show()
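
A minimal way to exercise this tuner, using a stock image and a trivial all-zero background mask as stand-ins (and assuming the imports the function already relies on, felzenszwalb, join_segmentations and matplotlib, are in place):

import numpy as np
from skimage import data

img = data.astronaut()[::4, ::4]              # downsampled to keep the demo quick
bg_mask = np.zeros(img.shape[:2], dtype=int)  # stand-in background mask
tune_parameters(img, bg_mask)
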
def merge_proposals(proposals, sps, thresh):
    masks = (proposals[sps] > thresh).astype(int)

    base = np.zeros_like(masks[:, :, 0])
    for mask in np.rollaxis(masks, 2):
        base = seg.join_segmentations(base, mask)
    return base
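
A toy call, assuming proposals is an (n_superpixels, n_proposals) score matrix, sps a superpixel label image, and that the surrounding module imports skimage.segmentation as seg (all made up here):

import numpy as np
from skimage import segmentation as seg

# 3 superpixels scored against 2 proposals, on a tiny 2x3 label image
proposals = np.array([[0.9, 0.1],
                      [0.2, 0.8],
                      [0.7, 0.6]])
sps = np.array([[0, 0, 1],
                [2, 2, 1]])
combined = merge_proposals(proposals, sps, thresh=0.5)
print(combined)
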
def getOverlap(index, segmasks, fro, dilation=9):
    joint = segmentation.join_segmentations(
        morphology.dilation((segmasks == fro['label'][index]).astype(int),
                            morphology.disk(dilation)),
        (tVorMask == fro['VorLabel'][index]).astype(int))
    final = (joint == np.max(joint)) * fro.loc[index, 'label']
    return (final)
Example #7
def test_join_segmentations():
    s1 = np.array([[0, 0, 1, 1], [0, 2, 1, 1], [2, 2, 2, 1]])
    s2 = np.array([[0, 1, 1, 0], [0, 1, 1, 0], [0, 1, 1, 1]])

    # test correct join
    # NOTE: technically, equality to j_ref is not required, only that there
    # is a one-to-one mapping between j and j_ref. I don't know of an easy way
    # to check this (i.e. not as error-prone as the function being tested)
    j = join_segmentations(s1, s2)
    j_ref = np.array([[0, 1, 3, 2], [0, 5, 3, 2], [4, 5, 5, 3]])
    assert_array_equal(j, j_ref)

    # test correct exception when arrays are different shapes
    s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]])
    with testing.raises(ValueError):
        join_segmentations(s1, s3)
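
On the NOTE above: a direct one-to-one-mapping check is not hard to write; a minimal sketch (hypothetical helper, not part of skimage):

import numpy as np

def is_one_to_one_relabeling(a, b):
    """True if label images `a` and `b` differ only by a bijective relabeling."""
    pairs = np.unique(np.stack([np.ravel(a), np.ravel(b)], axis=1), axis=0)
    # bijective iff every label of `a` pairs with exactly one label of `b`, and vice versa
    return (len(np.unique(pairs[:, 0])) == len(pairs) and
            len(np.unique(pairs[:, 1])) == len(pairs))

print(is_one_to_one_relabeling(np.array([[0, 1], [2, 2]]),
                               np.array([[5, 7], [9, 9]])))  # True
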
Example #8
def select_targets(image1, image2):
    """Creates an image containing the overlapping regions from the passed
    images.

    Args:
        image1 (array-like): A binary image serving as reference in comparing
            image regions with another image.

        image2 (array-like): A binary image serving as samples in comparing
            image regions with the reference image.

    Returns:
        targets (array-like): An image containing the common regions in both of
            the passed images.

    """

    _image1, _image2 = _check_image(image1), _check_image(image2)

    masked = join_segmentations(_image1, _image2)
    masks = np.unique(masked)

    targets = np.zeros_like(_image1)
    targets[masked == masks[-1]] = 255

    return targets
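
For strictly binary masks the join trick above amounts to a logical AND of the two inputs (whenever they actually overlap); a tiny sketch of the equivalent operation on toy arrays, skipping the module's _check_image step:

import numpy as np

a = np.zeros((5, 5), dtype=np.uint8)
a[1:4, 1:4] = 255
b = np.zeros((5, 5), dtype=np.uint8)
b[2:5, 2:5] = 255

overlap = np.zeros_like(a)
overlap[(a > 0) & (b > 0)] = 255  # pixels set in both masks
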
Example #9
def _join_seg(segmentation_rw, segmentation_ws):
    rw_ws_join = join_segmentations(segmentation_rw, segmentation_ws)
    rw_ws_join[~segmentation_ws] = 0

    labeled_segmentation = label(rw_ws_join)
    labeled_segmentation = remove_small_objects(labeled_segmentation, 800)

    return labeled_segmentation
Example #10
def test_join_segmentations():
    s1 = np.array([[0, 0, 1, 1],
                   [0, 2, 1, 1],
                   [2, 2, 2, 1]])
    s2 = np.array([[0, 1, 1, 0],
                   [0, 1, 1, 0],
                   [0, 1, 1, 1]])

    # test correct join
    # NOTE: technically, equality to j_ref is not required, only that there
    # is a one-to-one mapping between j and j_ref. I don't know of an easy way
    # to check this (i.e. not as error-prone as the function being tested)
    j = join_segmentations(s1, s2)
    j_ref = np.array([[0, 1, 3, 2],
                      [0, 5, 3, 2],
                      [4, 5, 5, 3]])
    assert_array_equal(j, j_ref)

    # test correct exception when arrays are different shapes
    s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]])
    with pytest.raises(ValueError):
        join_segmentations(s1, s3)
Example #11
def UnderSegmentation(seg_path, GT_path):
    '''
    Implementation of undersegmentation measurement based on two image segmentations (one target, one ground truth)

    Parameters:
    seg_path - path/to/segmentation_file
    GT_path - path/to/GroundTruth_file

    Returns:
    Undersegmentation value
    '''
    img = io.imread(seg_path).astype(int)
    GT = io.imread(GT_path).astype(int)

    N = GT.shape[0] * GT.shape[1]

    # find the intersection between two segmentations (this outputs all combinations of intersects)
    intersection = segmentation.join_segmentations(img, GT)

    # find all the unique intersecting segmentations
    intersection_u = np.unique(intersection)

    # create a map of counts of elements in the intersection for each intersecting segment type
    intersection_map = {}
    for intersect in intersection_u:
        intersect_coors = np.where(intersection == intersect)
        intersection_map[intersect] = len(intersect_coors[0])

    img_u = np.unique(img)

    score = 0

    # iterate over each segment of the original segmentation
    for segment in img_u:
        im_coors = np.where(img == segment)
        # calculate |P| value
        P = len(im_coors[0])
        check = {}

        # check the overlap between the original segment and each intersection piece
        # and accumulate the undersegmentation score
        for i in range(len(im_coors[0])):
            found = intersection[im_coors[0][i], im_coors[1][i]]
            try:
                check[found]
            except KeyError:
                check[found] = True
                subscore = P - intersection_map[found]
                score += min(intersection_map[found], subscore)
    return float(score) / N
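
The per-label pixel counts gathered above with one np.where call per label can also be obtained in a single pass; a standalone sketch with a toy joint-label image standing in for intersection:

import numpy as np

intersection = np.array([[0, 0, 1],
                         [2, 1, 1]])
labels, counts = np.unique(intersection, return_counts=True)
intersection_map = {int(lab): int(cnt) for lab, cnt in zip(labels, counts)}
print(intersection_map)  # {0: 2, 1: 3, 2: 1}
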
Example #13
def main():
    if len(sys.argv) != 2:
        print("ERROR! Correct usage is:")
        print("\tpython test_random_walker.py [gps_point_collection.dat]")
        return

    GRID_SIZE = 500
    results = np.zeros((GRID_SIZE, GRID_SIZE), float)

    # Load GPS points
    with open(sys.argv[1], "rb") as fin:
        point_collection = cPickle.load(fin)

    for pt in point_collection:
        y_ind = math.floor((pt[0] - const.RANGE_SW[0]) /
                           (const.RANGE_NE[0] - const.RANGE_SW[0]) * GRID_SIZE)
        x_ind = math.floor((pt[1] - const.RANGE_NE[1]) /
                           (const.RANGE_SW[1] - const.RANGE_NE[1]) * GRID_SIZE)
        results[x_ind, y_ind] += 1.0
        if results[x_ind, y_ind] >= 64:
            results[x_ind, y_ind] = 63
    results /= np.amax(results)

    thresholded_results = np.zeros((GRID_SIZE, GRID_SIZE), bool)

    THRESHOLD = 0.02
    for i in range(0, GRID_SIZE):
        for j in range(0, GRID_SIZE):
            if results[i, j] >= THRESHOLD:
                thresholded_results[i, j] = 1
            else:
                thresholded_results[i, j] = 0

    #segments_fz = felzenszwalb(results, scale=100, sigma=0.5, min_size=50)
    # SLIC superpixels on the density grid (old skimage API with `ratio`)
    segments_slic = slic(results, ratio=10, n_segments=250, sigma=1)
    #segments_quick = quickshift(results, kernel_size=3, max_dist=6, ratio=0.5)

    fig = plt.figure(figsize=(30, 16))
    ax = fig.add_subplot(121, aspect='equal')
    ax.imshow(results, cmap=plt.cm.gray)

    ax = fig.add_subplot(122)
    ax.imshow(mark_boundaries(results, segments_slic))

    plt.show()
Example #15
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground

ws = watershed(edges, markers)
seg1 = ndi.label(ws == foreground)[0]

# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
            multichannel=False)

# combine the two
segj = join_segmentations(seg1, seg2)

# show the segmentations
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9, 5), sharex=True, sharey=True,
                         subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('Image')

color1 = label2rgb(seg1, image=coins, bg_label=0)
ax[1].imshow(color1, interpolation='nearest')
ax[1].set_title('Sobel+Watershed')

color2 = label2rgb(seg2, image=coins, image_alpha=0.5)
ax[2].imshow(color2, interpolation='nearest')
ax[2].set_title('SLIC superpixels')
def shape_symmetry_ratios(segmentation, angle_step=9):
    """Calculate shape symmetry ratios over a range of angles from 0 to 180 degrees.

    # Arguments :
        segmentation:  The segmented image whose shape symmetry is tested.
        angle_step: Int. The step, in degrees, used to sweep from 0 to 180. Each angle yields a symmetry
                       score for the corresponding orientation.

    # Outputs :
        ratios: The list of symmetry ratios (scores) obtained over all the angles tested.

    # Note on the metric used to calculate scores :
        The Jaccard index is used to score the symmetry between the two halves.
    """

    properties = regionprops(segmentation)
    centroid = properties[0].centroid

    angles = [-k for k in range(0, 181, angle_step)]
    ratios = [0] * len(angles)

    for angle in angles:

        rotIm = rotate(segmentation, angle, resize=True, center=centroid)
        thresh = threshold_otsu(rotIm)
        rotIm = 1 * (rotIm > thresh)

        properties = regionprops(rotIm)
        centroid = properties[0].centroid

        im2flip = rotIm[0:int(centroid[0]), 0:np.shape(rotIm)[1]]
        flipIm = np.flip(im2flip, 0)

        lenIm2compare = np.shape(rotIm)[0] - int(centroid[0])

        if (lenIm2compare > np.shape(flipIm)[0]):
            black = np.zeros(
                [np.shape(rotIm)[0] - int(centroid[0]),
                 np.shape(rotIm)[1]])
            black[0:np.shape(flipIm)[0], 0:np.shape(rotIm)[1]] = flipIm
            flipIm = black
            im2compare = rotIm[int(centroid[0]):np.shape(rotIm)[0],
                               0:np.shape(rotIm)[1]]

        else:
            black = np.zeros([int(centroid[0]), np.shape(rotIm)[1]])
            black[0:lenIm2compare, 0:np.shape(rotIm)[1]] = rotIm[
                int(centroid[0]):np.shape(rotIm)[0], 0:np.shape(rotIm)[1]]
            im2compare = black

        histoComp = histogram(im2compare)
        histoFlip = histogram(flipIm)

        if histoComp[0][-1] > histoFlip[0][-1]:
            wPix = histoComp[0][-1]
        else:
            wPix = histoFlip[0][-1]

        join = join_segmentations(flipIm, im2compare)
        histoJoin = histogram(join)
        truePix = histoJoin[0][-1]

        ratio = truePix / wPix
        ratios[int(angle / angle_step)] = 100 * ratio

    return ratios
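
The score computed above through join_segmentations and histograms is, in effect, the overlap between the flipped upper half and the lower half divided by the larger of the two; a sketch of the same measure on boolean masks (hypothetical helper):

import numpy as np

def overlap_ratio(flipped_top, bottom):
    """Overlap between two binary masks, as a percentage of the larger one."""
    flipped_top = np.asarray(flipped_top, dtype=bool)
    bottom = np.asarray(bottom, dtype=bool)
    intersection = np.count_nonzero(flipped_top & bottom)
    larger = max(np.count_nonzero(flipped_top), np.count_nonzero(bottom))
    return 100.0 * intersection / larger if larger else 0.0
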
Example #17
def getVoronoiStyle(seg_file,max_voro_area,voro_imfile,voro_imfile_2,voro_outfile,voro_transfile):
    temp = np.asarray(np.load(seg_file,allow_pickle=True)).item()
    masks = temp['masks']

    im = np.zeros_like(np.array(masks))

    fro = pd.DataFrame(measure.regionprops_table(masks, properties=['label','centroid']))


    points_mask = np.array(fro[['centroid-0','centroid-1']].to_numpy())

    vor = Voronoi(points_mask)

    my_dpi=im.shape[1]

    plt.rcParams['figure.dpi'] = my_dpi
    plt.rcParams['figure.figsize'] = ( im.shape[0]/my_dpi,im.shape[1]/my_dpi)
    fig = plt.figure();

    for simplex in vor.ridge_vertices:
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-',c='black',linewidth=.2)

    center = points_mask.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.any(simplex < 0):
            i = simplex[simplex >= 0][0] # finite end Voronoi vertex
            t = points_mask[pointidx[0]] - points_mask[pointidx[1]]  # tangent
            t = t / np.linalg.norm(t)
            n = np.array([-t[1], t[0]]) # normal
            midpoint = points_mask[pointidx].mean(axis=0)
            far_point = vor.vertices[i] + np.sign(np.dot(midpoint - center, n)) * n * 100
            plt.plot([vor.vertices[i,0], far_point[0]],
                     [vor.vertices[i,1], far_point[1]], 'k-',c='black',linewidth=.2)

    plt.xlim([0, im.shape[0]])
    plt.ylim([0, im.shape[1]])
    plt.axis('off')
    fig.tight_layout(pad=0)
    plt.savefig(voro_imfile, dpi=my_dpi, #bbox_inches='tight',#dpi=my_dpi,
                transparent=False, pad_inches=0,facecolor='white')
    plt.close()
    im2 = io.imread(voro_imfile)
    voro = (im2[:,:,0])
    voro = voro[1:-1, 1:-1]
    voro = np.pad(voro, pad_width=1, mode='constant')
    distance = ndi.distance_transform_edt(voro)
    coords = peak_local_max(distance, footprint=np.ones((1, 1)), labels=voro)
    mask = np.zeros(distance.shape, dtype=bool)
    mask[tuple(coords.T)] = True
    markers, _ = ndi.label(mask)
    labels = segmentation.watershed(-distance, markers, mask=voro)
    labels = morphology.remove_small_objects(labels, min_size=40, connectivity=1, in_place=False)
    labels = morphology.dilation(labels, morphology.square(3))
    segmasks = masks
    segmasks = morphology.dilation(segmasks,morphology.square(3))

    sizeOfSegs = pd.DataFrame(measure.regionprops_table(labels, properties=['label','area']))
    bigMasks = np.array(sizeOfSegs[sizeOfSegs['area']>=max_voro_area]['label'])
    newVorMask = np.copy(labels)[::-1,:]
    for bMI in range(len(bigMasks)):
        print("progress:"+str(bMI)+'/'+str(len(bigMasks)))
        chckMtx = (labels == bigMasks[bMI])[::-1,:]

        for i in range(len(points_mask)):
            confirm = points_mask[i]
            print(points_mask[i])
            print("---")

        tmp_cellpose_mask = (morphology.dilation((segmasks == int(fro[(fro['centroid-0']==confirm[0])&(fro['centroid-1']==confirm[1])]['label'])).T,morphology.disk(11))).astype(int)
        tmp_voronoi_mask = 2*chckMtx.astype(int)
        tmp_join = segmentation.join_segmentations(tmp_cellpose_mask,tmp_voronoi_mask)
        tmp_join = (tmp_join == np.max(tmp_join))

        newVorMask[newVorMask == bigMasks[bMI]] = 0
        newVorMask[tmp_join] = bigMasks[bMI]

    np.save(voro_outfile, newVorMask.T, allow_pickle=True, fix_imports=True)
    io.imsave(voro_imfile_2, segmentation.find_boundaries(newVorMask).T)

    oldAssign = pd.DataFrame(measure.regionprops_table(masks, properties=['label','centroid']))
    newAssign = pd.DataFrame(measure.regionprops_table(newVorMask, properties=['label','centroid']))

    rows = []

    for nlab in range(newAssign.shape[0]):
        tmpMtx = (newVorMask == newAssign['label'][nlab])
        for olab in range(oldAssign.shape[0]):
            if tmpMtx[int(np.round(oldAssign['centroid-1'][olab])), int(np.round(oldAssign['centroid-0'][olab]))]:
                rows.append((newAssign['label'][nlab], oldAssign['label'][olab]))

    # DataFrame.append was removed in pandas 2.0, so the mapping table is built in one go
    Clps2Voro = pd.DataFrame(rows, columns=["voro_label", "clps_label"])
    Clps2Voro.to_csv(voro_transfile)
Example #18
def view_all_join(gt, automated_seg, num_elem=6, axis=None):
    """Generate an interactive figure highlighting the VI error.

    Parameters
    ----------
    gt: nd-array with shape M*N.
        This corresponds to the 'ground truth'.
    automated_seg: nd-array with the same shape as gt. This
        corresponds to the automated segmentation.
    num_elem: Int, optional.
        This parameter determines the number of comps
        shown upon click. Set to 6 by default.

    Returns
    -------
    A window with six panels - the top-middle image corresponds to the
    components that are the worst false merges in the automated
    segmentation, which share significant area with the clicked-upon segment.
    Likewise, the bottom-middle image shows the worst false splits.
    """
    if gt.shape != automated_seg.shape:
        return "Input arrays are not of the same shape."
    elif not (isinstance(gt, np.ndarray) and isinstance(automated_seg, np.ndarray)):
        return "Input arrays not of valid type."
    vint = np.vectorize(int)
    # Compute the join seg of the automatic seg and the ground truth.
    joint_seg = join_segmentations(automated_seg, gt)
    # Contingency table for merges
    cont_table_m = ev.contingency_table(automated_seg, joint_seg)
    # Contingency table for splits
    cont_table_s = ev.contingency_table(joint_seg, gt)
    # Sort the VI according to the largest false merge components.
    merge_idxs_m, merge_errs_m = ev.sorted_vi_components(
        joint_seg, automated_seg)[0:2]  #merges
    #Sort the VI according to the largest false split components.
    split_idxs_s, split_errs_s = ev.sorted_vi_components(joint_seg,
                                                         gt)[0:2]  #split
    #Find the indices of these largest false merge components, and largest false splits, in descending order.
    merge_idxs_sorted, split_idxs_sorted = np.argsort(
        merge_idxs_m), np.argsort(split_idxs_s)
    #Sort the errors according to the indices.
    merge_unsorted, split_unsorted = merge_errs_m[
        merge_idxs_sorted], split_errs_s[split_idxs_sorted]
    # Color both the seg and gt according to the intensity of the split VI error.
    merge_err_img, split_err_img = merge_unsorted[
        automated_seg], split_unsorted[gt]

    if axis is None:
        fig, ax = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True)
        plt.setp(ax.flat, adjustable='box-forced')
    else:
        fig, ax = plt.subplots(nrows=len(axis) // 2,
                               ncols=len(axis) // 2,
                               sharex=True,
                               sharey=True)
        for i in range(len(axis) // 2):
            ax[0, i] = ax[i]
        for i in range(0, (len(axis) // 2)):
            ax[1, i] = ax[i + 2]

    ax[0, 0].imshow(RAW)
    viz.imshow_magma(merge_err_img, alpha=0.4, axis=ax[0, 0])
    ax[0, 1].imshow(RAW)
    axes_image_1 = viz.imshow_rand(joint_seg, alpha=0.4, axis=ax[0, 1])
    ax[0, 2].imshow(RAW)
    viz.imshow_rand(gt, alpha=0.4, axis=ax[0, 2])
    ax[1, 0].imshow(RAW)
    viz.imshow_magma(split_err_img, alpha=0.4, axis=ax[1, 0])
    ax[1, 1].imshow(RAW)
    axes_image = viz.imshow_rand(joint_seg, alpha=0.4, axis=ax[1, 1])
    ax[1, 2].imshow(RAW)
    viz.imshow_rand(automated_seg, alpha=0.4, axis=ax[1, 2])
    ax[0, 0].set_title(
        "Worst merge comps colored by VI error: click to show them on second panel."
    )
    ax[0, 1].set_title("Worst merge comps.")
    ax[0, 2].set_title("Ground Truth.")
    ax[1, 0].set_title(
        "Worst split comps colored by VI error: click to show them on second panel."
    )
    ax[1, 1].set_title("Worst split comps.")
    ax[1, 2].set_title("Automated Seg.")

    @jit
    def drawer(seg, comps, limit=True):
        """Dynamically redraw the worst split/merge comps."""
        a_seg = np.zeros_like(seg.astype('float64'))
        factor = (seg.max() // num_elem)
        lim = 0.0
        for i, (j, k, z) in enumerate(comps):
            lim += k
            if z < 0.01:
                continue
            a_seg += (seg == j) * ((i + 1) * factor)
            if limit:
                if lim >= 0.98:
                    break
        return a_seg

    @jit
    def _onpress(event):
        """Matplotlib 'onpress' event handler."""

        if not (event.inaxes == ax[1, 0] or event.inaxes == ax[0, 0]
                or event.inaxes == ax[0, 2] or event.inaxes == ax[1, 2]):
            fig.text(0.5,
                     0.5,
                     s="Must click on left or right axes to show comps!",
                     ha="center")
            fig.canvas.draw_idle()
        if event.inaxes == ax[0, 0] or event.inaxes == ax[0, 2]:
            if event.button != 1:
                return
            for txt in fig.texts:
                txt.set_visible(False)
            fig.canvas.draw()
            x, y = vint(event.xdata), vint(event.ydata)
            # Find the indices of the false merge bodies overlapping with the coordinates of the mouse click.
            worst_merge_comps_m = ev.split_components(automated_seg[y, x],
                                                      num_elems=None,
                                                      cont=cont_table_m,
                                                      axis=0)
            new_seg_m = drawer(joint_seg, worst_merge_comps_m, limit=False)
            axes_image_1.set_array(new_seg_m)

            fig.canvas.draw()

        if event.inaxes == ax[1, 0] or event.inaxes == ax[1, 2]:
            if event.button != 1:
                return
            for txt in fig.texts:
                txt.set_visible(False)
            fig.canvas.draw()
            x, y = vint(event.xdata), vint(event.ydata)
            # Find the indices of the false split bodies overlapping with the coordinates of the mouse click.
            worst_split_comps_s = ev.split_components(gt[y, x],
                                                      num_elems=None,
                                                      cont=cont_table_s,
                                                      axis=1)
            new_seg_s = drawer(joint_seg, worst_split_comps_s)
            axes_image.set_array(new_seg_s)

            fig.canvas.draw()

    fig.canvas.mpl_connect('button_press_event', _onpress)
    plt.ioff()
    plt.show()
def withinLesionPatchesExtractor(image, segImage, patchSize):
    """Extract patches only taken within the lesion.

    # Arguments :
        image:     The dermoscopic image where the patches are taken.
        segImage:  The corresponding segmented image.
        patchSize: Int. The size of the patches taken. For example, if `patchSize` = 32, the function takes
                   32*32 patches.

    # Outputs :
        k:       The number of patches created.
        points:  The list of points used to create the patches. Each point corresponds to the patch's upper left
                 corner
        patches: The list of patches created.
    """

    histoSeg = histogram(segImage)
    numPix = histoSeg[0][-1]

    numPatchLine = np.shape(image)[1] // patchSize
    numPatchCol = np.shape(image)[0] // patchSize

    points = []
    blk = np.zeros(np.shape(segImage))
    k = 0

    patches = []

    for countLine in range(0, numPatchLine):
        for countCol in range(0, numPatchCol):

            start = (countCol * patchSize, countLine * patchSize)
            extent = (patchSize, patchSize)
            rr, cc = rectangle(start, extent=extent)
            blk[rr, cc] = 1

            join = join_segmentations(blk, segImage)
            histoJoin = histogram(join)

            # Extract patches beginning at 0;0.
            if histoJoin[0][
                    -1] == patchSize * patchSize and histoJoin[0][1] != numPix:
                points.append([countCol * patchSize, countLine * patchSize])
                patch = img_as_ubyte(
                    image[countCol * patchSize:countCol * patchSize +
                          patchSize, countLine *
                          patchSize:countLine * patchSize + patchSize])
                patches.append(patch)
                k += 1
            blk[rr, cc] = 0

            # Extract patches beginning at 0 + patchSize/2;0 + patchSize/2.
            if countCol * patchSize + int(3 * patchSize / 2) < np.shape(
                    image)[0] and countLine * patchSize + int(
                        3 * patchSize / 2) < np.shape(image)[1]:

                start = (countCol * patchSize + int(patchSize / 2),
                         countLine * patchSize + int(patchSize / 2))
                extent = (patchSize, patchSize)
                rr, cc = rectangle(start, extent=extent)
                blk[rr, cc] = 1

                join = join_segmentations(blk, segImage)
                histoJoin = histogram(join)

                if histoJoin[0][-1] == patchSize * patchSize and histoJoin[0][
                        1] != numPix:
                    points.append([
                        countCol * patchSize + int(patchSize / 2),
                        countLine * patchSize + int(patchSize / 2)
                    ])
                    patch = img_as_ubyte(
                        image[countCol * patchSize +
                              int(patchSize / 2):countCol * patchSize +
                              patchSize + int(patchSize / 2),
                              countLine * patchSize +
                              int(patchSize / 2):countLine * patchSize +
                              patchSize + int(patchSize / 2)])
                    patches.append(patch)
                    k += 1
            blk[rr, cc] = 0

    return (k, points, patches)
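
The join/histogram test used above to decide whether a patch falls entirely inside the lesion can be phrased directly on the mask; a sketch (hypothetical helper, with the same rr, cc coordinates produced by rectangle):

import numpy as np

def patch_inside_lesion(seg_image, rr, cc):
    """True if every pixel of the patch footprint lies on the lesion mask."""
    return bool(np.all(seg_image[rr, cc] > 0))
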
Example #20
		{  2.0,  3.0,  1.0,  0.0,  0.0,  1.0,  3.0,  2.0 }
	};

    image_object = filters.normalize(alpha=0.01, beta=0.1, scale='absolute')
    image_object_gray = color.rgb2gray(image_object)
    absolute_best_segmented_image = None
    no_of_segments_in_abs_best = 1
    iterations_till_now_for_t = 0

    for t in range(25, int(np.max(image_object_gray))):
        try:
            new_segmented_image = seg.inverse_gaussian_gradient(image_object_gray)
            selected_threshold = t
            iterations_till_now_for_s = 0
            for s in range(1, 255):
                single_88_positive_image = new_segmented_image.filter(single_88_positive)
                single_88_negative_image = new_segmented_image.filter(single_88_negative)
                new_segmented_image = min(single_88_positive_image, single_88_negative_image)
                new_segmented_image = seg.join_segmentations(image_object_gray, new_segmented_image)
                if quality(new_segmented_image) > quality(absolute_best_segmented_image):
                    absolute_best_segmented_image = new_segmented_image
                    no_of_segments_in_abs_best = s
                    iterations_till_now_for_s += 1
            iterations_till_now_for_t += 1
        except Exception:
            print("Error: Finding optimized image is not possible for given image.")
        finally:
            print("No runtime error till now, internally.")

    return absolute_best_segmented_image, no_of_segments_in_abs_best
    imsave(
        path.join(out_exp_dir, "labs_{}.tif".format(chan_names[syn_type][2])),
        ch2_labs.astype("uint16"))

    ch1_props = regionprops(ch1_labs)
    ch2_props = regionprops(ch2_labs)

    log(
        "    Found {} {} clusters".format(len(ch1_props),
                                          chan_names[syn_type][1]), logf)

    log(
        "    Found {} {} clusters".format(len(ch2_props),
                                          chan_names[syn_type][2]), logf)

    merged_labs = join_segmentations(ch1_labs, ch2_labs)

    merged_labs = merged_labs * (ch2_labs > 0) * (ch1_labs > 0)

    merged_props = regionprops(merged_labs)

    log("  Found {} synapses".format(len(merged_props)), logf)

    if generate_RGB:
        imsave(
            path.join(out_exp_dir, "merged_labs_col.tiff"),
            label2rgb(merged_labs,
                      bg_label=0,
                      colors=[[randint(255),
                               randint(255),
                               randint(255)] for i in range(len(merged_props))
Example #22
def merge_segmentations(s1, s2):
    merged_s = segmentation.join_segmentations(s1, s2)
    
    return merged_s
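
A toy call of this wrapper (arrays made up), showing that every distinct pair of input labels gets its own output label:

import numpy as np
from skimage import segmentation

s1 = np.array([[1, 1, 2, 2],
               [1, 1, 2, 2]])
s2 = np.array([[1, 2, 2, 2],
               [1, 2, 2, 2]])
merged = merge_segmentations(s1, s2)
print(np.unique(merged).size)  # 3 distinct (s1, s2) pairs -> 3 labels
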
Example #23
def randomPatchForDataset(image, segImage, patchSize, num, index):
    """Create a dataset of "a" randomly taken patches in a dermoscopic image and save them in the folder
       "patchesDataSet". They will be used to create pairs of "a" and "b" patches.
       Note that patches over borders are ignored.

            # Arguments :
                image:     The image in which the patches are randomly taken.
                segImage:  The corresponding segmented image.
                patchSize: Int. The size of the patches taken. For example, if `patchSize` = 32, the function takes
                           32*32 patches.
                num:       Int. The number of patches wanted.
                index:     Int. Offset used so that saved patches are given the right file names.

            # Outputs :
                points:  The list of points randomly taken to create the patches. Each point corresponds to the patch's
                         upper left corner.
                inOrOut: A list of integer coefficients corresponding to each point randomly taken.
                                    1: the corresponding patch is within the lesion
                                    0: the corresponding patch is out of the lesion

            # Note on folders organisation :
                A folder named "patchesDataSet" has to already exist.
            """

    histoSeg = histogram(segImage)
    numPix = histoSeg[0][-1]
    points = []
    inOrOut = []

    blk = np.zeros(np.shape(segImage))

    k = index
    while k != num + index:
        ligne = randint(0, np.shape(image)[0] - patchSize - 1)
        col = randint(0, np.shape(image)[1] - patchSize - 1)

        start = (ligne, col)
        extent = (patchSize, patchSize)
        rr, cc = rectangle(start, extent=extent)
        blk[rr, cc] = 1

        join = join_segmentations(blk, segImage)
        histoJoin = histogram(join)

        conditionNumPix = histoJoin[0][-1] == patchSize * patchSize

        if conditionNumPix:

            if histoJoin[0][1] == numPix:
                inOrOut.append(0)

            else:
                inOrOut.append(1)

            points.append([ligne, col])
            rdpatch = img_as_ubyte(image[ligne:ligne + patchSize,
                                         col:col + patchSize])
            imsave(
                f"{package_path()}/data/patchesDataSet/patch" + str(k) +
                "a.bmp", rdpatch)
            k += 1

        blk[rr, cc] = 0

    return (points, inOrOut)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground

ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]

# make segmentation using SLIC superpixels

# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, n_segments=30, max_iter=160, sigma=1, ratio=9,
            convert2lab=False)

# combine the two
segj = join_segmentations(seg1, seg2)

### Display the result ###

# make a random colormap for a set number of values
def random_cmap(im):
    np.random.seed(9)
    cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))
    return mpl.colors.ListedColormap(cmap_array)

# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
Example #25
model_graph, mean_vertex, std_vertex, mean_edge, std_edge = normalize_graph(model_graph)
print("Model:",represent_srg(model_graph, class_names=class_names))

# Step 3: Generating observation
# -----------------------
# Applying gradient
smoothed = ndi.gaussian_filter(observation_volume.data, (5,5,1))
smoothed = smoothed/np.max(smoothed) # normalization for magnitude
#display_volume(smoothed, cmap="gray")
magnitude = np.sqrt(ndi.filters.sobel(smoothed, axis=0)**2 + ndi.filters.sobel(smoothed, axis=1)**2 + ndi.filters.sobel(smoothed, axis=2)**2)
#display_volume(magnitude, cmap="gray", title="Magnitude")
observed_labelmap_data = watershed(magnitude, markers=500, compactness=0.001)-1

# overlaying with the TRUE borders of the liver
from skimage.segmentation import join_segmentations
observed_labelmap_data = join_segmentations(observed_labelmap_data, (model_labelmap.data == 10).astype(int))

display_segments_as_lines(observation_volume.data, observed_labelmap_data)
#display_volume(observed_labelmap_data,cmap=ListedColormap(np.random.rand(255,3)))
#display_overlayed_volume(observation_volume.data, observed_labelmap_data, label_colors=np.random.rand(255,3),width=1,level=0.5)

# Step 4: Generating super-observation graph
# -----------------------
super_graph = build_graph(observation_volume.data, observed_labelmap_data, add_edges=False)
super_graph = normalize_graph(super_graph,mean_vertex, std_vertex, mean_edge, std_edge)
super_adjacency = rag.RAG(observed_labelmap_data)
# print("Superobservation:",represent_srg(super_graph))

# Step 5: Generating initial solution
# -----------------------
solution = np.empty(super_graph.vertices.shape[0])
Example #26
def datasetCreator(patchesPerImage, patchSize, overlap):
    """Create a dataset of patches. This dataset is composed of similar and non similar pairs of "a" and "b" patches.

            # Arguments :
                patchesPerImage: Int. The number of patches wanted for each image.
                patchSize:       Int. The size of the patches taken. For example, if `patchSize` = 32, the function takes
                                 32*32 patches.
                overlap:         Int. Similar pairs of patches are created by shifting the "a" patch by `overlap` pixels
                                 to obtain the "b" patch.

            # Outputs :
                Only saves the patches into a folder named "patchesDataSet", then sorts the created patches into two new
                folders (within the "patchesDataSet" folder): "Similar" and "nonSimilar".
            """
    os.makedirs(f'{package_path()}/data/patchesDataSet/',
                exist_ok=True)  # Make sure dirs exist.
    df = pd.read_excel(f"{package_path()}/symtab.xlsx")
    ims = df["Image Name"]
    ims = list(ims)

    # ---------------Creation of the "a" patches-----------------
    images = ims
    index = 0
    allPoints = []
    allInorOut = []

    for img in images:
        segIm = load_segmentation(img)

        contour = find_contours(segIm, 0)
        cnt = contour[0]
        minx = min(cnt[:, 1])
        maxx = max(cnt[:, 1])
        maxy = min(cnt[:, 0])
        miny = max(cnt[:, 0])
        segIm = segIm[int(maxy):int(miny), int(minx):int(maxx)]

        im = load_dermoscopic(img)

        imCrop = im[int(maxy):int(miny), int(minx):int(maxx)]

        points, inOrOut = randomPatchForDataset(imCrop, segIm, patchSize,
                                                patchesPerImage, index)
        allPoints += [points]
        allInorOut += [inOrOut]

        index += patchesPerImage

    #---------------Creation of the "b" patches (to have pairs of patches "a" and "b")-----------------
    for countIndex in range(len(images)):

        segIm = load_segmentation(images[countIndex])

        contour = find_contours(segIm, 0)
        cnt = contour[0]
        minx = min(cnt[:, 1])
        maxx = max(cnt[:, 1])
        maxy = min(cnt[:, 0])
        miny = max(cnt[:, 0])
        segIm = segIm[int(maxy):int(miny), int(minx):int(maxx)]

        im = load_dermoscopic(images[countIndex])

        imCrop = im[int(maxy):int(miny), int(minx):int(maxx)]

        pts = allPoints[countIndex]
        ioo = allInorOut[countIndex]

        histoSeg = histogram(segIm)
        numPix = histoSeg[0][-1]

        blk00 = np.zeros(np.shape(segIm))
        blk01 = np.zeros(np.shape(segIm))
        blk10 = np.zeros(np.shape(segIm))
        blk11 = np.zeros(np.shape(segIm))

        k = 0
        for c in range(int(len(pts) / 2)):

            start00 = (pts[c][0] + overlap, pts[c][1])
            start01 = (pts[c][0] - overlap, pts[c][1])
            start10 = (pts[c][0], pts[c][1] + overlap)
            start11 = (pts[c][0], pts[c][1] - overlap)
            extent = (patchSize, patchSize)

            if pts[c][0] + overlap + patchSize < np.shape(imCrop)[0]:
                rr, cc = rectangle(start00, extent=extent)
                blk00[rr, cc] = 1
            if pts[c][0] - overlap > 0:
                rr, cc = rectangle(start01, extent=extent)
                blk01[rr, cc] = 1
            if pts[c][1] + overlap + patchSize < np.shape(imCrop)[1]:
                rr, cc = rectangle(start10, extent=extent)
                blk10[rr, cc] = 1
            if pts[c][1] - overlap > 0:
                rr, cc = rectangle(start11, extent=extent)
                blk11[rr, cc] = 1

            join00 = join_segmentations(blk00, segIm)
            histoJoin00 = histogram(join00)
            join01 = join_segmentations(blk01, segIm)
            histoJoin01 = histogram(join01)
            join10 = join_segmentations(blk10, segIm)
            histoJoin10 = histogram(join10)
            join11 = join_segmentations(blk11, segIm)
            histoJoin11 = histogram(join11)

            if histoJoin00[0][-1] == patchSize * patchSize:
                patch = img_as_ubyte(
                    imCrop[pts[c][0] + overlap:pts[c][0] + overlap + patchSize,
                           pts[c][1]:pts[c][1] + patchSize])
                imsave(
                    f"{package_path()}/data/patchesDataSet/patch" +
                    str(k + countIndex * patchesPerImage) + "b.bmp", patch)
            elif histoJoin01[0][-1] == patchSize * patchSize:
                patch = img_as_ubyte(
                    imCrop[pts[c][0] - overlap:pts[c][0] - overlap + patchSize,
                           pts[c][1]:pts[c][1] + patchSize])
                imsave(
                    f"{package_path()}/data/patchesDataSet/patch" +
                    str(k + countIndex * patchesPerImage) + "b.bmp", patch)
            elif histoJoin10[0][-1] == patchSize * patchSize:
                patch = img_as_ubyte(imCrop[pts[c][0]:pts[c][0] + patchSize,
                                            pts[c][1] + overlap:pts[c][1] +
                                            overlap + patchSize])
                imsave(
                    f"{package_path()}/data/patchesDataSet/patch" +
                    str(k + countIndex * patchesPerImage) + "b.bmp", patch)
            elif histoJoin11[0][-1] == patchSize * patchSize:
                patch = img_as_ubyte(imCrop[pts[c][0]:pts[c][0] + patchSize,
                                            pts[c][1] - overlap:pts[c][1] -
                                            overlap + patchSize])
                imsave(
                    f"{package_path()}/data/patchesDataSet/patch" +
                    str(k + countIndex * patchesPerImage) + "b.bmp", patch)
            else:
                patch = img_as_ubyte(imCrop[pts[c][0]:pts[c][0] + patchSize,
                                            pts[c][1]:pts[c][1] + patchSize])
                imsave(
                    f"{package_path()}/data/patchesDataSet/patch" +
                    str(k + countIndex * patchesPerImage) + "b.bmp", patch)

            blk00[rr, cc] = 0
            blk01[rr, cc] = 0
            blk10[rr, cc] = 0
            blk11[rr, cc] = 0

            k += 1

        for idx in range(int(patchesPerImage / 2), int(len(ioo))):

            indexesZero = []
            indexesOne = []
            coeff = ioo[idx]

            ind = 0

            for digit in ioo:

                if digit == 0:
                    indexesZero.append(ind)

                else:
                    indexesOne.append(ind)
                ind += 1

            # If the "a" patch treated is within the lesion, then the corresponding "b" patch is chosen out of it. If there
            # are only in or only out patches, then take randomly a patch from another image
            #TODO : take care about the random choice in lines 236 and 249 (must have enough patches to make choice, eg : if
            # patchesPerImage=10 and have to make a choice for the 6th patch then you make a random choice in (0;-4) which
            # is impossible.
            if coeff == 1:
                if indexesZero != []:
                    rdInd = choice(indexesZero)
                    rdPt = pts[rdInd]
                    patch = img_as_ubyte(imCrop[rdPt[0]:rdPt[0] + patchSize,
                                                rdPt[1]:rdPt[1] + patchSize])
                    imsave(
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(idx + countIndex * patchesPerImage) + "b.bmp",
                        patch)
                else:
                    actual = idx + countIndex * patchesPerImage
                    rdIdx = randint(0, actual - patchesPerImage)
                    shutil.copyfile(
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(rdIdx) + "a.bmp",
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(actual) + "b.bmp")

            else:
                if indexesOne != []:
                    rdInd = choice(indexesOne)
                    rdPt = pts[rdInd]
                    patch = img_as_ubyte(imCrop[rdPt[0]:rdPt[0] + patchSize,
                                                rdPt[1]:rdPt[1] + patchSize])
                    imsave(
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(idx + countIndex * patchesPerImage) + "b.bmp",
                        patch)
                else:
                    actual = idx + countIndex * patchesPerImage
                    rdIdx = randint(0, actual - patchesPerImage)
                    shutil.copyfile(
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(rdIdx) + "a.bmp",
                        f"{package_path()}/data/patchesDataSet/patch" +
                        str(actual) + "b.bmp")

    #---------------Move created pairs of patches to the Similar or nonSimilar folder-----------------
    for compteur in range(0, (len(ims) * patchesPerImage)):
        crit = compteur // int(patchesPerImage / 2)
        if (crit % 2 == 0):
            shutil.move(
                f"{package_path()}/data/patchesDataSet/patch" + str(compteur) +
                "a.bmp",
                f"{package_path()}/data/patchesDataSet/Similar/patch" +
                str(compteur) + "a.bmp")
            shutil.move(
                f"{package_path()}/data/patchesDataSet/patch" + str(compteur) +
                "b.bmp",
                f"{package_path()}/data/patchesDataSet/Similar/patch" +
                str(compteur) + "b.bmp")

        else:
            shutil.move(
                f"{package_path()}/data/patchesDataSet/patch" + str(compteur) +
                "a.bmp",
                f"{package_path()}/data/patchesDataSet/nonSimilar/patch" +
                str(compteur) + "a.bmp")
            shutil.move(
                f"{package_path()}/data/patchesDataSet/patch" + str(compteur) +
                "b.bmp",
                f"{package_path()}/data/patchesDataSet/nonSimilar/patch" +
                str(compteur) + "b.bmp")