Example #1
    def normalize_filtered_image(self, img, fissure_image):
        prefiltered = self.remove_salt_noise(img)
        gauss_img = gaussian(prefiltered, sigma=1.5)
        max_val = np.max(gauss_img)
        min_val = np.min(gauss_img)

        # normalize the raw image with the smoothed image's range and clip to [0, 1]
        img_norm = (img - min_val) / (max_val - min_val)
        img_norm[img_norm > 1.0] = 1.0
        img_norm[img_norm < 0.0] = 0.0

        # The following fragments come from other methods of the same class
        # (per-channel Laplacian spot detection and intensity measurement); the
        # names they use (laplace_img, i, log, h, eps, gauss_threshold, ws,
        # intensities, channel, perc_for_vis, maxvals, minvals) are defined there.
        # laplace_img[:, :, i] = laplace(gauss_img[:, :, i], 3)
        # laplace_img[fissure_image > 0] = -1
        # laplace_max = np.max(laplace_img)
        #
        # spots = h_maxima(log, h=h)
        # spots[log <= eps] = 0
        # spots[gauss_img <= gauss_threshold] = 0
        #
        # lab = label(spots)
        # properties = regionprops(lab)
        # coordinates = [properties[i]['centroid'] for i in range(len(properties))]
        #
        # props = regionprops(ws, gauss_img[:, :, i])
        # intensities['gauss'][channel] = np.array([props[k]['mean_intensity'] for k in range(len(props))])
        # perc = np.percentile(gauss_img[:, :, i], perc_for_vis)
        # maxvals['gauss'][channel] = perc[1]
        # minvals['gauss'][channel] = perc[0]

        return img_norm
Example #2
    def _segment_nuclei_watershed(img,
                                  imgCyto=[],
                                  voronoi=None,
                                  cellsize=5,
                                  hThresh=0.001):
        from skimage import filters, measure
        from skimage.util import invert
        #from skimage.morphology import watershed
        from skimage.segmentation import watershed
        from oyLabImaging.Processing.improcutils import trithresh, awt, imimposemin
        from skimage.feature import peak_local_max
        from scipy.ndimage import distance_transform_edt
        from skimage.morphology import erosion, dilation, opening, closing, h_maxima, disk

        #wavelet transform and SAR
        W = awt(img, 9)
        img = np.sum(W[:, :, 1:8], axis=2)

        if voronoi is None:
            #Smoothen
            voronoi = {}
            imgSmooth = filters.gaussian(img, sigma=cellsize)
            img_hmax = h_maxima(imgSmooth, hThresh)  #threshold
            coordinates = peak_local_max(img_hmax, footprint=np.ones((30, 30)))
            RegionMax = np.zeros_like(img, dtype=bool)
            RegionMax[tuple(coordinates.T)] = True
            RegionMax = RegionMax.astype('int')
            se = disk(cellsize)
            RegionMax = closing(RegionMax, se)
            imgBW = dilation(RegionMax, se)
            dt = distance_transform_edt(1 - imgBW)
            DL = watershed(dt, watershed_line=1)
            RegionBounds = DL == 0  #the region bounds are==0 voronoi cells
            voronoi['imgBW'] = imgBW
            voronoi['RegionBounds'] = RegionBounds

        imgBW = voronoi['imgBW']
        RegionBounds = voronoi['RegionBounds']

        #gradient magnitude
        GMimg = filters.sobel(filters.gaussian(img, sigma=cellsize))
        GMimg[np.logical_or(imgBW, RegionBounds)] = 0
        L = watershed(GMimg, markers=measure.label(imgBW), watershed_line=1)

        #We use regionprops
        props = measure.regionprops(L)
        Areas = np.array([r.area for r in props])

        #remove BG region and non-cells
        BG = [i for i, val in enumerate(Areas > 10000) if val]

        if BG:
            for i in np.arange(len(BG)):
                L[L == BG[i] + 1] = 0

        L = measure.label(L)
        return L
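
A minimal, self-contained sketch of the same pattern (h-maxima of a smoothed image used as watershed seeds); the array names and parameter values below are illustrative and not taken from the function above:

import numpy as np
from skimage.filters import gaussian, sobel
from skimage.measure import label
from skimage.morphology import h_maxima
from skimage.segmentation import watershed

rng = np.random.default_rng(0)
rr, cc = np.ogrid[:128, :128]
blobs = sum(np.exp(-((rr - r) ** 2 + (cc - c) ** 2) / 150.0)
            for r, c in [(40, 40), (45, 80), (90, 60)])
blobs = blobs + 0.05 * rng.random((128, 128))      # three bright blobs plus noise

smooth = gaussian(blobs, sigma=3)
seeds = h_maxima(smooth, 0.05)                     # drop maxima shallower than h=0.05
markers = label(seeds)                             # one integer label per seed
gradient = sobel(smooth)                           # flood the smoothed gradient magnitude
labels = watershed(gradient, markers=markers, watershed_line=True)
print(labels.max())                                # 3 seeded regions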
Example #3
def construct_weighted_graph(bin_image, local_max_h=2):
    '''
    Construct an edge-weight graph from a binary cell image.
    :param bin_image: cell binary image
    :param local_max_h: h used by h_maxima to pick seed points on the distance transform
    :return point_list: all points embedded in the triangulation, used for location queries
    :return edge_list: list of edges in the triangulation
    :return edge_weight_list: edge weights corresponding to the edge list.
    '''
    volume_shape = bin_image.shape
    bin_cell = ndimage.binary_opening(bin_image).astype(float)
    bin_memb = bin_cell == 0
    bin_cell_edt = ndimage.distance_transform_edt(bin_cell)

    # get local maximum SegMemb
    local_maxima_mask = h_maxima(bin_cell_edt, local_max_h)
    [maxima_x, maxima_y, maxima_z] = np.nonzero(local_maxima_mask)
    #  find boundary points to force large weight
    x0 = np.where(maxima_x == 0)[0]
    x1 = np.where(maxima_x == volume_shape[0] - 1)[0]
    y0 = np.where(maxima_y == 0)[0]
    y1 = np.where(maxima_y == volume_shape[1] - 1)[0]
    z0 = np.where(maxima_z == 0)[0]
    z1 = np.where(maxima_z == volume_shape[2] - 1)[0]
    b_indx = np.concatenate((x0, y0, z0, x1, y1, z1), axis=None).tolist()
    point_list = np.stack((maxima_x, maxima_y, maxima_z), axis=1)
    tri_of_max = Delaunay(point_list)
    triangle_list = tri_of_max.simplices
    edge_list = []
    for i in range(triangle_list.shape[0]):
        for combine_pairs in combinations(triangle_list[i].tolist(), r=2):
            edge_list.append([combine_pairs[0], combine_pairs[1]])
    # add edges for all boundary points
    for i in range(len(b_indx)):
        for j in range(i + 1, len(b_indx)):  # skip j == i to avoid self-edges
            one_point = b_indx[i]
            another_point = b_indx[j]
            if ([one_point, another_point]
                    in edge_list) or ([another_point, one_point] in edge_list):
                continue
            edge_list.append([one_point, another_point])

    weights_volume = bin_memb * 10000  # construct weights volume for graph
    edge_weight_list = []
    for one_edge in edge_list:
        start_x0 = point_list[one_edge[0]]
        end_x1 = point_list[one_edge[1]]
        if (one_edge[0] in b_indx) and (one_edge[1] in b_indx):
            edge_weight = 0  # All edges between boundary points are set as zero
        elif (one_edge[0] in b_indx) or (one_edge[1] in b_indx):
            edge_weight = 10000 * 10
        else:
            edge_weight = line_weight_integral(start_x0, end_x1,
                                               weights_volume)

        edge_weight_list.append(edge_weight)

    return point_list.tolist(), edge_list, edge_weight_list
def detect(img, T, h, d):

    p1 = peak_local_max(img, min_distance=int(np.round(d)), threshold_abs=T)
    p2 = np.stack(np.nonzero(h_maxima(img, h)), axis=1)

    p1 = set([tuple(x) for x in p1.tolist()])
    p2 = set([tuple(x) for x in p2.tolist()])

    detections = list(p1.intersection(p2))

    return detections
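
A small usage sketch for detect() on a synthetic image, assuming the function above and the imports it relies on live in the same module; the threshold values are illustrative only:

import numpy as np
from skimage.feature import peak_local_max
from skimage.filters import gaussian
from skimage.morphology import h_maxima

img = np.zeros((64, 64))
img[20, 20] = 1.0
img[40, 45] = 0.8
img = gaussian(img, sigma=2)

print(detect(img, T=0.005, h=0.002, d=3))   # both detectors agree on the two peak pixels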
Example #5
def show_Connected():
    img1 = imread('1.png', True)
    img2 = np.zeros_like(img1).astype(np.int16)
    img2 = show_analysis(img1)

    ls = regionprops(img2)
    #filter by eccentricity
    lst = np.array([0] * (len(ls) + 1))
    #filter by area and eccentricity
    for i in ls:
        if i.eccentricity < 0.7: lst[i.label] = 0
        else: lst[i.label] = i.label
    img1 = lst[img2.astype(np.int32)]

    #must use copy() here, otherwise it changes along with the original
    img_temp1 = img1.copy()

    #labels are unchanged after the removal, so relabel
    label(img1, generate_binary_structure(2, 2), output=img2)

    #invert the distance-transform image
    lst = [i for i in range(256)]
    lst = np.array(lst[::-1])
    img2 = distance_transform_edt(img2)

    markers = h_maxima(img2, 2)
    markers = binary_dilation(markers,
                              structure=generate_binary_structure(2, 2))
    label(markers, generate_binary_structure(2, 2), img1)
    img2 = lst[img2.astype(np.int32)]

    #watershed
    img2 = watershed(img2, img1, watershed_line=True)
    lst = np.array([1] + [0] * 254)
    #extract the boundary
    edg = lst[img2]

    plt.subplot(121)
    lst = np.array([0] + [1] * 254)
    img1 = lst[img_temp1]
    plt.imshow(img1)

    plt.subplot(122)
    img2 = edg * img1 + img1
    plt.imshow(img2)

    plt.show()
def segment_image(img, params=None):
    """
    ********************
    :param img:
    :param params:
    :return:
    """
    if params is None:
        params = {
                'cell_size_est': 0.17,  # ???
                'background_blur': 130,  # ??
                'image_blur': 1.5,  # gaussian kernel
                'block_size': 101,  # 2n-1 (1-inf)
                'thresh': 0.01,  # for binarization
                'smallest_object': 60,  # pixels
                'dist_intensity_ratio': 0.75,  # 0-1 weight
                'separation_distance': 8,  # pixels
                'edge_filter_blur': 2.0,  # kernel width in pixels
                'watershed_ratio': 0.15,  # 0-1 ratio of distance from edge vs bwgeodesic
                 }

    img_arr = np.array(img).astype(float)  # convert to numpy array
    img_norm = img_arr/np.max(img_arr)  # normalized img (0-1)
    img_pad = np.pad(img_norm, [1, 1], 'constant')  # pads edge with 0s
    imgSmooth = gaussian(img_pad, sigma=params['cell_size_est'])  # gaussian filter

    # Threshold
    img_hmax = h_maxima(imgSmooth, params['thresh'])
    img_hmax[img_hmax == 1] = params['thresh']  # replace maxima with thresh value
    local_max_ixs = peak_local_max(img_hmax)
    RegionMax = img_hmax.copy()
    RegionMax[local_max_ixs[:, 0], local_max_ixs[:, 1]] = 1  # replace local maxima with 1s
    # RegionMax = filters.median(RegionMax)  # despeckle/clean array
    I = imgSmooth.copy()
    I[RegionMax.astype(bool)] = 1  # set the ceiling to 1 (boolean index, not float)
    # imgBW = ...  (the right-hand side of this assignment is truncated in the source)


    # Generate Threshold
    # img_blur = threshold_local(img_arr, params['block_size'])  # functionally blurs image
    # img_bin = img_arr > img_blur  # binarize from threshold
    # img_thresh = filters.median(img_bin)  # despeckle binary image


    img_sobel = sobel(img_arr)  # sobel magnitude sqrt(sobel_h^2 + sobel_v^2)
Example #7
def wbc_segmentation_hsv(img, outputdir=None):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    gray = 255 - hsv[:, :, 1]

    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

    thresh_clean = 255 * morphology.remove_small_objects(
        thresh.astype(bool), min_size=2500, connectivity=4).astype(np.uint8)

    kernel = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(thresh_clean,
                               cv2.MORPH_OPEN,
                               kernel,
                               iterations=2)
    sure_bg = cv2.erode(opening, kernel, iterations=2)
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    h_max = h_maxima(dist_transform, 1)
    h_max = cv2.dilate(h_max, kernel, iterations=3)
    #h_max = np.ma.masked_less(h_max,1)
    #cv2.imwrite('bfs_threshold.png', thresh)
    #cv2.imwrite('bfs_opening.png', opening)

    #ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
    ret, sure_fg = cv2.threshold(h_max, 0.3 * h_max.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    bw = (markers > 1).astype(int)
    #bw=binary_fill_holes(bw).astype(np.uint8)

    #img[markers == -1] = [255,0,0]

    #bw_clean=255*morphology.remove_small_objects(bw.astype(bool), min_size=1000, connectivity=4).astype(np.uint8)
    #cv2.imwrite('bfs_cleaning.png', 255*bw_clean)
    #rbc_gone = 255*remove_small_objects(bw_clean.astype(bool), min_size=30000, connectivity=4).astype(np.uint8)
    #rbc_only = cv2.subtract(255*bw_clean, rbc_gone)
    #cv2.imwrite(outputdir +'.jpg', img)
    return thresh_clean, 255 * bw.astype(np.uint8)  #bw_clean
Example #8
def rbc_segmentation(img, outputdir=None):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh = cv2.threshold(gray, 127, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    sure_bg = cv2.dilate(opening, kernel, iterations=2)
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    h_max = h_maxima(dist_transform, 5)
    #h_max = cv2.dilate(h_max,kernel, iterations=3)
    #h_max = np.ma.masked_less(h_max,1)
    cv2.imwrite('bfs_threshold.png', thresh)
    cv2.imwrite('bfs_opening.png', opening)
    #plt.figure()
    #plt.imshow(img)
    #plt.imshow(h_max)
    #plt.show()
    #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0)
    ret, sure_fg = cv2.threshold(h_max, 0.3 * h_max.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    bw = (markers > 1).astype(int)
    bw = binary_fill_holes(bw).astype(np.uint8)

    #img[markers == -1] = [255,0,0]

    bw_clean = morphology.remove_small_objects(bw.astype(bool),
                                               min_size=1000,
                                               connectivity=4).astype(np.uint8)
    cv2.imwrite('bfs_cleaning.png', 255 * bw_clean)
    rbc_gone = 255 * remove_small_objects(
        bw_clean.astype(bool), min_size=19000, connectivity=4).astype(np.uint8)
    rbc_only = cv2.subtract(255 * bw_clean, rbc_gone)
    #cv2.imwrite(outputdir +'.jpg', img)
    return distance_transform_edt((rbc_only > 0).astype(np.uint8)), rbc_only
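
Both OpenCV pipelines above split touching objects by taking h-maxima of a distance transform as watershed seeds. A minimal skimage-only sketch of that core idea on two overlapping disks (all values illustrative):

import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.draw import disk
from skimage.measure import label
from skimage.morphology import h_maxima
from skimage.segmentation import watershed

mask = np.zeros((100, 100), dtype=bool)
rr, cc = disk((50, 40), 20)
mask[rr, cc] = True
rr, cc = disk((50, 70), 20)
mask[rr, cc] = True                       # two overlapping disks

dist = distance_transform_edt(mask)
seeds = label(h_maxima(dist, 2))          # one seed per object, shallow maxima suppressed
split = watershed(-dist, seeds, mask=mask)
print(split.max())                        # 2 separated objects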
Example #9
    def findCenters(self, img):
        u = time.time()

        img -= morphology.area_opening(img, area_threshold=3500)
        ##print(time.time()-u)
        img = morphology.opening(img, morphology.rectangle(19, 1))
        img = morphology.opening(img, morphology.rectangle(1, 17))
        u = time.time()
        tim = time.time()
        img2 = morphology.h_maxima(img, 5)
        img2 = morphology.dilation(img2, morphology.disk(4))

        img2 = label(img2)

        num_things = round(np.max(img2))

        colors_arr = [[] for i in range(num_things)]
        for i in range(len(img2)):
            for j in range(len(img2[i])):
                if round(img2[i][j]) != 0:
                    colors_arr[round(img2[i][j] - 1)].append((i, j))

        centers = []
        for i in range(len(colors_arr)):
            count = 0
            totalcol = 0
            totalrow = 0
            for j in colors_arr[i]:

                totalcol += j[1]
                totalrow += j[0]
                count += 1
            totalcol /= count
            totalrow /= count
            centers.append((int(totalrow), int(totalcol)))
        ####print(time.time()-u)
        ##print(time.time()-u)
        for i in range(4):
            centers = analysis.find_RL_UD(img, centers)
        centers = self.clear_near(centers)
        return centers
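
The nested loops above recover each marker's centre by hand; skimage.measure.regionprops returns the same centroids directly. A self-contained sketch on a synthetic blob (parameter values illustrative):

import numpy as np
from skimage import morphology
from skimage.filters import gaussian
from skimage.measure import label, regionprops

img = gaussian(np.pad(np.ones((1, 1)), 20), sigma=3)            # one smooth blob at (20, 20)
peaks = morphology.dilation(morphology.h_maxima(img, 1e-4), morphology.disk(4))
centers = [tuple(int(c) for c in p.centroid) for p in regionprops(label(peaks))]
print(centers)                                                   # [(20, 20)]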
Example #10
def segment_image(cells_img, sato1, sato2, a=1., b=1., c=1., m1=1., m2=1.):
    """
    :param cells_img: cell prediction confidence image
    :param sato1, sato2: CreateSato objects
    :param a, b, c, m1, m2: fine tuning parameters
    :returns thresholded image with layers for colors
    """
    cells_img = img_as_float32(cells_img)
    gauss_downstream = filters.gaussian(cells_img, multichannel=True,
                                        sigma=2).astype(np.float32)
    threshed1 = gauss_downstream > 0.5
    threshed2 = cells_img > 0.5
    threshed = (threshed1 * 1. + threshed2 * 1.) > 0
    del threshed1, threshed2
    gauss_energy = filters.gaussian(cells_img, multichannel=True,
                                    sigma=6).astype(np.float32)

    vesses = sato1(gauss_downstream)
    maxima_vals = vesses * m1 + gauss_energy * m2
    disc = morphology.disk(4)
    maxes = np.zeros(vesses.shape, dtype=np.uint8)
    for i in range(3):
        maxes[..., i] = morphology.h_maxima(maxima_vals[..., i],
                                            h=0.05,
                                            selem=disc).astype(np.float16)
    del maxima_vals
    vesses_prim = sato2(gauss_downstream)
    energy = -(a * vesses + b * gauss_energy - c * vesses_prim)
    segmented_cells = np.zeros(cells_img.shape, dtype=np.uint32)
    for i in range(3):
        markers, _ = ndi.label(maxes[..., i])
        segmented_cells[..., i] = segmentation.watershed(energy[..., i],
                                                         markers,
                                                         mask=threshed[..., i])

    return threshed, segmented_cells
        
        g.col_num = 2
        g.title_list = ['Ground', 'No ground (object candidates)', 
                        'Fill ground holes (object candidates)',
                        'All object candidates (union)', 'opening result', 
                        'opening corrected with accumulation']
        g.save_name = 'Object_detection'
        plot = g.showing([im_ground, obj1, obj2, obj, residue, obj_restored])    
        
#%%
    if True:
        '''Watershed segmentation of the objects'''
        
        #Selection of local maxima
        obj_height = obj_restored * im_max
        marks = skmorpho.h_maxima(obj_height,1, selem=None)
        #Labelling as markers
        marks = ndi.label(marks)[0]
        #watershed segmentation
        label_obj = skmorpho.watershed(-obj_height, marks)
        
        #Coarser segmentation
        #reference elevation image
        ref = im_accum * im_min * obj_restored
        #removal of small objects already segmented
        ref = morpho.grey_opening(ref, size = 1)
        #Downsizing factor
        DS_factor = 10
        #Resize image     
        image_resized = resize(ref, (ref.shape[0] // DS_factor, ref.shape[1] // DS_factor))
        #New local maxima
    # eccentricity filter             
    for i in ls:
        if i.eccentricity<0.7 : lst[i.label] = 0
        else : lst[i.label] = i.label
    img_filter = lst[labeled1.astype(np.int32)]

    # relabel
    labeled2 = label(img_filter.copy(), generate_binary_structure(2, 2))[0].astype('uint8')

    # distance transform
    lst = [i for i in range(256)]
    lst = np.array(lst[::-1])
    distance = distance_transform_edt(labeled2)

    #find the maximum as the seed
    markers = h_maxima(distance, 2)
    maximums = np.where(markers>0)
    print(maximums)
    markers = binary_dilation(markers, structure=generate_binary_structure(2, 2))
    img_markers = label(markers, generate_binary_structure(2, 2))[0].astype('uint8')
    labeled1 = lst[labeled1.astype(np.int32)]   

    #watershed
    watershed = watershed(labeled1, img_markers, watershed_line=True)
    lst = np.array([1]+[0]*254)
    #extract the boundary
    watershed_line = lst[watershed]

    lst=np.array([0]+[1]*254)
    img1=lst[img_filter]
        clean = im_accum_bin & residue.astype(bool)
        obj_restored = obj_clean + clean
        
        g.col_num = 2
        g.title_list = ['Ground', 'No ground (object candidates)', 
                        'Fill ground holes (object candidates)',
                        'All object candidates (union)', 'opening result', 
                        'opening corrected with accumulation']
        g.save_name = 'Object_Detection'
        plot = g.showing([im_ground, obj1, obj2, obj, residue, obj_restored])    
        
#%%
    if True:
        print('Object segmentation')
        obj_height = obj_restored * im_max
        marks = skmorpho.h_maxima(obj_height, 2, selem=None)
        marks = ndi.label(marks)[0]
        label_obj = skmorpho.watershed(-obj_height, marks, connectivity=1,
                           offset=None, mask=obj_restored.astype(bool), compactness=0, watershed_line=False)
        g.cmap = 'flag'  
        g.save_name = 'Object_Segmentation'
        g.title_list = ['Object segmentation']
        g.showing(label_obj)
                

#%%
    if True:
        print('Save cloud')
        mount_cloud_labels = image_to_2Dcloud(label_obj, elevation_mask)
        im_ground = make_binary(im_ground, 0, 'int')
Example #14
def deep_watershed(outputs,
                   radius=10,
                   maxima_threshold=0.1,
                   interior_threshold=0.01,
                   maxima_smooth=0,
                   interior_smooth=1,
                   maxima_index=0,
                   interior_index=-1,
                   label_erosion=0,
                   small_objects_threshold=0,
                   fill_holes_threshold=0,
                   pixel_expansion=None,
                   maxima_algorithm='h_maxima',
                   **kwargs):
    """Uses ``maximas`` and ``interiors`` to perform watershed segmentation.
    ``maximas`` are used as the watershed seeds for each object and
    ``interiors`` are used as the watershed mask.

    Args:
        outputs (list): List of [maximas, interiors] model outputs.
            Use `maxima_index` and `interior_index` if list is longer than 2,
            or if the outputs are in a different order.
        radius (int): Radius of disk used to search for maxima
        maxima_threshold (float): Threshold for the maxima prediction.
        interior_threshold (float): Threshold for the interior prediction.
        maxima_smooth (int): smoothing factor to apply to ``maximas``.
            Use ``0`` for no smoothing.
        interior_smooth (int): smoothing factor to apply to ``interiors``.
            Use ``0`` for no smoothing.
        maxima_index (int): The index of the maxima prediction in ``outputs``.
        interior_index (int): The index of the interior prediction in
            ``outputs``.
        label_erosion (int): Number of pixels to erode segmentation labels.
        small_objects_threshold (int): Removes objects smaller than this size.
        fill_holes_threshold (int): Maximum size for holes within segmented
            objects to be filled.
        pixel_expansion (int): Number of pixels to expand ``interiors``.
        maxima_algorithm (str): Algorithm used to locate peaks in ``maximas``.
            One of ``h_maxima`` (default) or ``peak_local_max``.
            ``peak_local_max`` is much faster but seems to underperform when
            given regions of ambiguous maxima.

    Returns:
        numpy.array: Integer label mask for instance segmentation.

    Raises:
        ValueError: ``outputs`` is not properly formatted.
    """
    try:
        maximas = outputs[maxima_index]
        interiors = outputs[interior_index]
    except (TypeError, KeyError, IndexError):
        raise ValueError('`outputs` should be a list of at least two '
                         'NumPy arrays of equal shape.')

    valid_algos = {'h_maxima', 'peak_local_max'}
    if maxima_algorithm not in valid_algos:
        raise ValueError('Invalid value for maxima_algorithm: {}. '
                         'Must be one of {}'.format(
                             maxima_algorithm, valid_algos))

    total_pixels = maximas.shape[1] * maximas.shape[2]
    if maxima_algorithm == 'h_maxima' and total_pixels > 5000**2:
        warnings.warn('h_maxima peak finding algorithm was selected, '
                      'but the provided image is larger than 5k x 5k pixels. '
                      'This will lead to slow prediction performance.')
    # Handle deprecated arguments
    min_distance = kwargs.pop('min_distance', None)
    if min_distance is not None:
        radius = min_distance
        warnings.warn('`min_distance` is now deprecated in favor of `radius`. '
                      'The value passed for `radius` will be used.',
                      DeprecationWarning)

    # distance_threshold vs interior_threshold
    distance_threshold = kwargs.pop('distance_threshold', None)
    if distance_threshold is not None:
        interior_threshold = distance_threshold
        warnings.warn('`distance_threshold` is now deprecated in favor of '
                      '`interior_threshold`. The value passed for '
                      '`distance_threshold` will be used.',
                      DeprecationWarning)

    # detection_threshold vs maxima_threshold
    detection_threshold = kwargs.pop('detection_threshold', None)
    if detection_threshold is not None:
        maxima_threshold = detection_threshold
        warnings.warn('`detection_threshold` is now deprecated in favor of '
                      '`maxima_threshold`. The value passed for '
                      '`detection_threshold` will be used.',
                      DeprecationWarning)

    if maximas.shape[:-1] != interiors.shape[:-1]:
        raise ValueError('All input arrays must have the same shape. '
                         'Got {} and {}'.format(
                             maximas.shape, interiors.shape))

    if maximas.ndim not in {4, 5}:
        raise ValueError('maxima and interior tensors must be rank 4 or 5. '
                         'Rank 4 is 2D data of shape (batch, x, y, c). '
                         'Rank 5 is 3D data of shape (batch, frames, x, y, c).')

    input_is_3d = maximas.ndim > 4

    # fill_holes is not supported in 3D
    if fill_holes_threshold and input_is_3d:
        warnings.warn('`fill_holes` is not supported for 3D data.')
        fill_holes_threshold = 0

    label_images = []
    for maxima, interior in zip(maximas, interiors):
        # squeeze out the channel dimension if passed
        maxima = nd.gaussian_filter(maxima[..., 0], maxima_smooth)
        interior = nd.gaussian_filter(interior[..., 0], interior_smooth)

        if pixel_expansion:
            fn = cube if input_is_3d else square
            interior = dilation(interior, selem=fn(pixel_expansion * 2 + 1))

        # peak_local_max is much faster but has poorer performance
        # when dealing with more ambiguous local maxima
        if maxima_algorithm == 'peak_local_max':
            coords = peak_local_max(
                maxima,
                min_distance=radius,
                threshold_abs=maxima_threshold,
                exclude_border=kwargs.get('exclude_border', False))

            markers = np.zeros_like(maxima)
            slc = tuple(coords[:, i] for i in range(coords.shape[1]))
            markers[slc] = 1
        else:
            # Find peaks and merge equal regions
            fn = ball if input_is_3d else disk
            markers = h_maxima(image=maxima,
                               h=maxima_threshold,
                               selem=fn(radius))

        markers = label(markers)
        label_image = watershed(-1 * interior, markers,
                                mask=interior > interior_threshold,
                                watershed_line=0)

        if label_erosion:
            label_image = erode_edges(label_image, label_erosion)

        # Remove small objects
        if small_objects_threshold:
            label_image = remove_small_objects(label_image,
                                               min_size=small_objects_threshold)

        # fill in holes that lie completely within a segmentation label
        if fill_holes_threshold > 0:
            label_image = fill_holes(label_image, size=fill_holes_threshold)

        # Relabel the label image
        label_image, _, _ = relabel_sequential(label_image)

        label_images.append(label_image)

    label_images = np.stack(label_images, axis=0)
    label_images = np.expand_dims(label_images, axis=-1)

    return label_images
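
A hypothetical usage sketch for deep_watershed, with random arrays standing in for real model predictions (shapes follow the docstring: rank 4, (batch, x, y, c)); it assumes the module-level imports this function relies on (h_maxima, watershed, relabel_sequential, scipy.ndimage as nd, an skimage version that accepts the selem keyword, etc.) are available:

import numpy as np

rng = np.random.default_rng(0)
maximas = rng.random((1, 128, 128, 1)).astype('float32')
interiors = rng.random((1, 128, 128, 1)).astype('float32')

labels = deep_watershed([maximas, interiors],
                        radius=10,
                        maxima_threshold=0.1,
                        interior_threshold=0.01,
                        maxima_smooth=2,
                        interior_smooth=2)
print(labels.shape)   # (1, 128, 128, 1), integer instance labels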
def windowed_radon(img,
                   radon_matrix,
                   theta=None,
                   method='h_maxima',
                   threshold_rel=2,
                   threshold_rel_global=False,
                   threshold_mean=1.5,
                   return_lines=False,
                   debug=False):
    """
    Get coarse-grained anisotropy tensor for img via windowed Radon transform.
    
    Basic version with no support for curved surfaces, and only one option
    for maxima detection, to make code easier to read.

    See https://doi.org/10.7554/eLife.27454. This function first
    calculates a windowed Radon transform, then detects maxima of the Radon
    transform and finally computes a coarse-grained anisotropy tensor for each
    Radon patch according to tensorify_radon. Alternatively, the function can
    also return the centroid, orientation and intensity of each detected edge
    segment (for verification purposes). This information is returned in a
    2d-array like nested list format, each entry corresponding to the list
    of detected line segments from one particular Radon window.

    By default, the Radon transform is weighted so a to calculate the line
    density along each ray (this is done approximately).

    The crucial step in anisotropy detection is finding the maxima of the
    Radon transform. This is implemented using the h-maxima transform. It
    is necessary to tune the "h"-parameter of the h-maxima transform and check
    that your choice correctly detects all edge segment of interest.

    As an example of why maxima detection is so important, consider the Radon
    transform of an image containing a single line, and average over the
    projection distance delta, (so as to obtain a function of angle only). The
    result is _independent_ of the original line orientation!

    For performance reasons, this function uses a sparse matrix representation
    of the Radon transform, computed in advance using get_radon_tf_matrix.

    Parameters
    ----------
    img : np.array
        Input image.
    radon_matrix : sparse.csc_matrix
        Sparse matrix representing the Radon transform (remember, it's linear).
        Computed by get_radon_tf_matrix. Determines the size of the radon
        window.
    theta : np.array, optional
        Angles at which radon transform is computed. Defaults to
        np.linspace(0, np.pi, edge_length, endpoint=False) where edge_length
        is the window size (defined by the shape of radon_matrix).
    method : str, optional
        One of 'h_maxima' (default), 'global_maximum' (faster) or 'thr_mean'.
    threshold_rel : float, optional
        Threshold for maxima detection in the Radon transform. For "h_maxima",
        this is the minimal height of maxima in units of the Radon transform's
        standard deviation.
    threshold_mean : float, optional
        Second auxiliary threshold. All Radon transform maxima must be at least
        threshold_mean * mean(Radon transform). This removes e.g. the global
        maximum always returned by the h maxima transform if it is not
        sufficiently pronounced.
    threshold_rel_global :  float, optional
        Global threshold for h-maxima computation, in units of image
        std deviation. If non-zero, overrules the local h-maxima thresholds.
        Use when large regions of image contain no anisotropy.
    return_lines : bool, optional
        Whether to also return a list of centroids, orientations & intensities
        of all detected edges instead of just the coarse-grained anisotropy
        tensor.

    Returns
    -------
    lines : list of lists of tuples
        Returned only if return_lines is True. Lines detected in each window;
        each detection is an (intensity, orientation angle, centroid location) tuple.
    m :  np.array of shape (...,..., 2, 2)
        Coarse grained anisotropy tensor. The first two axes are "spatial"
        indices and have the following extent:
            ceil(2*(img_shape - edge_length+1)/(edge_length+1)),
        where edge_length is the radon window size.

    """
    edge_length = np.sqrt(radon_matrix.shape[1]).astype(int)
    e_len = int((edge_length - 1) / 2)
    if theta is None:
        theta = np.linspace(0,
                            np.pi,
                            int(radon_matrix.shape[0] / (edge_length - 4)),
                            endpoint=False)
    e_len_cut = int((radon_matrix.shape[0] / theta.size - 1) / 2)
    delta = np.arange(-e_len_cut, e_len_cut + 1)
    # needed because line_density radon transform returns cut-off

    # director matrix
    q_matrix = np.array([[np.sin(theta)**2, -np.sin(theta) * np.cos(theta)],
                         [-np.sin(theta) * np.cos(theta),
                          np.cos(theta)**2]])

    global_h = threshold_rel_global * img.std()

    # iterate over sub-arrays
    m = []
    lines = []
    for r in np.arange(e_len, img.shape[0] - e_len, e_len + 1):
        m_row = []
        lines_row = []
        for c in np.arange(e_len, img.shape[1] - e_len, e_len + 1):
            patch = img[r - e_len:r + e_len + 1, c - e_len:c + e_len + 1]
            radon_window = radon_matrix.dot(patch.flatten())
            radon_window = radon_window.reshape(2 * e_len_cut + 1, theta.size)
            # compute mean based threshold for maxima
            thr_mean = (radon_window.min() + threshold_mean *
                        (np.median(radon_window) - radon_window.min()))
            if method == 'h_maxima':
                h = (global_h if global_h else
                     radon_window.std() * threshold_rel + 1e-2)
                max_mask = h_maxima(radon_window, h)
                max_mask = binary_dilation(max_mask, iterations=1)
                max_mask *= (radon_window > thr_mean)
            if method == 'global_maximum':
                max_mask = np.zeros(radon_window.shape)
                ind = np.unravel_index(np.argmax(radon_window, axis=None),
                                       radon_window.shape)
                max_mask[ind] = radon_window[ind] > thr_mean
            if method == 'thr_mean':
                max_mask = radon_window > thr_mean

            if return_lines and max_mask.any():
                max_mask = shrink_to_centroid(max_mask)
                # now, get centers of lines for each maximum
                max_intensity = radon_window[max_mask.astype(bool)]
                max_delta = delta[np.where(max_mask)[0]]
                max_theta = theta[np.where(max_mask)[1]]
                # get distance of line segment centroids to image center,
                # component orthogonal to line orientation
                max_center = max_delta * np.stack(
                    [np.cos(max_theta), -np.sin(max_theta)])
                # component parallel to line orientation
                par_com = np.array([
                    get_segment_com(patch, ang, off, pad=2)
                    for ang, off in zip(max_theta, max_delta)
                ])
                # add to coordinates of radon window center
                max_center += par_com * np.stack(
                    [np.sin(max_theta), np.cos(max_theta)])
                max_loc = max_center.T + np.array([c, r])
                lines_row.append(list(zip(max_intensity, max_theta, max_loc)))
            else:
                lines_row.append([])
            if debug:
                print(r, c)
                for l in lines_row[-1]:
                    print('theta', 180 / np.pi * l[1])
                    print('loc', l[2])
                    a, b = np.sin(l[1]), np.cos(l[1])
                    plt.plot(5 * np.array([-a, 0, a]) + l[2][0] + e_len - c,
                             5 * np.array([-b, 0, b]) + l[2][1] + e_len - r,
                             color='red',
                             lw=1)
                plt.imshow(patch * disk(e_len), vmin=0, vmax=1)
                plt.show()
            radon_window = (radon_window * max_mask).sum(axis=0)
            m_row.append((q_matrix * radon_window).sum(axis=2))
        lines.append(lines_row)
        m.append(m_row)
    m = np.stack(m)
    if return_lines:
        return lines, m
    return m
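
A self-contained illustration of the crucial step described in the docstring above: take the Radon transform of a patch containing a single line and locate its maxima with h_maxima (this uses skimage's dense radon() instead of the sparse windowed transform, and the factor of 2 for h is illustrative):

import numpy as np
from skimage.morphology import h_maxima
from skimage.transform import radon

patch = np.zeros((31, 31))
patch[15, :] = 1.0                                  # one horizontal line through the centre
theta = np.linspace(0., 180., 60, endpoint=False)
sinogram = radon(patch, theta=theta, circle=True)   # shape (distance, angle) = (31, 60)
h = 2 * sinogram.std()                              # h in units of the transform's std
d_idx, t_idx = np.nonzero(h_maxima(sinogram, h))
print(theta[t_idx])                                 # ~90 degrees: the line's orientation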
Example #16
def find_local_maximum(image, h):
    local_maximum_mask = h_maxima(image, h)

    return local_maximum_mask
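
A quick, self-contained illustration of what this wrapper returns: h_maxima yields a binary mask that is nonzero only at maxima standing at least h above their surroundings (values below are illustrative):

import numpy as np
from skimage.morphology import h_maxima

profile = np.array([[0, 2, 0, 3, 0],
                    [0, 2, 0, 3, 0]], dtype=float)
print(h_maxima(profile, 2.5))
# the height-2 peak is suppressed; only the height-3 peak is marked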
Example #17
def split_nuclei(mask,minimal_nuclei_size,h,sphere,min_dist):
    
    # mask_erosion = [8, 8, 5]
    # X,Y,Z = np.meshgrid(np.linspace(-1,1,mask_erosion[0]),np.linspace(-1,1,mask_erosion[1]),np.linspace(-1,1,mask_erosion[2]))
    # sphere = np.sqrt(X**2 + Y**2 + Z**2) < 1
    
    # mask = binary_closing(mask,sphere)
    
    mask = remove_small_objects(mask,minimal_nuclei_size)
    mask = remove_small_holes(mask,minimal_nuclei_size)
    
    # bwdist / bwdistgeodesic are presumably MATLAB-style helpers defined elsewhere
    # in this project (a weighted distance transform and a geodesic distance transform)
    D = bwdist(mask, diminsion_weights=(1, 1, 1))

    # seed candidates 1: h-maxima of the distance transform
    maxima2 = h_maxima(D, h)

    # centroids, num = label(maxima2)
    # centroids = np.array(center_of_mass(maxima2, centroids, range(1,1+num))).astype(np.int32)
    # tmp = np.zeros_like(D)
    # tmp[tuple(centroids.T)] = D[tuple(centroids.T)]

    # seed candidates 2: peaks separated by at least min_dist
    peak_idx = peak_local_max(D, min_distance=min_dist, exclude_border=False)
    maxima1 = np.zeros_like(D, dtype=bool)
    maxima1[tuple(peak_idx.T)] = True

    # plt.imshow(np.max(binary_dilation(maxima1 & maxima2,sphere),axis=2))
    # plt.show()

    labeled_maxima, num = label(maxima1 & maxima2)

    labels = watershed(-D, labeled_maxima, mask=mask, watershed_line=True)

    seeds = remove_small_objects(labels > 0, 30000)   # keep only large segments as seeds

    seeds = binary_erosion(seeds)
    DD = bwdistgeodesic(seeds, mask, diminsion_weights=[1, 1, 1])

    labeled_seeds, num = label(seeds)

    labelss = watershed(DD, labeled_seeds, mask=mask, watershed_line=True)

    # plt.imshow(labels[:,:,35])
    # plt.show()

    return labelss
Example #18
def detection(
    input_image,
    minref=35,
    maxref=48,
    mindiff=6,
    minsize=50,
    minmax=41,
    mindis=10,
    output_feat=False,
    time="000000000",
):
    """
    This function detects thunderstorms using a multi-threshold approach. It is
    recommended to use a 2-D Cartesian maximum reflectivity composite, however the
    function will process any 2-D array.
    The thunderstorm cell detection requires both scikit-image and pandas.

    Parameters
    ----------
    input_image : array-like
        Array of shape (m,n) containing input image, usually maximum reflectivity in
        dBZ with a resolution of 1 km. Nan values are ignored.
    minref : float, optional
        Lower threshold for object detection. Lower values will be set to NaN.
        The default is 35 dBZ.
    maxref : float, optional
        Upper threshold for object detection. Higher values will be set to this value.
        The default is 48 dBZ.
    mindiff : float, optional
        Minimal difference between two identified maxima within same area to split area
        into two objects. The default is 6 dBZ.
    minsize : float, optional
        Minimal area for possible detected object. The default is 50 pixels.
    minmax : float, optional
        Minimum value of maximum in identified objects. Objects with a maximum lower
        than this will be discarded. The default is 41 dBZ.
    mindis : float, optional
        Minimum distance between two maxima of identified objects. Objects with a
        smaller distance will be merged. The default is 10 km.
    output_feat: bool, optional
        Set to True to return only the cell coordinates.
    time : string, optional
        Date and time as string. Used to label time in the resulting dataframe.
        The default is '000000000'.

    Returns
    -------
    cells_id : pandas dataframe
        Pandas dataframe containing all detected cells and their respective properties
        corresponding to the input image.
        Columns of dataframe: ID - cell ID, time - time stamp, x - array of all
        x-coordinates of cell, y -  array of all y-coordinates of cell, cen_x -
        x-coordinate of cell centroid, cen_y - y-coordinate of cell centroid, max_ref -
        maximum (reflectivity) value of cell, cont - cell contours
    labels : array-like
        Array of shape (m,n), grid of labelled cells.
    """
    if not SKIMAGE_IMPORTED:
        raise MissingOptionalDependency(
            "skimage is required for thunderstorm DATing " "but it is not installed"
        )
    if not PANDAS_IMPORTED:
        raise MissingOptionalDependency(
            "pandas is required for thunderstorm DATing " "but it is not installed"
        )
    filt_image = np.zeros(input_image.shape)
    filt_image[input_image >= minref] = input_image[input_image >= minref]
    filt_image[input_image > maxref] = maxref
    max_image = np.zeros(filt_image.shape)
    max_image[filt_image == maxref] = 1
    labels, n_groups = ndi.label(max_image)
    for n in range(1, n_groups + 1):
        indx, indy = np.where(labels == n)
        if len(indx) > 3:
            max_image[indx[0], indy[0]] = 2
    filt_image[max_image == 2] = maxref + 1
    binary = np.zeros(filt_image.shape)
    binary[filt_image > 0] = 1
    labels, n_groups = ndi.label(binary)
    for n in range(1, n_groups + 1):
        ind = np.where(labels == n)
        size = len(ind[0])
        maxval = np.nanmax(input_image[ind])
        if size < minsize:  # removing too small areas
            binary[labels == n] = 0
            labels[labels == n] = 0
        if maxval < minmax:  # removing areas with too low max value
            binary[labels == n] = 0
            labels[labels == n] = 0
    filt_image = filt_image * binary
    if mindis % 2 == 0:
        elem = mindis - 1
    else:
        elem = mindis
    struct = np.ones([elem, elem])
    if np.nanmax(filt_image.flatten()) < minref:
        maxima = np.zeros(filt_image.shape)
    else:
        maxima = skim.h_maxima(filt_image, h=mindiff, selem=struct)
    loc_max = np.where(maxima > 0)

    loc_max = longdistance(loc_max, mindis)
    i_cell = labels[loc_max]
    n_cell = np.unique(labels)[1:]
    for n in n_cell:
        if n not in i_cell:
            binary[labels == n] = 0
            labels[labels == n] = 0

    maxima_dis = np.zeros(maxima.shape)
    maxima_dis[loc_max] = 1

    areas, lines = breakup(input_image, np.nanmin(input_image.flatten()), maxima_dis)

    cells_id, labels = get_profile(areas, binary, input_image, loc_max, time, minref)

    if not output_feat:
        return cells_id, labels
    if output_feat:
        return np.column_stack(
            [np.array(cells_id.cen_x), np.array(cells_id.cen_y)]
        )
Example #19
def findPeaks_hdome(calib,
                    npix_min=0,
                    npix_max=0,
                    atot_thr=0,
                    son_min=0,
                    hvalue=0,
                    r0=0,
                    dr=0,
                    mask=None):
    hmax = h_maxima(calib, hvalue)
    if mask is not None: hmax = np.multiply(hmax, mask)

    ll = label(hmax)
    regions = regionprops(ll)

    numPeaks = len(regions)
    x = np.zeros((numPeaks, ))
    y = np.zeros((numPeaks, ))
    for i, p in enumerate(regions):
        x[i], y[i] = p.centroid

    innerRing = r0
    outerRing = dr
    width = int(outerRing * 2 + 1)

    outer, rr, rc, n, m = donutMask(width,
                                    width,
                                    outerRing,
                                    innerRing,
                                    centreRow=0,
                                    centreCol=0)
    inner, rr, rc, n, m = donutMask(width,
                                    width,
                                    innerRing,
                                    0,
                                    centreRow=0,
                                    centreCol=0)
    if width % 2 == 0:
        lh = rh = int(width / 2)
    else:
        lh = int(width / 2)
        rh = int(width / 2) + 1

    snr = np.zeros_like(x)
    tot = np.zeros_like(x)
    numPix = np.zeros_like(x)
    numInner = len(np.where(inner == 1)[0])
    for i in range(numPeaks):
        d = calib[int(x[i]) - lh:int(x[i]) + rh, int(y[i]) - lh:int(y[i]) + rh]
        try:
            meanSig = np.mean(d[np.where(inner == 1)])
            stdNoise = np.std(d[np.where(outer == 1)])
            meanBackground = np.mean(d[np.where(outer == 1)])
            tot[i] = np.sum(
                d[np.where(inner == 1)]) - numInner * meanBackground
            numPix[i] = len(np.where(d[np.where(inner == 1)] >= hvalue)[0])
            if stdNoise == 0:
                snr[i] = -1
            else:
                snr[i] = meanSig / stdNoise
        except Exception:
            snr[i] = 0

    ind = (snr >= son_min) & (tot >= atot_thr) & (numPix >= npix_min) & (
        numPix < npix_max)
    x = x[ind]
    y = y[ind]
    npix = numPix[ind]
    atot = tot[ind]
    son = snr[ind]

    peaks = np.zeros((len(x), 5))
    if len(x) > 0:
        peaks[:, 0] = x.T
        peaks[:, 1] = y.T
        peaks[:, 2] = npix.T
        peaks[:, 3] = atot.T
        peaks[:, 4] = son.T

    return peaks
Example #20
def segmentByClustering(rgbImage, colorSpace, clusteringMethod,
                        numberOfClusters):
    #module importation
    import pandas as pd
    import numpy as np
    from sklearn.cluster import KMeans
    import matplotlib.pyplot as plt
    from skimage import io, color
    import cv2
    import ipdb
    from sklearn.cluster import AgglomerativeClustering
    xyimg = []

    # normalizing function
    def debugImg(rawData):
        toShow = np.zeros((rawData.shape), dtype=np.uint8)
        cv2.normalize(rawData,
                      toShow,
                      alpha=0,
                      beta=255,
                      norm_type=cv2.NORM_MINMAX,
                      dtype=cv2.CV_8U)
#       cv2.imwrite('img.jpg', toShow)

    def xy(img):
        height = np.size(img, 0)
        width = np.size(img, 1)
        mat = np.zeros((height, width, 2))
        mat[::, ::,
            1] = (mat[::, ::, 1] + np.arange(width)[np.newaxis, :]) / width
        mat[::, ::,
            0] = (mat[::, ::, 0] + np.arange(height)[:, np.newaxis]) / height
        return mat

    def merge(img, xy):
        im = np.sum(img, axis=-1)
        xysum = np.sum(xy, axis=-1)
        fin = np.add(im, xysum) / 5
        return fin

    #resize if it is hierarchical
    if clusteringMethod == 'hierarchical':
        #       rgbImage = cv2.resize(rgbImage, (0,0), fx=0.5, fy=0.5)
        height = np.size(rgbImage, 0)
        width = np.size(rgbImage, 1)
    else:
        # ipdb.set_trace()
        height = np.size(rgbImage, 0)
        width = np.size(rgbImage, 1)
    img = rgbImage
    #change to the specified color space
    if colorSpace == "lab":
        img_lab = color.rgb2lab(rgbImage)
        debugImg(img_lab)

        img = img_lab
    elif colorSpace == "hsv":
        img_hsv = color.rgb2hsv(rgbImage)
        debugImg(img_hsv)
        img = img_hsv
    elif colorSpace == "rgb+xy":
        r = rgbImage[:, :, 0]
        g = rgbImage[:, :, 1]
        b = rgbImage[:, :, 2]
        xyimg = xy(rgbImage)

    elif colorSpace == "lab+xy":
        img_lab = color.rgb2lab(rgbImage)
        debugImg(img_lab)
        img = img_lab
        xyimg = xy(img_lab)
    elif colorSpace == "hsv+xy":
        img_hsv = color.rgb2hsv(rgbImage)
        debugImg(img_hsv)
        img = img_hsv
        xyimg = xy(img)
    else:
        img = rgbImage
#       img = color.rgb2gray(img)
# preparation to classifiers

#proceed to the specified clustering method
    f = img
    #     img=merge(f,xyimg)

    debugImg(img)

    if clusteringMethod == "kmeans":
        feat = img.reshape(height * width, 3)
        kmeans = KMeans(n_clusters=numberOfClusters).fit_predict(feat)
        segmentation = np.reshape(kmeans, (height, width))

    elif clusteringMethod == "gmm":
        from sklearn import mixture
        feat = img.reshape(height * width, 3)
        gmm = mixture.GaussianMixture(
            n_components=numberOfClusters).fit_predict(feat)
        segmentation = np.reshape(gmm, (height, width))

    elif clusteringMethod == "hierarchical":
        feat = img.reshape(height * width, 1)
        clustering = AgglomerativeClustering(
            n_clusters=numberOfClusters).fit_predict(feat)
        segmentation = (np.reshape(clustering, (height, width)))

#       segmentation=cv2.resize(segmentation, None, fx = 2, fy = 2, interpolation = cv2.INTER_CUBIC)
    else:
        from skimage import morphology
        from skimage import feature
        import skimage
        img = color.rgb2gray(img)
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
        # Compute gradient magnitude
        grad_magn = np.sqrt(sobelx**2 + sobely**2)
        debugImg(grad_magn)

        import matplotlib.pyplot as plt

        imagenW = grad_magn

        found = 100000
        minimum = found
        while (minimum != numberOfClusters):
            imagenW = morphology.h_maxima(grad_magn, found)
            _, labeled_fg = cv2.connectedComponents(imagenW.astype(np.uint8))
            print(len(np.unique(labeled_fg)))
            labels = morphology.watershed(grad_magn, labeled_fg)

            found = found - 1
            minimum = len(np.unique(labels))
            print(minimum)

#        plt.figure()
#        plt.imshow(labeled_fg)
#        print(labeled_fg)
        segmentation = labels

    return segmentation
Example #21
    def simple_log(self, img, prefix='debug', eps=0.008, 
                   method='local_max', h=0.004, sigma=2.4, k=3,
                   gauss_threshold=30.0):

        gauss_img = gaussian(img, sigma = sigma)
        if self.settings.debug:
            self.save_debug_img(img, '%s_spot_detection_original.png' % prefix, False, alpha=1.0)
            self.save_debug_img(gauss_img, '%s_spot_detection_gaussian.png' % prefix, False, alpha=255)

        log = laplace(gauss_img, ksize=k)

        if self.settings.debug:
            # In this case, we also generate debug images.
            print('min, max: ', log.min(), log.max())

            temp = 0.5 * (10 * log + 1.0)
            temp[temp > 1.0] = 1.0
            temp[temp < 0.0] = 0.0
            self.save_debug_img(temp, '%s_spot_detection_laplacian.png' % prefix, False, alpha=255.0)

            temp = log.copy()
            temp[temp <= eps] = 0
            temp = temp * 1000
            temp[temp > 255.0] = 255
            temp = temp.astype(np.uint8)
            self.save_debug_img(temp, '%s_spot_detection_laplacian_eps_to_zero.png' % prefix, False, alpha=1)

        # just threshold
        if method == 'threshold': 
            spots = np.zeros(log.shape, dtype=np.uint8)
            spots[log>eps] = 1
            spots[gauss_img<=gauss_threshold] = 0
        elif method == 'local_max':
            spots = h_maxima(log, h=h)
            spots[log<=eps] = 0
            spots[gauss_img<=gauss_threshold] = 0
            
        lab = label(spots)
        properties = regionprops(lab)
        coordinates = [(int(properties[i]['centroid'][0]), int(properties[i]['centroid'][1]))
                       for i in range(len(properties))]

        if self.settings.debug:
            # In this case, we also generate debug images.
            ov = Overlays()
            rgb_img = ov.overlay_grey_img(img, spots, {1: (255, 0, 0)}, True)
            filename = os.path.join(self.settings.debug_folder,
                                    '%s_spot_detection_result_overlay.tif' % prefix)
            skimage.io.imsave(filename, rgb_img)
            
            print('number of maxima: %i' % len(coordinates))
            filename = os.path.join(self.settings.debug_folder,
                                    '%s_spot_detection_result.tif' % prefix)
            self.export_spot_detection(img, coordinates, filename)

            filename = os.path.join(self.settings.debug_folder,
                                    '%s_spot_detection_result_gauss.tif' % prefix)
            gauss_max_val = gauss_img.max()
            if gauss_max_val == 0:
                gauss_max_val = 1.0
            gauss_filter_export = 255.0 * gauss_img / gauss_max_val
            gauss_filter_export = gauss_filter_export.astype(np.uint8)
            self.export_spot_detection(gauss_filter_export, coordinates, filename)

        return coordinates
Example #22
def _cluster_density_histogram(f, w, s):
    """
    Compute a 2d histogram of fat-weighted (f) and water-weighted (w) intensity pairs within the segmentation (s).
    A threshold is then determined as the halfway point between the two histogram peaks, and each voxel is
    assigned a label depending on which side of that threshold it falls.

    Parameters
    ----------
    f : 3d volume
        volume of fat weighted intensities
    w : 3d volume
        volume of water weighted intensities
    s : 3d volume
        segmentation of breast (breasts)

    Returns
    -------
    assignment : 3d volume
       label volume with background (0), parenchymal tissue (1), and fatty tissue (2) 
    histogram : 2d array
       2d histogram of fat and water intensities
    histogram_threshold : 2d array
       2d label image with the same size as the histogram, indicating the threshold areas.
    peak_center : 2-element list
        center position of histogram peaks
    
    """
    import numpy as np
    from skimage.morphology import h_maxima, label
    from skimage.measure import regionprops

    #get intensity values within segmentation only and create pairs
    X = np.array([f[s > 0], w[s > 0]], dtype=np.uint32)
    #get maximum intensities
    maxf = np.uint32(np.max(X[0, :])) + 1
    maxw = np.uint32(np.max(X[1, :])) + 1

    #Calculate histogram and binned histogram
    #A=np.zeros((maxf,maxw),dtype=np.uint32)
    #for c in X.T: #iterate over each pair and count up in histogram matrix
    #    A[c[0],c[1]]+=1
    A, _, _ = np.histogram2d(X[0, :],
                             X[1, :],
                             bins=[np.arange(maxf + 1),
                                   np.arange(maxw + 1)])
    A = np.uint32(A)
    N, _, _ = np.histogram2d(X[0, :], X[1, :], bins=_BIN_SIZE)

    #Find extrema in binned histogram
    J = h_maxima(N, 500)
    L = label(J)
    numL = np.max(L)
    stats = regionprops(L)

    value = []
    #get sorting of peaks by histogram count and filter invalid peaks
    for stat in stats:
        #often a peak is at 0,0 when segmentation is outside of breast area, where both intensities are close to 0
        #ignore this peak
        if (np.max(stat.centroid) == 0):
            value.append(-np.inf)
        else:
            centroid = np.uint16(np.rint(stat.centroid))
            value.append(N[centroid[1], centroid[0]])
    #sort by highest value (done once, after all peaks have been scored)
    idx = np.flip(np.argsort(value))

    #first peak center is the one with highest value
    bincenter1 = np.array(stats[idx[0]].centroid)

    #initialize second peak center with mirrored position along diagonal
    bincenter2 = np.array([bincenter1[1], bincenter1[0]])

    if (numL > 2):
        #more than two peaks, choose peaks with highest histogram values and being on opposite site

        #iterate over all peaks and choose the one that is on opposite site
        for i in idx[1:]:
            if (value[i] == -np.inf):
                continue
            t = stats[i].centroid  # i already indexes stats/value directly
            #check if it is on opposite site
            if ((bincenter1[0] >= bincenter1[1] and t[0] <= t[1])
                    or (bincenter1[0] <= bincenter1[1] and t[0] >= t[1])):
                bincenter2 = t
                break

    elif (numL == 2):
        #Check whether detected peaks are on opposite site
        if ((bincenter1[0] >= bincenter1[1] and bincenter2[0] <= bincenter2[1])
                or (bincenter1[0] <= bincenter1[1]
                    and bincenter2[0] >= bincenter2[1])):
            # cluster centers are not on different sides, peak with highest value is chosen and mirrored
            bincenter2 = stats[1].centroid

    #get interval size
    h = np.float64(np.max(X, axis=1)) / _BIN_SIZE
    #convert coordinates from binned histogram [0..BIN_SIZE) to original histogram coordinates (center of interval = (h/2) is used)
    center1 = bincenter1 * h + h / 2.0
    center2 = bincenter2 * h + h / 2.0

    #determine halfway position between the two peak centers
    center = (center1 + center2) / 2.0

    # create splitting matrix depending on center position
    a = center[1] / center[0]
    s1 = np.int32(a * np.tile(np.arange(A.shape[0]), [A.shape[1], 1]).T)
    s2 = np.tile(np.arange(A.shape[1]), [A.shape[0], 1])
    s_split = s1 - s2
    s_split = s_split > 0

    #s_split has now same size as 2d-histogram and contains information whether each element is fatty tissue or water tissue

    #now perform a lookup for each voxel within segmentation whether it is fatty or water tissue
    #lookup indices are the paired intensity values in s_split
    assignment = 1 + s_split[tuple(X)]  #  + 1 to get label 1/2 (fat/water)
    #reshape assignment to segmented breast
    assign_vol = np.zeros_like(s, dtype=np.uint8)
    assign_vol[s > 0] = assignment

    return (assign_vol, A, s_split, [center1, center2])
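
A self-contained sketch of the peak-finding idea above: build a 2d histogram from two synthetic intensity clusters and locate its peaks with h_maxima (cluster positions, bin count and h are illustrative):

import numpy as np
from skimage.measure import label, regionprops
from skimage.morphology import h_maxima

rng = np.random.default_rng(0)
a = rng.normal([200, 800], 40, size=(20000, 2))   # "fat-like" cluster
b = rng.normal([800, 200], 40, size=(20000, 2))   # "water-like" cluster
N, _, _ = np.histogram2d(*np.vstack([a, b]).T, bins=64)
peaks = regionprops(label(h_maxima(N, 150)))
print([p.centroid for p in peaks])                # two peak centres, one per cluster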
def deep_watershed_mibi(model_output,
                        radius=10,
                        maxima_threshold=0.1,
                        interior_threshold=0.2,
                        small_objects_threshold=0,
                        fill_holes_threshold=0,
                        interior_model='pixelwise-interior',
                        maxima_model='inner-distance',
                        interior_model_smooth=1,
                        maxima_model_smooth=0,
                        pixel_expansion=None):
    """Postprocessing function for multiplexed deep watershed models. Thresholds the inner
    distance prediction to find cell centroids, which are used to seed a marker
    based watershed of the pixelwise interior prediction.

    Args:
        model_output (dict): DeepWatershed model output. A dictionary containing key: value pairs
            with the transform name and the corresponding output. Currently supported keys:

            - inner_distance: Prediction for the inner distance transform.
            - outer_distance: Prediction for the outer distance transform.
            - fgbg: Foreground prediction for the foreground/background transform.
            - pixelwise_interior: Interior prediction for the interior/border/background transform.

        radius (int): Radius of disk used to search for maxima
        maxima_threshold (float): Threshold for the maxima prediction.
        interior_threshold (float): Threshold for the interior prediction.
        small_objects_threshold (int): Removes objects smaller than this size.
        fill_holes_threshold (int): maximum size for holes within segmented objects to be filled
        interior_model: semantic head to use to predict interior of each object
        maxima_model: semantic head to use to predict maxima of each object
        interior_model_smooth: smoothing factor to apply to interior model predictions
        maxima_model_smooth: smoothing factor to apply to maxima model predictions
        pixel_expansion: optional number of pixels to expand segmentation labels

    Returns:
        numpy.array: Uniquely labeled mask.

    Raises:
        ValueError: if interior_model or maxima_model names not in valid_model_names
        ValueError: if interior_model or maxima_model predictions do not have length 4
    """

    interior_model, maxima_model = interior_model.lower(), maxima_model.lower()

    valid_model_names = {
        'inner-distance', 'outer-distance', 'fgbg-fg', 'pixelwise-interior'
    }

    for name, model in zip(['interior_model', 'maxima_model'],
                           [interior_model, maxima_model]):
        if model not in valid_model_names:
            raise ValueError('{} must be one of {}, got {}'.format(
                name, valid_model_names, model))

    interior_predictions = model_output[interior_model]
    maxima_predictions = model_output[maxima_model]

    zipped = zip(['interior_prediction', 'maxima_prediction'],
                 (interior_predictions, maxima_predictions))
    for name, arr in zipped:
        if len(arr.shape) != 4:
            raise ValueError('Model output must be of length 4. The {} model '
                             'provided was of shape {}'.format(
                                 name, arr.shape))

    label_images = []
    for batch in range(interior_predictions.shape[0]):
        interior_batch = interior_predictions[batch, ..., 0]
        interior_batch = nd.gaussian_filter(interior_batch,
                                            interior_model_smooth)

        if pixel_expansion is not None:
            interior_batch = dilation(interior_batch,
                                      selem=square(pixel_expansion * 2 + 1))

        maxima_batch = maxima_predictions[batch, ..., 0]
        maxima_batch = nd.gaussian_filter(maxima_batch, maxima_model_smooth)

        markers = h_maxima(image=maxima_batch,
                           h=maxima_threshold,
                           selem=disk(radius))

        markers = label(markers)

        label_image = watershed(-interior_batch,
                                markers,
                                mask=interior_batch > interior_threshold,
                                watershed_line=0)

        # Remove small objects
        label_image = remove_small_objects(label_image,
                                           min_size=small_objects_threshold)

        # fill in holes that lie completely within a segmentation label
        if fill_holes_threshold > 0:
            label_image = fill_holes(label_image, size=fill_holes_threshold)

        # Relabel the label image
        label_image, _, _ = relabel_sequential(label_image)

        label_images.append(label_image)

    label_images = np.stack(label_images, axis=0)
    label_images = np.expand_dims(label_images, axis=-1)

    return label_images
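
As with deep_watershed above, a hypothetical call with random stand-ins for the model predictions; the dictionary keys follow the docstring and the shapes are rank 4 (batch, x, y, 1). The function's own module-level imports (h_maxima, watershed, relabel_sequential, scipy.ndimage as nd, deepcell's fill_holes, etc.) are assumed to be available:

import numpy as np

rng = np.random.default_rng(0)
model_output = {
    'inner-distance': rng.random((1, 128, 128, 1)).astype('float32'),
    'pixelwise-interior': rng.random((1, 128, 128, 1)).astype('float32'),
}
labels = deep_watershed_mibi(model_output,
                             radius=10,
                             maxima_threshold=0.1,
                             interior_threshold=0.2)
print(labels.shape)   # (1, 128, 128, 1)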