Example #1
 def get_red_and_ellipsoidal_annuli(self):
     """
     Returns the masks of the red and blue ellipsoidal annuli.
     :return: (ell_mask_red, ell_mask_blue) boolean masks
     """
     # red ellipsoidal annulus
     ell_mask_red = self.__get_ellipse_mask((self.block_height, self.block_width), n_step=0)
     ell_mask_red += self.__get_ellipse_mask((self.block_height, self.block_width), n_step=1)
     # blue ellipsoidal annulus
     ell_mask_blue = ell_mask_red.copy()
     ell_mask_blue += self.__get_ellipse_mask((self.block_height, self.block_width), n_step=2)
     ell_mask_blue += self.__get_ellipse_mask((self.block_height, self.block_width), n_step=3)
     # float -> bool
     ell_mask_red = ell_mask_red.astype(bool)
     ell_mask_blue = ell_mask_blue.astype(bool)
     # fill holes
     ell_mask_red = remove_small_holes(ell_mask_red)
     ell_mask_blue = remove_small_holes(ell_mask_blue)
     # subtract the red annulus from the blue one
     ell_mask_blue = np.logical_xor(ell_mask_blue, ell_mask_red)
     '''
     ell_mask_red = np.ones((self.block_height, self.block_width), dtype=int)
     ell_mask_red[2:-2, 2:-2] = 0
     ell_mask_blue = np.ones((self.block_height, self.block_width), dtype=int)
     ell_mask_blue[4:-4, 4:-4] = 0
     ell_mask_blue = ell_mask_blue - ell_mask_red
     ell_mask_red = ell_mask_red.astype(bool)
     ell_mask_blue = ell_mask_blue.astype(bool)
     '''
     return ell_mask_red, ell_mask_blue
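
A minimal, self-contained sketch of the same annulus-by-XOR idea, using `skimage.draw.ellipse` in place of the class's private `__get_ellipse_mask`; the shape and radii below are illustrative assumptions, not values from the code above.

import numpy as np
from skimage.draw import ellipse

shape = (64, 96)
outer = np.zeros(shape, dtype=bool)
inner = np.zeros(shape, dtype=bool)
rr, cc = ellipse(32, 48, 28, 44, shape=shape)   # filled outer ellipse
outer[rr, cc] = True
rr, cc = ellipse(32, 48, 20, 32, shape=shape)   # filled inner ellipse
inner[rr, cc] = True
ring = np.logical_xor(outer, inner)             # True only on the elliptical band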
Example #2
def test_label_warning_holes():
    labeled_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0],
                                    [0,1,1,1,1,1,0,0,0,0],
                                    [0,1,0,0,1,1,0,0,0,0],
                                    [0,1,1,1,0,1,0,0,0,0],
                                    [0,1,1,1,1,1,0,0,0,0],
                                    [0,0,0,0,0,0,0,2,2,2],
                                    [0,0,0,0,0,0,0,2,0,2],
                                    [0,0,0,0,0,0,0,2,2,2]], dtype=int)
    with expected_warnings(['use a boolean array?']):
        remove_small_holes(labeled_holes_image, min_size=3)
Example #3
def smooth_edges(mask, filter_size, min_pixels):

    no_small = mo.remove_small_holes(mask, min_size=min_pixels,
                                     connectivity=2)

    open_close = \
        nd.binary_closing(nd.binary_opening(no_small, eight_conn), eight_conn)

    medianed = nd.median_filter(open_close, filter_size)

    return mo.remove_small_holes(medianed, min_size=min_pixels,
                                 connectivity=2)
Example #4
def test_label_warning_holes():
    labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                                    [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                                    [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
                                    [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
                                    [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 0, 2],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
                                   dtype=np.int_)
    with expected_warnings(['use a boolean array?']):
        remove_small_holes(labeled_holes_image, area_threshold=3)
    remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3)
Example #5
def get_bg_mask(img):
    
    #if img.ndim == 3:
    #    bg_mask = img.any(axis=-1)
    #    bg_mask = np.invert(bg_mask) # consistent with np.ma, True if masked

    #    # make multichannel (is it really this hard?)
    #    bg_mask = np.repeat(bg_mask[:,:,np.newaxis], 3, axis=2) 
    #
    #else:
    #    bg_mask = (img != 0)
    #    bg_mask = np.invert(bg_mask) # see above

    #bound = segmentation.find_boundaries(bg_mask, mode='inner', background=1)
    #bg_mask[bound] = 1
    #min_size = img.shape[0] * img.shape[1] // 4 
    #holes = morphology.remove_small_holes(bg_mask, min_size=min_size)
    #bg_mask[holes] = 1
    
    bg_mask = segmentation.find_boundaries(img)
    bg_mask = morphology.remove_small_objects(bg_mask)
    bg_mask = morphology.remove_small_holes(bg_mask)

    bg_mask = np.invert(bg_mask)
    return bg_mask
Example #6
def denoiseMask(mask, denoising_ratio=15):
    """
    Function to denoise a mask represented by a numpy array. The denoising is
    done with binary erosion and propagation.
    Args:
        mask (numpy array): The mask which should be denoised represented by a
            boolean numpy array.
        denoising_ratio (int): The ratio within which pixels the denoising step
            will be executed.
    Returns:
        denoised_mask (numpy array): The denoised mask represented by a boolean
            numpy array.
    """
    mask = ~mask
    # eroded_mask = scipy.ndimage.binary_erosion(
    #     mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    # denoised_mask = scipy.ndimage.binary_propagation(
    #     eroded_mask, structure=np.ones((denoising_ratio, denoising_ratio)),
    #     mask=mask)
    # opened_mask = scipy.ndimage.binary_opening(
    #     mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    # denoised_mask = scipy.ndimage.binary_opening(
    #     opened_mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    denoised_mask = remove_small_objects(mask, denoising_ratio)
    denoised_mask = remove_small_holes(denoised_mask, denoising_ratio)
    denoised_mask = ~denoised_mask
    return denoised_mask
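
A hypothetical usage sketch for `denoiseMask` (array values are made up; it assumes the function above and its `remove_small_objects`/`remove_small_holes` imports are in scope):

import numpy as np

noisy = np.ones((100, 100), dtype=bool)
noisy[40:60, 40:60] = False          # large hole, bigger than denoising_ratio -> kept
noisy[10, 10] = False                # isolated speck of noise -> filled
clean = denoiseMask(noisy, denoising_ratio=15)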
Example #7
def fill_gaps(image, closing_radius=0, min_hole_size=0, median_radius=0.6):
    """
    This function closes small gaps between and within objects and smooths edges. It is a
    'finishing' step before skeletonization that improves the quality of the skeleton by removing
    gaps and minimizing bumps. It also enables removing close, parallel objects that appear under
    the microscope as a single, long, clear object with sharp, parallel edges; such spurious
    objects would otherwise pass earlier filters. The function itself is a wrapper for
    `skimage.morphology.binary_closing`, `skimage.morphology.remove_small_holes`, and
    `skimage.filters.median` on a binary image.

    Parameters
    ----------
    image : ndarray
        Binary image of candidate objects
    closing_radius : int
        Radius of the disk structuring element used for binary closing. Defaults to 0 (skips).
    min_hole_size : int
        Holes with areas smaller than this (in pixels) are removed. Defaults to 0 (skips).
    median_radius : float
        Radius of the disk structuring element used for the median filter. Defaults to 0.6, giving
        square connectivity of 1 (manhattan = 1). 0 to skip.

    Returns
    -------
    ndarray : Binary image of candidate objects.

    """
    closing_structure = _disk(closing_radius)
    median_structure = _disk(median_radius)

    out = morphology.binary_closing(image, closing_structure)
    out = morphology.remove_small_holes(out, min_size=min_hole_size)
    out = filters.median(out, selem=median_structure)

    return(out)
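
A hedged usage sketch for `fill_gaps` (parameter values are illustrative and assume the pyroots-style helpers `_disk`, `morphology`, and `filters` imported by the module above):

import numpy as np

candidates = np.zeros((200, 200), dtype=bool)
candidates[90:110, 20:180] = True        # an elongated candidate object
candidates[95:105, 60:63] = False        # a small internal gap to close
smoothed = fill_gaps(candidates, closing_radius=2, min_hole_size=40, median_radius=2)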
Example #8
def extract_binary_masks_from_structural_channel(Y, min_area_size=30, min_hole_size=15, gSig=5, expand_method='closing', selem=np.ones((3, 3))):
    """Extract binary masks by using adaptive thresholding on a structural channel

    Inputs:
    ------
    Y:                  caiman movie object
                        movie of the structural channel (assumed motion corrected)

    min_area_size:      int
                        ignore components with smaller size

    min_hole_size:      int
                        fill in holes up to that size (donuts)

    gSig:               int
                        average radius of cell

    expand_method:      string
                        method to expand binary masks (morphological closing or dilation)

    selem:              np.array
                        morphological element with which to expand binary masks

    Output:
    -------
    A:                  sparse column format matrix
                        matrix of binary masks to be used for CNMF seeding

    mR:                 np.array
                        mean image used to detect cell boundaries
    """

    mR = Y.mean(axis=0)
    img = cv2.blur(mR, (gSig, gSig))
    img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.
    img = img.astype(np.uint8)

    th = cv2.adaptiveThreshold(img, np.max(
        img), cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, gSig, 0)
    th = remove_small_holes(th > 0, min_size=min_hole_size)
    th = remove_small_objects(th, min_size=min_area_size)
    areas = label(th)

    A = np.zeros((np.prod(th.shape), areas[1]), dtype=bool)

    for i in range(areas[1]):
        temp = (areas[0] == i + 1)
        if expand_method == 'dilation':
            temp = dilation(temp, selem=selem)
        elif expand_method == 'closing':
            temp = closing(temp, selem=selem)

        A[:, i] = temp.flatten('F')

    return A, mR
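
The post-processing core of this example (adaptive threshold, fill small holes, drop small components, label) can be exercised on a plain 2-D array without a caiman movie; the kernel and size values below are illustrative assumptions:

import numpy as np
import cv2
from scipy import ndimage as ndi
from skimage.morphology import remove_small_holes, remove_small_objects

img = (np.random.rand(128, 128) * 255).astype(np.uint8)
th = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY, 11, 0)
mask = remove_small_holes(th > 0, area_threshold=15)   # fill small "donut" holes
mask = remove_small_objects(mask, min_size=30)         # drop tiny components
labels, n_components = ndi.label(mask)                 # connected components for seeding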
Example #9
def test_one_connectivity_holes():
    expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], np.bool_)
    observed = remove_small_holes(test_holes_image, area_threshold=3)
    assert_array_equal(observed, expected)
Example #10
def test_one_connectivity_holes():
    expected = np.array([[0,0,0,0,0,0,1,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,0,0,0,0,0,0,1,1,1],
                         [0,0,0,0,0,0,0,1,1,1],
                         [0,0,0,0,0,0,0,1,1,1]], bool)
    observed = remove_small_holes(test_holes_image, min_size=3)
    assert_array_equal(observed, expected)
Example #11
def punch(img):
    # Identifying the tissue punches in order to crop the image correctly
    # Canny edges and RANSAC are used to fit a circle to the punch
    # A mask is created

    distance = 0
    r = 0

    float_im, orig, ihc = create_bin(img)
    gray = rgb2grey(orig)
    smooth = gaussian(gray, sigma=3)

    shape = np.shape(gray)
    l = shape[0]
    w = shape[1]

    x = l - 20
    y = w - 20

    rows = np.array([[x, x, x], [x + 1, x + 1, x + 1]])
    columns = np.array([[y, y, y], [y + 1, y + 1, y + 1]])

    corner = gray[rows, columns]

    thresh = np.mean(corner)
    print(thresh)
    binar = (smooth < thresh - 0.01)

    bin = remove_small_holes(binar, min_size=100000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=5000, connectivity=2)
    bin2 = gaussian(bin1, sigma=3)
    bin3 = (bin2 > 0)

    # eosin = IHC[:, :, 2]
    edges = canny(bin3)
    coords = np.column_stack(np.nonzero(edges))

    model, inliers = ransac(coords, CircleModel, min_samples=4, residual_threshold=1, max_trials=1000)

    # rr, cc = circle_perimeter(int(model.params[0]),
    #                          int(model.params[1]),
    #                          int(model.params[2]),
    #                          shape=im.shape)

    a, b = model.params[0], model.params[1]
    r = model.params[2]
    ny, nx = bin3.shape
    ix, iy = np.meshgrid(np.arange(nx), np.arange(ny))
    distance = np.sqrt((ix - b)**2 + (iy - a)**2)

    mask = np.ma.masked_where(distance > r, bin3)

    return distance, r, float_im, orig, ihc, bin3
Example #12
def label_img(img):
    # Labelling the nests is done using connected components
    img = create_bin(img)

    labeled_img = label(input=img, connectivity=2, background=0)
    # min size of holes in a nest
    rem_holes = remove_small_holes(labeled_img, min_size=100, connectivity=2)
    #min size of a nest
    labeled_img1 = remove_small_objects(rem_holes, min_size=70, connectivity=2)
    labeled = label(labeled_img1, connectivity=2, background=0)

    print(labeled)
    return labeled
Example #13
def test_uint_image_holes():
    labeled_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0],
                                    [0,1,1,1,1,1,0,0,0,0],
                                    [0,1,0,0,1,1,0,0,0,0],
                                    [0,1,1,1,0,1,0,0,0,0],
                                    [0,1,1,1,1,1,0,0,0,0],
                                    [0,0,0,0,0,0,0,2,2,2],
                                    [0,0,0,0,0,0,0,2,0,2],
                                    [0,0,0,0,0,0,0,2,2,2]], dtype=np.uint8)
    expected = np.array([[0,0,0,0,0,0,1,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,1,1,1,1,1,0,0,0,0],
                         [0,0,0,0,0,0,0,1,1,1],
                         [0,0,0,0,0,0,0,1,1,1],
                         [0,0,0,0,0,0,0,1,1,1]], dtype=bool)
    observed = remove_small_holes(labeled_holes_image, min_size=3)
    assert_array_equal(observed, expected)
Example #14
def get_bg_mask(img):
    
    if img.ndim == 3:
        bg_mask = img.any(axis=-1)
        bg_mask = np.invert(bg_mask) # consistent with np.ma, True if masked

        # make multichannel (is it really this hard?)
        bg_mask = np.repeat(bg_mask[:,:,np.newaxis], 3, axis=2) 
    
    else:
        bg_mask = (img != 0)
        bg_mask = np.invert(bg_mask) # see above

    bound = segmentation.find_boundaries(bg_mask, mode='inner', background=1)
    bg_mask[bound] = 1
    
    holes = morphology.remove_small_holes(bg_mask)
    bg_mask[holes] = 1

    return bg_mask
Example #15
def label_img(img):
    # Labelling the nests is done using connected components
    dist = 0
    radius = 0
    dist, radius, float_img, orig, ihc, bin3 = punch(img)
    masked_img = np.ma.masked_where(dist > radius, float_img)
    masked_bool = np.ma.filled(masked_img, fill_value=0)

    min_nest_size = 100  # Size in Pixels of minimum nest
    min_hole_size = 500  # Size in Pixels of minimum hole

    labeled_img = label(input=masked_bool, connectivity=2, background=0)
    rem_holes = remove_small_holes(labeled_img, min_size=min_hole_size, connectivity=2)
    labeled_img1 = remove_small_objects(rem_holes, min_size=min_nest_size, connectivity=2)
    labeled = label(labeled_img1, connectivity=2, background=0)
    mask_lab = np.ma.masked_where(dist > radius, labeled)

    print(labeled)

    return labeled, masked_img, orig, ihc, bin3, float_img
Example #16
def test_uint_image_holes():
    labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                                    [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                                    [0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
                                    [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
                                    [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 0, 2],
                                    [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
                                   dtype=np.uint8)
    expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=np.bool_)
    with expected_warnings(['returned as a boolean array']):
        observed = remove_small_holes(labeled_holes_image, area_threshold=3)
    assert_array_equal(observed, expected)
Example #17
    def get_lane_pixels(self, lane_img):
        H, W = lane_img.shape[:2]
        lane_img = remove_small_holes(lane_img, min_size=128)
        lane_img = remove_small_objects(lane_img, min_size=128+16)
        window, stride = 50, 10
        lxs, lys = [], [] # lane left boundary coordinates
        mxs, mys = [], [] # lane middle coordinates
        rxs, rys = [], [] # lane right boundary coordinates
        for offset in range(0, H-window+1, stride):
            region = lane_img[offset:offset+window, :]
            ys, xs = np.where(region > 0)
            if len(xs) <= 25: continue
            xs = np.array(sorted(xs))
            i = np.argmax(np.diff(xs))
            left_xs = xs[:i-3]
            right_xs = xs[i+1:]
            is_valid_region = (W/3 <= (xs.max()-xs.min()) <= W*5/6)  
            if is_valid_region:
                lx = np.mean(left_xs)#np.min(xs)#
                rx = np.median(right_xs)#np.max(xs)#
                mx = (lx + rx)/2

                my = np.mean(ys) + offset
                ly = my
                ry = my
                if True:#len(mxs) == 0 or np.abs(mx - np.mean(mxs)) <= 100:
                    if left_xs.max()-left_xs.min() <= W/10:
                        lxs.append(lx)
                        lys.append(ly)
                    if right_xs.max()-right_xs.min() <= W/10:
                        rxs.append(rx)
                        rys.append(ry)
                    if (left_xs.max()-left_xs.min() <= W/10) and (right_xs.max()- right_xs.min() <= W/10): 
                        mxs.append(mx)
                        mys.append(my)

        lxs,lys,mxs,mys,rxs,rys = map(np.array, [lxs,lys,mxs,mys,rxs,rys])
        return lxs, lys, mxs, mys, rxs, rys
Example #18
def segment(img):
    # Identifying the tissue punches in order to crop the image correctly
    im = skimage.io.imread(img)
    gray = rgb2grey(im)
    smooth = gaussian(gray, sigma=10)
    thresh = 0.88
    binar = (smooth <= thresh)
    bin = remove_small_holes(binar, min_size=90000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=20000, connectivity=2)
    dist = ndi.distance_transform_edt(bin1)
    local_maxi = peak_local_max(dist, indices=False, labels=bin1)
    markers = ndi.label(local_maxi)[0]
    wat = watershed(dist, markers, mask=bin1)

    size = np.bincount(wat.ravel())
    biggest_label = size[1:].argmax() + 1
    clump_mask = wat == biggest_label



    # fz_seg = felzenszwalb(fil, scale=1, sigma=5, min_size=20000)

    # print fz_seg
    return clump_mask
Example #19
def frangi_segmentation(image, 
                        colors,
                        frangi_args, 
                        threshold_args,
                        separate_objects=True, 
                        contrast_kernel_size='skip',
                        color_args_1='skip',
                        color_args_2='skip', 
                        color_args_3='skip', 
                        neighborhood_args='skip',
                        morphology_args_1='skip', 
                        morphology_args_2='skip', 
                        hollow_args='skip', 
                        fill_gaps_args='skip', 
                        diameter_args='skip', 
                        diameter_bins='skip', 
                        image_name='image', 
                        verbose=False):
    """
    Possible approach to object detection using frangi filters. Selects colorbands for
    analysis, runs frangi filter, thresholds to identify candidate objects, then removes
    spurious objects by color and morphology characteristics. See frangi_approach.ipynb.
    
    Unless noted, the dictionaries are called by their respective functions in order.
    
    Parameters
    ----------
    image : ndarray
        RGB image to analyze
    colors : dict or str
        Parameters for picking the colorspace. See `pyroots.band_selector`. 
    frangi_args : list of dict or dict
        Parameters to pass to `skimage.filters.frangi`
    threshold_args : list of dict or dict
        Parameters to pass to `skimage.filters.threshold_adaptive`
    contrast_kernel_size : int, str, or None
        Kernel size for `skimage.exposure.equalize_adapthist`. If `int`, then gives the size of the kernel used
        for adaptive contrast enhancement. If `None`, uses default (1/8 shortest image dimension). If `skip`,
        then skips. 
    color_args_1 : dict
        Parameters to pass to `pyroots.color_filter`.
    color_args_2 : dict
        Parameters to pass to `pyroots.color_filter`. Combines with color_args_1
        in an 'and' statement.
    color_args_3 : dict
        Parameters to pass to `pyroots.color_filter`. Combines with color_args_1, 2
        in an 'and' statement.
    neighborhood_args : dict
        Parameters to pass to 'pyroots.neighborhood_filter'. 
    morphology_args_1 : dict
        Parameters to pass to `pyroots.morphology_filter`    
    morphology_args_2 : dict
        Parameters to pass to `pyroots.morphology_filter`. Happens after fill_gaps_args in the algorithm.
    hollow_args : dict
        Parameters to pass to `pyroots.hollow_filter`
    fill_gaps_args : dict
        Parameters to pass to `pyroots.fill_gaps`
    diameter_bins : list
        To pass to `pyroots.bin_by_diameter`
    image_name : str
        Identifier of image for summarizing
    
    Returns
    -------
    A dictionary containing:
        1. `"geometry"` summary `pandas.DataFrame`
        2. `"objects"` binary image
        3. `"length"` medial axis image
        4. `"diameter"` medial axis image
 
    """

    # Pull band from colorspace
    working_image = band_selector(image, colors)  # expects dictionary (lazy coding)
    nbands = len(working_image)
    if verbose is True:
        print("Color bands selected")
    
    ## Count number of dictionaries in threshold_args and frangi_args. Should equal number of bands. Convert to list if necessary
    try:
        len(threshold_args[0])
    except:
        threshold_args = [threshold_args]
        if nbands != len(threshold_args):
            raise ValueError(
                """Number of dictionaries in `threshold_args` doesn't
                equal the number of bands in `colors['band']`!"""
            )
        pass 
    
    try:
        len(frangi_args[0])
    except:
        frangi_args = [frangi_args]
        if nbands != len(frangi_args):
            raise ValueError(
                """Number of dictionaries in `frangi_args` doesn't 
                equal the number of bands in `colors['band']`!"""
            )
        pass    
    
    working_image = [img_as_float(i) for i in working_image]
    
    # Contrast enhancement
    try:
        for i in range(nbands):
            temp = exposure.equalize_adapthist(working_image[i], 
                                               kernel_size = contrast_kernel_size)
            working_image[i] = img_as_float(temp)
        if verbose:
            print("Contrast enhanced")
    except:
        if contrast_kernel_size != 'skip':
            warn('Skipping contrast enhancement')
        pass
        
    # invert if necessary
    for i in range(nbands):
        if not colors['dark_on_light'][i]:
            working_image[i] = 1 - working_image[i]
    
    # Identify smoothing sigma for edges and frangi thresholding
    # simultaneously detect edges (computationally cheaper than multiple frangi enhancements)
    edges = [np.ones_like(working_image[0]) == 1] * nbands    # all True
    sigma_val = [0.125] * nbands  # step is 0, 0.25, 0.5, 1, 2, 4, 8, 16
    for i in range(nbands):
        edge_val = 1
        while edge_val > 0.1 and sigma_val[i] < 10:
            sigma_val[i] = 2*sigma_val[i]
            temp = filters.gaussian(working_image[i], sigma=sigma_val[i])
            temp = filters.scharr(temp)
            temp = temp > filters.threshold_otsu(temp)
            edge_val = np.sum(temp) / np.sum(np.ones_like(temp))

            edges_temp = temp.copy()

        if sigma_val[i] == 0.25: # try without smoothing
            temp = filters.scharr(working_image[i])
            temp = temp > filters.threshold_otsu(temp)
            edge_val = np.sum(temp) / np.sum(np.ones_like(temp))
            if edge_val <= 0.1:
                sigma_val[i] = 0
                edges_temp = temp.copy()
            
        if separate_objects:
            edges[i] = morphology.skeletonize(edges_temp)
    
    if verbose:
        print("Sigma value: {}".format(sigma_val))
        if separate_objects:
            print("Edges found")
    
    # Frangi vessel enhancement
    for i in range(nbands):
        temp = filters.gaussian(working_image[i], sigma=sigma_val[i])
        temp = filters.frangi(temp, **frangi_args[i])
        temp = 1 - temp/np.max(temp)
        temp = temp < filters.threshold_local(temp, **threshold_args[i])
        working_image[i] = temp.copy()
    
    frangi = working_image.copy()
    if verbose:
        print("Frangi filter, threshold complete")
    
    
    # Combine bands, separate objects
    combined = working_image[0] * ~edges[0]
    for i in range(1, nbands):
        combined = combined * working_image[i] * ~edges[i]
    working_image = combined.copy()
    
    # Filter candidate objects by color
    try:
        color1 = color_filter(image, working_image, **color_args_1)  #colorspace, target_band, low, high, percent)
        if verbose:
            print("Color filter 1 complete")
    except:
        if color_args_1 != 'skip':
            warn("Skipping Color Filter 1")
        color1 = np.ones(working_image.shape)  # no filtering      

    try:
        color2 = color_filter(image, working_image, **color_args_2)  # nesting equates to an "and" statement.
        if verbose:
            print("Color filter 2 complete")   
    except:
        if color_args_2 != 'skip':
            warn("Skipping Color Filter 2")
        color2 = np.ones(working_image.shape)  # no filtering
    
    try:
        color3 = color_filter(image, working_image, **color_args_3)  # nesting equates to an "and" statement.
        if verbose:
            print("Color filter 3 complete")
    except:
        if color_args_3 != 'skip':
            warn("Skipping Color Filter 3")
        color3 = np.ones(working_image.shape)  # no filtering
    
    # Combine bands
    working_image = color1 * color2 * color3
    del color1
    del color2
    del color3
    
    # Re-expand to area
    if separate_objects:
    
        # find edges removed
        temp = [frangi[i] * edges[i] for i in range(nbands)]
        rm_edges = temp[0].copy()
        for i in range(1, nbands):
            rm_edges = rm_edges * temp[i]
        
        # filter by color per criteria above
        try:    color1 = color_filter(image, rm_edges, **color_args_1)
        except: color1 = np.ones(rm_edges.shape)
        try:    color2 = color_filter(image, rm_edges, **color_args_2)
        except: color2 = np.ones(rm_edges.shape)
        try:    color3 = color_filter(image, rm_edges, **color_args_3)
        except: color3 = np.ones(rm_edges.shape)
        
        # Combine color filters
        expanded = color1 * color2 * color3
    else:
        expanded = np.zeros(working_image.shape) == 1  # evaluate to false
    
    
    working_image = expanded ^ working_image  # combine re-expanded edges with filtered objects (xor)
    
    try:    # remove little objects (for computational efficiency)
        working_image = morphology.remove_small_objects(
            working_image, 
            min_size=morphology_args_1['min_size']
        )
    except:
        pass
    if verbose:
        print("Edges re-added")

    # Filter candidate objects by morphology
    try:
        working_image = morphology_filter(working_image, **morphology_args_1)
        if verbose:
            print("Morphology filter 1 complete")
    except:
        if morphology_args_1 != 'skip':
            warn("Skipping morphology filter 1")
        pass        
    
    # Filter objects by neighborhood colors
    try:
        working_image = neighborhood_filter(image, working_image, **neighborhood_args)
        if verbose:
            print("Neighborhood filter complete")
    except:
        if neighborhood_args != 'skip':
            warn("Skipping neighborhood filter")
        pass
    
    # Filter candidate objects by hollowness
    if hollow_args != 'skip':
        temp = morphology.remove_small_holes(working_image, min_size=10)
        try:
            if np.sum(temp) > 0:
                working_image = hollow_filter(temp, **hollow_args)
            if verbose:
                print("Hollow filter complete")
        except:
            warn("Skipping hollow filter")
            pass
    
    # Close small gaps and holes in accepted objects
    try:
        working_image = fill_gaps(working_image, **fill_gaps_args)
        if verbose:
            print("Gap filling complete")
    except:
        if fill_gaps_args != 'skip':
            warn("Skipping filling gaps")
        pass
    
    # Filter candidate objects by morphology
    try:
        working_image = morphology_filter(working_image, **morphology_args_2)
        if verbose:
            print("Morphology filter 2 complete")
    except:
        if morphology_args_2 != 'skip':
            warn("Skipping morphology filter 2")
        pass
        
    # Skeletonize. Now working with a dictionary of objects.
    skel = skeleton_with_distance(working_image)
    if verbose:
        print("Skeletonization complete")
    
    # Diameter filter
    try:
        diam = diameter_filter(skel, **diameter_args)
        if verbose:
            print("Diameter filter complete")
    except:
        diam = skel.copy()
        if diameter_args != 'skip':
            warn("Skipping diameter filter")
        pass
    
    # Summarize
    if diameter_bins is None or diameter_bins == 'skip':
        summary_df = summarize_geometry(diam['geometry'], image_name)

    else:
        diam_out, summary_df = bin_by_diameter(diam['length'],
                                               diam['diameter'],
                                               diameter_bins,
                                               image_name)
        diam['diameter'] = diam_out
    
    out = {'geometry' : summary_df,
           'objects'  : diam['objects'],
           'length'   : diam['length'],
           'diameter' : diam['diameter']}

    if verbose is True:
        print("Done")

    return(out)
Example #20
def find_centers_and_crop(imagepath,
                          foldername,
                          imagename,
                          savepath,
                          outfile,
                          scale=4,
                          cropsize=128):
    # Get the image and resize
    color_image = np.array(Image.open(imagepath))
    print("Working on", imagename)
    image_shape = color_image.shape[:2]
    image_shape = tuple(ti // scale for ti in image_shape)
    color_image = resize(color_image, image_shape)

    # Split the image into channels
    microtubules = color_image[:, :, 0]
    antibody = color_image[:, :, 1]
    nuclei = color_image[:, :, 2]

    # Segment the nuclear channel and get the nuclei
    min_nuc_size = 100.0

    val = threshold_otsu(nuclei)
    smoothed_nuclei = gaussian(nuclei, sigma=5.0)
    binary_nuclei = smoothed_nuclei > val
    binary_nuclei = remove_small_holes(binary_nuclei, min_size=300)
    labeled_nuclei = label(binary_nuclei)
    labeled_nuclei = clear_border(labeled_nuclei)
    labeled_nuclei = remove_small_objects(labeled_nuclei,
                                          min_size=min_nuc_size)

    # Iterate through each nucleus, get its center (if the object is valid), and save crops to the directory
    for i in range(1, np.max(labeled_nuclei) + 1):
        current_nuc = labeled_nuclei == i
        if np.sum(current_nuc) > min_nuc_size:
            y, x = center_of_mass(current_nuc)
            x = int(x)
            y = int(y)

            c1 = y - cropsize // 2
            c2 = y + cropsize // 2
            c3 = x - cropsize // 2
            c4 = x + cropsize // 2

            if c1 < 0 or c3 < 0 or c2 > image_shape[0] or c4 > image_shape[1]:
                pass
            else:
                nuclei_crop = nuclei[c1:c2, c3:c4]
                antibody_crop = antibody[c1:c2, c3:c4]
                microtubule_crop = microtubules[c1:c2, c3:c4]

                folder_suffix = imagename.rsplit("_", 4)[0]
                outfolder = savepath + foldername + "_" + folder_suffix
                outimagename = imagename.rsplit("_", 3)[0] + "_" + str(i)

                if not os.path.exists(outfolder):
                    os.mkdir(outfolder)

                Image.fromarray(nuclei_crop).save(outfolder + "//" +
                                                  outimagename + "_blue.tif")
                Image.fromarray(antibody_crop).save(outfolder + "//" +
                                                    outimagename +
                                                    "_green.tif")
                Image.fromarray(microtubule_crop).save(outfolder + "//" +
                                                       outimagename +
                                                       "_red.tif")

                output = open(outfile, "a")
                output.write(foldername + "_" + folder_suffix + "/" +
                             outimagename)
                output.write("\t")
                output.write(str(x))
                output.write("\t")
                output.write(str(y))
                output.write("\n")
                output.close()
Example #21
#is empty, try again with a smaller minimum_feature_size
new = remove_small_objects(image.astype(bool),\
    min_size=minimum_feature_size,connectivity=1)
if new.sum() == 0:
    print('minimum feature size too large, trying again with m = {}'.\
            format(int(minimum_feature_size/2)))
    new = remove_small_objects(image.astype(bool),\
        min_size=int(minimum_feature_size/2),connectivity=1)
    if new.sum() == 0:
        print('minimum feature size too large, trying again with m = {}'.\
            format(int(minimum_feature_size/4)))
        new = remove_small_objects(image.astype(bool),\
            min_size=int(minimum_feature_size/4),connectivity=1)
image = new

#smooth with binary opening and closing
#standard binary image noise-removal with opening followed by closing
#maybe remove this processing step if depicted structures are really tiny
if smoothing:
    image = binary_opening(image, disk(smoothing))
    image = binary_closing(image, disk(smoothing))

#remove disconnected objects and fill in holes
image = remove_small_objects(image.astype(bool),\
    min_size=minimum_feature_size,connectivity=1)
image = remove_small_holes(image.astype(bool),\
    min_size=minimum_feature_size/100,connectivity=1)

#save image
imsave(join(dest, image_name + "_binary.png"), image)
Example #22
# It is also possible to interactively explore the properties of labelled
# objects by visualizing them in the hover information of the labels.
# This example uses plotly in order to display properties when
# hovering over the objects.

import plotly
import plotly.express as px
import plotly.graph_objects as go
from skimage import data, filters, measure, morphology

img = data.coins()
# Binary image, post-process the binary mask and compute labels
threshold = filters.threshold_otsu(img)
mask = img > threshold
mask = morphology.remove_small_objects(mask, 50)
mask = morphology.remove_small_holes(mask, 50)
labels = measure.label(mask)

fig = px.imshow(img, binary_string=True)
fig.update_traces(hoverinfo='skip')  # hover is only for label info

props = measure.regionprops(labels, img)
properties = ['area', 'eccentricity', 'perimeter', 'mean_intensity']

# For each label, add a filled scatter trace for its contour,
# and display the properties of the label in the hover of this trace.
for index in range(1, labels.max()):
    label_i = props[index].label
    contour = measure.find_contours(labels == label_i, 0.5)[0]
    y, x = contour.T
    hoverinfo = ''
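
The listing above is cut off inside the loop. In the scikit-image gallery example this snippet is based on, the loop plausibly continues along the lines below, building the hover string from the measured properties and adding one filled contour trace per label (reconstructed as an approximation, not verbatim source):

    for prop_name in properties:
        hoverinfo += f'<b>{prop_name}: {getattr(props[index], prop_name):.2f}</b><br>'
    fig.add_trace(go.Scatter(
        x=x, y=y, name=str(label_i), mode='lines', fill='toself',
        showlegend=False, hovertemplate=hoverinfo, hoveron='points+fills'))

fig.show()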
Example #23
def remove_small_regions(img, size):
    """Morphologically removes small (less than size) connected regions of 0s or 1s."""
    img = morphology.remove_small_objects(img, size)
    img = morphology.remove_small_holes(img, size)
    return img
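
A hypothetical call showing the effect, assuming the `remove_small_regions` definition above is in scope (values are illustrative): foreground specks and background holes smaller than `size` pixels are both removed.

import numpy as np

binary = np.zeros((64, 64), dtype=bool)
binary[16:48, 16:48] = True
binary[30, 30] = False               # 1-pixel hole  -> filled
binary[2, 2] = True                  # 1-pixel speck -> removed
cleaned = remove_small_regions(binary, 20)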
Example #24
def mask_postprocess(mask, threshold=0.4):
    mask1 = (mask > threshold).astype(np.uint8)
    mask2 = remove_small_holes(mask1)
    return mask2
Example #25
            Y_true=Y_train[date],
            repeats=5,
            n=20,
            threshold_start=threshold_start,
            threshold_end=threshold_end)

        end_training_time = time.time()
        training_time = end_training_time - start_training_time
        training_time = np.round(training_time, 2)

        start_inference_time = time.time()

        Y_predict_test = VI_RGB_test[date][VI_name]
        Y_predict_test = (Y_predict_test > threshold_VI[date][VI_name])
        if use_morphological_operation:
            Y_predict_test = remove_small_holes(
                Y_predict_test, area_threshold=min_plant_size[date])
            Y_predict_test = remove_small_objects(
                Y_predict_test, min_size=min_plant_size[date])

        end_inference_time = time.time()
        inference_time = end_inference_time - start_inference_time
        inference_time = np.round(inference_time, 2)

        if use_morphological_operation:
            Y_predict_test, Y_test[date] = choose_pixels_with_mask(
                bands=Y_predict_test,
                ground_truth=ground_truth_test[date],
                mask=mask_test[date],
                is_RGB_map=False)

        F1_score = f1_score(Y_predict_test, Y_test[date])
Example #26
        segments = mask[0]
    segments_sizes = [
        np.sum(segments == i_segments)
        for i_segments in np.unique(segments)[1:]
    ]
    cluster_sizes.append(segments_sizes)
    segments_sizes = [str(f'{i_segments}') for i_segments in segments_sizes]
    segments_sizes = '\n'.join(segments_sizes)

    # save vars for fig_slic
    background_plot = background
    lesion_area_plot = lesion_area
    vessels_plot = vessels
    boundaries_plot = boundaries
    labelled, nr = label(mask_slic)
    mask_dil = remove_small_holes(remove_small_objects(mask_slic, 50))
    labelled2, nr2 = label(mask_dil)

    tgt_minis, tgt_minis_coords, tgt_minis_masks, tgt_minis_big, tgt_minis_coords_big, tgt_minis_masks_big = select_lesions_match_conditions2(
        segments, img[0], skip_index=0)
    targets, coords, masks, seeds = make_list_of_targets_and_seeds(
        tgt_minis,
        tgt_minis_coords,
        tgt_minis_masks,
        seed_value=SEED_VALUE,
        seed_method='max')
    targets_all.append(len(targets))

    coords_big = name_prefix.split('_')
    coords_big = [int(i) for i in coords_big[1:]]
    TRESH_PLOT = 20
Example #27
def segment2p5(img, postsize=30, exp_clip_limit=15):
    '''
    Segments droplets in an image using a watershed algorithm. OpenCV implementation.

    Parameters
    ----------
    img: numpy.ndarray
        Array representing the greyscale values (0-255) of an image cropped to show only the droplets region
    postsize: int, optional
        minimum region size (in pixels) used when removing small objects and holes (posts)
    exp_clip_limit: float, optional
        clipLimit parameter for OpenCV CLAHE adaptive equalization

    Returns
    -------
    (labeled: numpy.ndarray, num_regions: int)
        labeled: labeled array of the same shape as input image where each region is assigned a distinct integer label.
        num_regions: number of labeled regions
    '''

    # Adaptive Equalization
    clahe = cv2.createCLAHE(clipLimit=exp_clip_limit, tileGridSize=(10, 10))
    img_adapteq = clahe.apply(img)

    # Thresholding (OTSU)
    blur = cv2.GaussianBlur(img_adapteq, (3, 3), 0)
    _, binary = cv2.threshold(blur, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Remove small dark regions
    remove_posts = morphology.remove_small_objects(binary, postsize)
    remove_posts = morphology.remove_small_holes(remove_posts, postsize)
    remove_posts = remove_posts.astype(np.uint8)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    closed = cv2.morphologyEx(remove_posts,
                              cv2.MORPH_CLOSE,
                              kernel,
                              iterations=2)
    #fill_holes = ndi.morphology.binary_fill_holes(closed, structure=np.ones((3, 3))).astype('uint8')

    # noise removal
    kernel = np.ones((2, 2), np.uint8)
    #opening = cv2.morphologyEx(closed,cv2.MORPH_OPEN,kernel, iterations = 2)
    closing = cv2.morphologyEx(closed, cv2.MORPH_CLOSE, kernel, iterations=2)
    # sure background area
    #sure_bg = cv2.dilate(opening,kernel,iterations=3)
    sure_bg = cv2.dilate(closing, kernel, iterations=1)
    # Finding sure foreground area
    #dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 5)
    _, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255,
                               0)
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    _, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    #markers = markers+1
    # Now, mark the region of unknown with zero
    #markers[unknown > 0] = 0

    # Run the watershed algorithm
    three_channels = cv2.cvtColor(closed, cv2.COLOR_GRAY2BGR)
    segmented = cv2.watershed(three_channels.astype('uint8'), markers)

    return (segmented, segmented.max() - 1)
Example #28
def generate_mask(img):

    h, w = img.shape
    e = entropy(img, disk(5))

#     plt.figure();
#     plt.title('distribution');
#     plt.hist(e.flatten(), bins=100);

    x = np.atleast_2d(e[e > .1]).T

    bics = []
    clfs = []
    # for nc in [2]:
    for nc in [2,3]:
        clf = mixture.GMM(n_components=nc, covariance_type='full')
        clf.fit(x)
        bic = clf.bic(x)
        bics.append(bic)
        clfs.append(clf)

    # print 'num. components', np.argsort(bics)[0] + 2

    clf = clfs[np.argsort(bics)[0]]

    means = np.squeeze(clf.means_)

    order = np.argsort(means)
    means = means[order]

    covars = np.squeeze(clf.covars_)
    covars = covars[order]

    weights = clf.weights_
    weights = weights[order]

    # consider only the largest two components
    if nc > 2:
        order = sorted(np.argsort(weights)[-2:])
        weights = weights[order]
        covars = covars[order]
        means = means[order]

    counts, bins = np.histogram(e.flat, bins=100, density=True);

    # ignore small components
    gs = np.array([w * 1./np.sqrt(2*np.pi*c) * np.exp(-(bins-m)**2/(2*c)) for m, c, w in zip(means, covars, weights)])

#     plt.figure();
#     plt.title('fitted gaussians');
#     plt.plot(bins, gs.T);

    thresh = bins[np.where(gs[-1] - gs[-2] < 0)[0][-1]]
    # print thresh

    mask = e > thresh

    mask = remove_small_objects(mask, min_size=10000, connectivity=8)
    mask = remove_small_holes(mask, min_size=10000, connectivity=8)

    return mask
Example #29
def remove_small_regions(img, size):
    """Morphologically removes small (less than size) connected regions of 0s or 1s."""
    img = morphology.remove_small_objects(img, size)
    img = morphology.remove_small_holes(img, size)
    return img
Example #30
save_path = r'\FightHCC3DV2\Task3\MR'
pred_path = r'\InferenceResults\LiverMR\TrSeg'

filenames = os.listdir(save_path)
filenames.sort()

# T1DUAL
for name in filenames:
    folder_path = join(save_path, name)
    nii_name = 't1in_' + name + '.nii.gz'

    #   nii_data = nb.load(join(pred_path, nii_name)).get_data()
    nii = sitk.ReadImage(join(pred_path, nii_name))
    nii_data = sitk.GetArrayFromImage(nii)
    liver = getLargestCC(nii_data > 0)
    liver = morphology.remove_small_holes(liver, 100000)
    liver = np.uint8(liver) * 63

    for i in range(liver.shape[0]):
        save_name = 'T1DUAL\\Results\\img' + ("%03d" % i) + '.png'
        io.imsave(join(folder_path, save_name), liver[i, :, :])

# Save T2SPIR_nii.gz to png
for name in filenames:
    folder_path = join(save_path, name)
    nii_name = 't2_' + name + '.nii.gz'
    #    if os.path.exists(join(pred_path, nii_name)):
    nii = sitk.ReadImage(join(pred_path, nii_name))
    nii_data = sitk.GetArrayFromImage(nii)
    liver = np.uint8(getLargestCC(nii_data > 0)) * 63
Example #31
def showLines(img):
    thresh = threshold_otsu(img)

    binary = img > thresh
    binary = remove_small_holes(binary)
    skeleton = skeletonize(binary)

    # label image regions
    label_image = label(skeleton)

    if debug_mode:
        image_label_overlay = label2rgb(label_image, image=img)
        io.imsave(debug_folder + "image_label_overlay.jpg",
                  image_label_overlay)

    prop = regionprops(label_image)
    #print(len(prop))

    mask = np.ones([3, 3])
    kernel = mask < 0

    overlay_polylineImage = np.zeros(img.shape)
    for obj in prop:
        #print(label.convex_image)
        if obj.area > 10:
            b = findJoints(obj.image)
            joints = label(b)
            dots = regionprops(joints)
            img4edit = np.copy(obj.image)

            #io.imshow(closing(obj.image))
            #io.show()
            min_row = obj.bbox[0]
            min_col = obj.bbox[1]
            for d in dots:
                y, x = np.array(d.centroid).astype(int)
                mask = np.copy(img4edit[y - 1:y + 2, x - 1:x + 2])

                img4edit[y - 1:y + 2, x - 1:x + 2] = 1

                branchLabel = label(img4edit)
                branches = regionprops(branchLabel)
                branchsize = [branch.area for branch in branches]
                branchcoord = [branch.coords for branch in branches]
                if len(branchsize) > 1:
                    minIndex = np.argmin(np.array(branchsize))
                    for i, j in branchcoord[minIndex]:
                        img4edit[i, j] = 0
                    img4edit[y - 1:y + 2, x - 1:x + 2] = mask
                    #img4edit[y,x]=1

            ##### to fit into polyline
            img4edit = skeletonize(binary_dilation(binary_dilation(img4edit)))

            label4coord = label(img4edit)
            coords = regionprops(label4coord)[0]
            polyline = approximate_polygon(coords.coords, tolerance=0.02)

            xlast = -1
            ylast = -1
            row = []
            col = []

            for i, j in polyline:
                if xlast != -1:
                    rr, cc = line(min_row + ylast, min_col + xlast,
                                  min_row + i, min_col + j)
                    row.extend(rr)
                    col.extend(cc)
                    #polylineImage[rr,cc] = 1
                xlast = j
                ylast = i
                #polylineImage[i,j] = 1

            overlay_polylineImage[row, col] = 1

            #io.imshow(polylineImage)
            #io.show()
    return overlay_polylineImage
Example #32
def remove_small_regions(img, size):
    """Morphologically removes small (less than size) connected regions of 0s or 1s."""
    img = morphology.remove_small_objects(img, size)  # remove connected regions (of 1s) smaller than the given size
    img = morphology.remove_small_holes(img, size)  # fill holes (connected regions of 0s) smaller than the given size
    return img
Example #33
def train(bs, sample, vasample, ep, ilr):
    # Initialize learning rate decay and learning rate
    lr_dec = 1
    init_lr = ilr
    # model
    model = Cuda(UNet())
    # initialize weight
    init_weights(model)
    # optimizer
    opt = torch.optim.Adam(model.parameters(), lr=init_lr)
    opt.zero_grad()
    # train and validation samples
    rows_trn = len(sample['Label'])
    rows_val = len(vasample['Label'])
    # Batch per epoch
    batches_per_epoch = rows_trn // bs
    losslists = []
    vlosslists = []

    for epoch in range(ep):
        # Learning rate
        lr = init_lr * lr_dec
        order = np.arange(rows_trn)
        losslist = []
        tr_metric_list = []
        va_metric_list = []
        for itr in range(batches_per_epoch):
            rows = order[itr * bs:(itr + 1) * bs]
            if itr + 1 == batches_per_epoch:
                rows = order[itr * bs:]
            # read in a batch
            trim = sample['Image'][rows[0]]
            trla = sample['Label'][rows[0]]
            trga = sample['Gap'][rows[0]]
            # read in augmented images
            for iit in range(6):
                trimm = trim[iit:iit + 1, :, :, :]
                trlaa = trla[iit:iit + 1, :, :, :]
                trgaa = trga[iit:iit + 1, :, :, :]
                # Calculate label positive and negative ratio
                label_ratio = (trlaa > 0).sum() / (
                    trlaa.shape[1] * trlaa.shape[2] * trlaa.shape[3] -
                    (trlaa > 0).sum())
                # If smaller than 1, add weight to positive prediction
                if label_ratio < 1:
                    add_weight = (trlaa[0, 0, :, :] / 255 + 1 /
                                  (1 / label_ratio - 1))
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If larger than 1, add weight to negative prediction
                elif label_ratio > 1:
                    add_weight = (trlaa[0, 0, :, :] / 255 + 1 /
                                  (label_ratio - 1))
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If equal to 1, no weight added
                elif label_ratio == 1:
                    add_weight = (np.ones(
                        [1, 1, trlaa.shape[2], trlaa.shape[3]])) / 2 * 255
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # Cuda and tensor inputs and label
                x = Cuda(
                    Variable(torch.from_numpy(trimm).type(torch.FloatTensor)))
                y = Cuda(
                    Variable(
                        torch.from_numpy(trlaa / 255).type(torch.FloatTensor)))
                # Prediction
                pred_mask = model(x)
                # BCE and dice loss
                loss = loss_fn(pred_mask, y).cpu() + dice_loss(
                    F.sigmoid(pred_mask), y)
                losslist.append(loss.data.numpy()[0])
                loss.backward()
                # ppv metric
                tr_metric = metric(F.sigmoid(pred_mask), y)
                tr_metric_list.append(tr_metric)
            opt.step()
            opt.zero_grad()

        vlosslist = []
        # For validation set
        for itr in range(rows_val):
            vaim = vasample['Image'][itr]
            vala = vasample['Label'][itr]
            vaga = vasample['Gap'][itr]
            for iit in range(1):
                # Load one batch
                vaimm = vaim[iit:iit + 1, :, :, :]
                valaa = vala[iit:iit + 1, :, :, :]
                vagaa = vaga[iit:iit + 1, :, :, :]
                # Calculate label positive and negative ratio
                label_ratio = (valaa > 0).sum() / (
                    valaa.shape[1] * valaa.shape[2] * valaa.shape[3] -
                    (valaa > 0).sum())
                # If smaller than 1, add weight to positive prediction
                if label_ratio < 1:
                    add_weight = (valaa[0, 0, :, :] / 255 + 1 /
                                  (1 / label_ratio - 1))
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If larger than 1, add weight to negative prediction
                elif label_ratio > 1:
                    add_weight = (valaa[0, 0, :, :] / 255 + 1 /
                                  (label_ratio - 1))
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If equal to 1, no weight added
                elif label_ratio == 1:
                    add_weight = (np.ones(
                        [1, 1, valaa.shape[2], valaa.shape[3]])) / 2 * 255
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # cuda and tensor sample
                xv = Cuda(
                    Variable(torch.from_numpy(vaimm).type(torch.FloatTensor)))
                yv = Cuda(
                    Variable(
                        torch.from_numpy(valaa / 255).type(torch.FloatTensor)))
                # prediction
                pred_maskv = model(xv)
                # dice and BCE loss
                vloss = loss_fn(pred_maskv, yv).cpu() + dice_loss(
                    F.sigmoid(pred_maskv), yv)
                vlosslist.append(vloss.data.numpy()[0])
                # ppv metric
                va_metric = metric(F.sigmoid(pred_maskv), yv)
                va_metric_list.append(va_metric)

        lossa = np.mean(losslist)
        vlossa = np.mean(vlosslist)
        tr_score = np.mean(tr_metric_list)
        va_score = np.mean(va_metric_list)
        # Print epoch summary
        print(
            'Epoch {:>3} |lr {:>1.5f} | Loss {:>1.5f} | VLoss {:>1.5f} | Train Score {:>1.5f} | Val Score {:>1.5f} '
            .format(epoch + 1, lr, lossa, vlossa, tr_score, va_score))
        losslists.append(lossa)
        vlosslists.append(vlossa)

        for param_group in opt.param_groups:
            param_group['lr'] = lr
        # Save the model whenever the validation loss reaches a new minimum
        if vlossa == np.min(vlosslists):
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': opt.state_dict(),
            }
            torch.save(checkpoint, '../' + output + '/unet')
        # if the loss has not improved over the recent epochs, decrease the learning rate by a factor of 10
        if epoch > 6:
            if losscp(losslists[-5:]) or losscp(vlosslists[-5:]):
                lr_dec = lr_dec / 10
        # if no change or increase in loss for consecutive 15 epochs, save validation predictions and stop training
        if epoch > 15:
            if losscp(losslists[-15:]) or losscp(
                    vlosslists[-15:]) or epoch + 1 == ep:
                for itr in range(rows_val):
                    vaim = vasample['Image'][itr]
                    for iit in range(1):
                        vaimm = vaim[iit:iit + 1, :, :, :]
                        xv = Cuda(
                            Variable(
                                torch.from_numpy(vaimm).type(
                                    torch.FloatTensor)))
                        pred_maskv = model(xv)
                        pred_np = (F.sigmoid(pred_maskv) >
                                   0.7).cpu().data.numpy().astype(np.uint8)
                        pred_np = mph.remove_small_objects(
                            pred_np.astype(bool), min_size=15,
                            connectivity=2).astype(np.uint8)
                        pred_np = mph.remove_small_holes(
                            pred_np, min_size=40, connectivity=2).astype(
                                np.uint8) * 255
                        if not os.path.exists('../' + output + '/validation/'):
                            os.makedirs('../' + output + '/validation/')
                        imsave(
                            '../' + output + '/validation/' +
                            vasample['ID'][itr] + '.png', pred_np[0, 0, :, :])
                break

    # Loss figures
    plt.plot(losslists)
    plt.plot(vlosslists)
    plt.title('Train & Validation Loss')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.savefig('../' + output + '/loss.png')
Example #34
def test(tesample, model, group):
    test_ids = []
    rles = []
    if not os.path.exists('../' + output + '/' + group):
        os.makedirs('../' + output + '/' + group)
    for itr in range(len(tesample['ID'])):
        teim = tesample['Image'][itr]
        teid = tesample['ID'][itr]
        tedim = tesample['Dim'][itr]
        # cuda and tensor input
        xt = Cuda(Variable(torch.from_numpy(teim).type(torch.FloatTensor)))
        # prediction
        pred_mask = model(xt)
        # pdm = F.sigmoid(pred_mask).cpu().data.numpy()[0,0,:,:]
        # raw = (pdm / pdm.max() * 255).astype(np.uint8)
        # binarize output mask
        pred_np = (F.sigmoid(pred_mask).cpu().data.numpy()) * 2
        ppp = pred_np[0, 0, :, :]
        pred_np = pred_np[0, 0, :, :]
        pred_npa = (pred_np > 1.2).astype(np.uint8)
        pred_npb = (pred_np > 0.95).astype(np.uint8)
        pred_npa = mph.remove_small_objects(pred_npa.astype(bool),
                                            min_size=30,
                                            connectivity=2).astype(np.uint8)
        pred_npa = mph.remove_small_holes(pred_npa.astype(bool),
                                          min_size=30,
                                          connectivity=2).astype(np.uint8)
        pred_npb = mph.remove_small_objects(pred_npb.astype(bool),
                                            min_size=30,
                                            connectivity=2).astype(np.uint8)
        pred_npb = mph.remove_small_holes(pred_npb.astype(bool),
                                          min_size=30,
                                          connectivity=2).astype(np.uint8)
        pred_np = pred_npa + pred_npb
        pww = pred_np
        # local_maxi = peak_local_max(raw, indices=False, min_distance=20, labels=pred_np)
        # markers = ndi.label(local_maxi)[0]
        # pred_np = mph.watershed(pred_np, markers, connectivity=2, watershed_line=True, mask=pred_np)
        # pred_np = (pred_np > 0)
        # cut back to original image size
        pred_np = back_scale(pred_np, tedim)
        ppp = back_scale(ppp, tedim)
        pww = back_scale(pww, tedim)
        if np.max(pred_np) == np.min(pred_np):
            print('1st BOOM!')
            print(teid)
            if np.max(pww) == np.min(pww):
                print('2nd_BOOM!')
                if ppp.max() == 0 or ppp.min() == 2:
                    print('3rd_BOOM!')
                    imsave(
                        '../' + output + '/' + group + '/' + teid +
                        '_pred.png', ppp.astype(np.uint8))
                    pred_np = ppp
                else:
                    ppp = (ppp / ppp.max()) * 2
                    ppp = (ppp > 1.9).astype(np.uint8) * 2
                    imsave(
                        '../' + output + '/' + group + '/' + teid +
                        '_pred.png',
                        ((ppp / ppp.max()) * 255).astype(np.uint8))
                    pred_np = ppp
            else:
                imsave('../' + output + '/' + group + '/' + teid + '_pred.png',
                       ((pww / pww.max()) * 255).astype(np.uint8))
                pred_np = pww
        else:
            # save predicted mask
            imsave('../' + output + '/' + group + '/' + teid + '_pred.png',
                   ((pred_np / pred_np.max()) * 255).astype(np.uint8))
        rle = list(prob_to_rles(pred_np))
        rles.extend(rle)
        test_ids.extend([teid] * len(rle))
    # save vectorize masks as CSV
    sub = pd.DataFrame()
    sub['ImageId'] = test_ids
    sub['EncodedPixels'] = pd.Series(rles).apply(
        lambda x: ' '.join(str(y) for y in x))

    return sub
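
Note: the dual thresholds above (>1.2 and >0.95 on twice the sigmoid, roughly 0.6 and 0.475 in probability) are each cleaned and then summed, so the final map holds 2 where both cuts agree and 1 where only the looser one fires. A hedged standalone sketch of that idea:

import numpy as np
from skimage import morphology as mph

def two_level_mask(prob, hi=0.6, lo=0.475, min_size=30):
    # 0/1/2 map: 2 = confident foreground, 1 = borderline, 0 = background
    strict = mph.remove_small_objects(prob > hi, min_size=min_size, connectivity=2)
    strict = mph.remove_small_holes(strict, area_threshold=min_size, connectivity=2)
    loose = mph.remove_small_objects(prob > lo, min_size=min_size, connectivity=2)
    loose = mph.remove_small_holes(loose, area_threshold=min_size, connectivity=2)
    return strict.astype(np.uint8) + loose.astype(np.uint8)

levels = two_level_mask(np.random.rand(128, 128))
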
Example #35
0
    manualmask = cv2.threshold(manualmask, 254, 255, cv2.THRESH_BINARY)[1]
    manualerase = imread(directoryboundaries + filename[:-4] +
                         '.tif')[0, :, :, :]
    manualerase = cv2.cvtColor(manualerase, cv2.COLOR_BGR2GRAY)
    manualerase = 255 - manualerase
    manualerase = cv2.threshold(manualerase, 254, 255, cv2.THRESH_BINARY)[1]
    manualerase = 255 - manualerase
    img = imread(directoryin + filename[:-4] + '.tif')
    img = np.maximum(img, manualmask)

    img = np.minimum(img, manualerase)
    img = cv2.bitwise_not(img)
    img = cv2.threshold(img, cutoff, 255, cv2.THRESH_BINARY)[1]
    img = img > 0
    img = morphology.remove_small_objects(img, min_size=minregionsize)
    img = morphology.remove_small_holes(img, area_threshold=minholesize)
    img = 255 * img.astype(np.uint8)
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    minRect = [None] * len(contours)
    Rect = [None] * len(contours)
    for i, c in enumerate(contours):
        minRect[i] = cv2.minAreaRect(c)
        a, b, w, h = cv2.boundingRect(c)
        Rect[i] = (a, b, a + w, b + h)
    drawing = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    GTOG = cv2.imread(directoryoriginal + filename)
    if len(GTOG.shape) > 2:
        GT = GTOG[:, :, 1]
    GT = GT / 2
    GT = GT.astype(np.uint8)
Example #36
0
    mask = np.zeros(data.shape, dtype=np.uint8)
    mask[data > threshold_value] = 1

    return threshold_value, mask


if __name__ == '__main__':
    image = np.load(r'C:\MyCode\PythonScript\NoduleDetection\slice_demo.npy')

    mask = np.zeros(image.shape)
    mask[image < -374] = 1
    # morphological operations
    final_mask = np.zeros(image.shape)

    # temp = morphology.binary_opening(mask, morphology.disk(11))
    # plt.imshow(temp, cmap='gray'), plt.show()
    temp = clear_border(mask)
    plt.imshow(temp, cmap='gray'), plt.show()
    temp = temp.astype(np.bool)
    temp = morphology.remove_small_holes(temp, 512)
    plt.imshow(temp, cmap='gray'), plt.show()
    temp = morphology.remove_small_objects(temp, 100)
    plt.imshow(temp, cmap='gray'), plt.show()
    temp = temp.astype(np.float64)
    plt.imshow(temp, cmap='gray'), plt.show()
    final_mask = temp

    from Visualization import DrawBoundaryOfBinaryMask
    DrawBoundaryOfBinaryMask(image, final_mask)
Example #37
0
            cv2.moveWindow("Image Overlay", 0, 0)
            cv2.resizeWindow("Image Overlay", func.resizeWin(bgr, resize)[0], 
                         func.resizeWin(bgr, resize)[1])
            cv2.waitKey(1)
        cv2.destroyWindow("Image Overlay")
    elif coords.closeWin == True:
        cv2.destroyAllWindows()
        break
try:
    # apply the clicked range
    lower = coords.lower
    upper = coords.upper
    color_mask = cv2.inRange(hsv, lower, upper)
    color_mask = np.invert(color_mask).astype(bool)
    # clean it up
    color_mask = morph.remove_small_holes(color_mask, area_threshold=cutoff, connectivity=2)
    color_mask = morph.opening(color_mask, selem=morph.selem.disk(1))
    color_mask = morph.closing(color_mask, selem=morph.selem.disk(1))
    # add the hue mask to the full ignore mask
    ignore_mask = np.logical_and(ignore_mask, color_mask)
    # make the color mask three channel image for stacking
    color_mask = color_mask.astype(np.uint8)
    color_mask[color_mask==0] = 255
    color_mask[color_mask==1] = 0
    color_mask = np.dstack((color_mask, color_mask, color_mask))
    color_masks.append(color_mask)
    # stack the color mask for subsequent runs
    img = cv2.addWeighted(color_mask, 0.6, img, 1, 0)
    # do we repeat the color masking?
    while True:
        do_masking = input("add another color mask? (y/n): ")
Example #38
0
def test_in_place_holes():
    observed = remove_small_holes(test_holes_image, min_size=3, in_place=True)
    assert_equal(observed is test_holes_image, True,
                 "remove_small_holes in_place argument failed.")
Example #39
0
def func(path, output):

    cwd = "/".join(
        os.path.realpath(__file__).replace("\\", "/").split("/")[:-1]) + "/"

    #print(cwd)
    #print(" :) ")

    name = cwd + "model.h5"
    #name = "\.model.h5"

    # get model
    get_model()

    # load model
    model = load_model(name, compile=False)

    print("preprocessing...")
    nib_volume = nib.load(path)
    new_spacing = [1., 1., 1.]
    resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
    data = resampled_volume.get_data().astype('float32')

    curr_shape = data.shape

    # resize to get (512, 512) output images
    img_size = 512
    data = zoom(data,
                [img_size / data.shape[0], img_size / data.shape[1], 1.0],
                order=1)

    # intensity normalization
    intensity_clipping_range = [-150, 250]  # HU clipping limits (Pravdaray's configs)
    data = intensity_normalization(
        volume=data, intensity_clipping_range=intensity_clipping_range)

    # fix orientation
    data = np.rot90(data, k=1, axes=(0, 1))
    data = np.flip(data, axis=0)

    print("predicting...")
    # predict on data
    pred = np.zeros_like(data).astype(np.float32)
    for i in tqdm(range(data.shape[-1]), "pred: "):
        pred[..., i] = model.predict(
            np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0),
                                          axis=-1),
                           axis=0))[0, ..., 1]
    del data

    # threshold
    pred = (pred >= 0.4).astype(int)

    # fix orientation back
    pred = np.flip(pred, axis=0)
    pred = np.rot90(pred, k=-1, axes=(0, 1))

    print("resize back...")
    # resize back from 512x512
    pred = zoom(pred,
                [curr_shape[0] / img_size, curr_shape[1] / img_size, 1.0],
                order=1)
    pred = (pred >= 0.5).astype(np.float32)

    print("morphological post-processing...")
    # morphological post-processing
    # 1) first erode
    pred = binary_erosion(pred.astype(bool), ball(3)).astype(np.float32)

    # 2) keep only largest connected component
    labels = label(pred)
    regions = regionprops(labels)
    area_sizes = []
    for region in regions:
        area_sizes.append([region.label, region.area])
    area_sizes = np.array(area_sizes)
    tmp = np.zeros_like(pred)
    tmp[labels == area_sizes[np.argmax(area_sizes[:, 1]), 0]] = 1
    pred = tmp.copy()
    del tmp, labels, regions, area_sizes

    # 3) dilate
    pred = binary_dilation(pred.astype(bool), ball(3))

    # 4) remove small holes
    pred = remove_small_holes(pred.astype(bool),
                              area_threshold=0.001 *
                              np.prod(pred.shape)).astype(np.float32)

    print("saving...")
    pred = pred.astype(np.uint8)
    img = nib.Nifti1Image(pred, affine=resampled_volume.affine)
    resampled_lab = resample_from_to(img, nib_volume, order=0)
    nib.save(resampled_lab, output)
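
Note: the morphological post-processing in this example (erode, keep the largest connected component, dilate, fill holes sized relative to the volume) is reusable on its own. A condensed sketch using the same ball(3) element and 0.1 % hole threshold, run on a synthetic volume:

import numpy as np
from skimage.measure import label, regionprops
from skimage.morphology import ball, binary_dilation, binary_erosion, remove_small_holes

def postprocess_volume(pred):
    # erode, keep the largest component, dilate, then fill small internal holes
    pred = binary_erosion(pred.astype(bool), ball(3))
    labels = label(pred)
    regions = regionprops(labels)
    if regions:
        largest = max(regions, key=lambda r: r.area)
        pred = labels == largest.label
    pred = binary_dilation(pred, ball(3))
    return remove_small_holes(pred, area_threshold=int(0.001 * np.prod(pred.shape)))

cleaned = postprocess_volume(np.random.rand(48, 48, 48) > 0.5)
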
Example #40
0
        graph.add_tedge(node_id, beta * np.minimum(1./alpha*scoremap_cropped[y,x], 1),
                        beta * (1-np.minimum(1./alpha*scoremap_cropped[y,x], 1)))

    graph.maxflow();

    node_mincut_labels = graph.get_grid_segments(nodeids)
    mincut_labelmap = ~node_mincut_labels.reshape((im_cropped_h, im_cropped_w))

    sys.stderr.write('Graph-cut: %.2f seconds.' % ( time.time() - t))

#     plt.figure(figsize=(20,20));
#     plt.imshow(mincut_labelmap, cmap=plt.cm.gray)
#     plt.title('Graphcut binary map');
#     plt.show();

    mincut_labelmap = remove_small_holes(mincut_labelmap, min_size=10000)
    mincut_labelmap = remove_small_objects(mincut_labelmap, min_size=10000)

    # plt.figure(figsize=(20,20));
    # plt.imshow(mincut_labelmap, cmap=plt.cm.gray)
    # plt.title('Graphcut binary map, hole filled');
    # plt.show();

    conn_map, num_conn = label(mincut_labelmap, return_num=True)
    overlap_with_init_mask = [np.count_nonzero(init_mask[conn_map[::32, ::32] == i])
                              for i in range(1, num_conn+1)]
    most_likely_conn_label = np.argsort(overlap_with_init_mask)[-1] + 1
    most_likely_mask = conn_map == most_likely_conn_label

    contours_final = find_contour_points(most_likely_mask, sample_every=1)[1]
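
Note: the end of this snippet selects, among the connected components of the cleaned graph-cut map, the one overlapping the initial mask the most (the original measures overlap on a ::32 subsampled label map). A small illustrative helper, with hypothetical names:

import numpy as np
from skimage.measure import label
from skimage.morphology import remove_small_holes, remove_small_objects

def pick_component_overlapping(mask, init_mask, min_size=100):
    # clean a binary map, then keep the component overlapping init_mask the most
    mask = remove_small_holes(mask.astype(bool), area_threshold=min_size)
    mask = remove_small_objects(mask, min_size=min_size)
    conn_map, num_conn = label(mask, return_num=True)
    if num_conn == 0:
        return mask
    overlap = [np.count_nonzero(init_mask[conn_map == i]) for i in range(1, num_conn + 1)]
    return conn_map == (int(np.argmax(overlap)) + 1)

rough = np.zeros((64, 64), bool); rough[5:20, 5:20] = True; rough[40:60, 40:60] = True
seed = np.zeros((64, 64), bool); seed[45:55, 45:55] = True
best = pick_component_overlapping(rough, seed)
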
Example #41
0
    def calc_foreground_background_mask(self):
        '''
        Calculates the mask (self.mask) based on the traces and an SVM
        :return:
        '''
        try:
            #first create the inner and the outer mask, marking which pixels have been marked by the user as
            #definitely inside or definitely outside the slice
            inner_mask, outer_mask = self.create_inner_and_outer_mask()
            # get a numpy array from the image
            pixels = self.get_np_array()
            # and use this to create a trainings set of pixels inside and outside
            X_in = pixels[inner_mask].reshape((-1, 3))
            X_out = pixels[outer_mask].reshape((-1, 3))
            # undersample max_num_train_pixels
            X_in_sel = np.arange(len(X_in), dtype=np.int)
            X_out_sel = np.arange(len(X_out), dtype=np.int)
            num_train_pixels = min(len(X_in_sel), len(X_out_sel),
                                   self.max_num_train_pixels)

            np.random.shuffle(X_in_sel)
            X_in_sel = X_in_sel[:num_train_pixels]
            np.random.shuffle(X_out_sel)
            X_out_sel = X_out_sel[:num_train_pixels]

            X_in = X_in[X_in_sel]
            X_out = X_out[X_out_sel]
            Y_in = np.ones((X_in.shape[0], 1))
            Y_out = np.zeros((X_out.shape[0], 1))
            X = np.vstack((X_in, X_out))
            y = np.vstack((Y_in, Y_out))
            # and train an SVM
            svm = SVC().fit(X=X, y=y)

            # apply the SVM on all pixels
            pred_svm = svm.predict(pixels.reshape((-1, 3))).reshape(
                (pixels.shape[0], pixels.shape[1]))
            prob_svm = svm.decision_function(pixels.reshape((-1, 3))).reshape(
                (pixels.shape[0], pixels.shape[1]))

            # but have the precalculated masks take precedence
            pred_svm = np.minimum(np.maximum(inner_mask, pred_svm),
                                  ~outer_mask)
            prob_svm = np.minimum(np.maximum(inner_mask, prob_svm),
                                  ~outer_mask)

            #remove objects that are smaller than 100 pixels
            pred_svm = morphology.remove_small_objects(
                pred_svm.astype(np.bool), min_size=100,
                connectivity=2).astype(int)
            # remove holes  that are smaller than 100 pixels
            pred_svm = morphology.remove_small_holes(
                pred_svm.astype(np.bool), area_threshold=100,
                connectivity=2).astype(int)

            #and that results in the final mask
            self.mask = pred_svm

            # create a matplotlib visualization, only visible in the IDE
            color_inner = (pixels.reshape((-1, 3))[pred_svm.reshape(
                (-1)).astype(np.bool)])
            color_inner = np.mean(color_inner, axis=0, dtype=np.int)
            color_outer = (pixels.reshape((-1, 3))[~pred_svm.reshape(
                (-1)).astype(np.bool)])
            color_outer = np.mean(color_outer, axis=0, dtype=np.int)
            fig, axs = plt.subplots(3, 2)
            axs[0, 0].imshow(1 * inner_mask - 1 * outer_mask)
            axs[0, 1].imshow(pixels)
            axs[1, 0].imshow(self.mask, vmin=0, vmax=1)
            axs[1, 1].imshow(self.mask, vmin=0, vmax=1)
            axs[2, 0].imshow(
                np.expand_dims(self.mask, -1).astype(np.int) * pixels +
                np.expand_dims(1 - self.mask, -1).astype(np.int) * color_inner)
            axs[2, 1].imshow(
                np.expand_dims(1 - self.mask, -1).astype(np.int) * pixels +
                np.expand_dims(self.mask, -1).astype(np.int) * color_outer)
            for axh in axs:
                for ax in axh:
                    ax.set_aspect('equal')
                    ax.set_axis_off()

            fig.tight_layout()
            plt.show()
            return None

        except:
            self.logger.error(sys.exc_info()[0])
            self.logger.error(traceback.format_exc())
            return None
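
Note: stripped of the plotting and undersampling, the routine above amounts to: fit a classifier on user-marked foreground/background pixels, predict every pixel, then clean the mask with remove_small_objects and remove_small_holes. A hedged sketch of that idea on synthetic inputs (names are illustrative):

import numpy as np
from skimage import morphology
from sklearn.svm import SVC

def svm_pixel_mask(pixels, inner_mask, outer_mask, min_size=100):
    # train on labelled pixels, classify the whole image, then denoise the mask
    X = np.vstack((pixels[inner_mask], pixels[outer_mask]))
    y = np.concatenate((np.ones(inner_mask.sum()), np.zeros(outer_mask.sum())))
    pred = SVC().fit(X, y).predict(pixels.reshape(-1, 3)).reshape(pixels.shape[:2])
    pred = morphology.remove_small_objects(pred.astype(bool), min_size=min_size, connectivity=2)
    return morphology.remove_small_holes(pred, area_threshold=min_size, connectivity=2)

pixels = np.random.randint(0, 255, (40, 40, 3))
inner = np.zeros((40, 40), bool); inner[10:20, 10:20] = True
outer = np.zeros((40, 40), bool); outer[:5, :] = True
mask = svm_pixel_mask(pixels, inner, outer)
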
Example #42
0
def test_in_place_holes():
    observed = remove_small_holes(test_holes_image, area_threshold=3,
                                  in_place=True)
    assert_equal(observed is test_holes_image, True,
                 "remove_small_holes in_place argument failed.")
Example #43
0
 def _denoise_thrs(self, img, thrs, max_object, max_hole):
     return morphology.remove_small_holes(
         morphology.remove_small_objects(img > thrs, max_object), max_hole)
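
Note: this one-liner is the denoise idiom that recurs throughout these examples: threshold, drop small objects, fill small holes. A standalone equivalent and a possible usage (parameter values are illustrative):

import numpy as np
from skimage import morphology

def denoise_threshold(img, thrs, max_object, max_hole):
    # binarize at thrs, remove objects below max_object pixels, fill holes below max_hole pixels
    return morphology.remove_small_holes(
        morphology.remove_small_objects(img > thrs, max_object), max_hole)

mask = denoise_threshold(np.random.rand(100, 100), 0.5, max_object=64, max_hole=64)
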
Example #44
0
def CellSeg(SlideDir, quantify, shape, stroma, tumor, start):
    """
    Wrapper for cell segmentation

    Parameters
    ----------
    SlideDir : directory for slide containing AFRemoved folder and Registered
    images folder - assumes round 001 is baseline and all files are .tif
    quantify : whether or not to quantify, 1=yes, 0=no
    shape : whether or not to characterize shape, 1=yes, 0=no
    stroma : whether or not to segment stroma, 1=yes, 0=no
    tumor : whether or not to include tumors, 1=yes, 0=no
    start : what image to start processing

    Returns
    -------
    None - function saves images and quantifications.

    """

    # Parse directory supplied for cell segmentation
    (AFRemoved, DAPI, OutDir) = SegDirFormatting(SlideDir)
    # get formatting for AfRemoved, DAPI, and output directories
    AFFiles = os.listdir(AFRemoved)
    AFList = []
    for file in AFFiles:
        AFList.append(file.split("_AFRemoved_"))
    AFList = np.asarray(AFList)
    AFList = np.resize(AFList, (95, 1, 2))
    PosList = np.unique(AFList[:, :, 1])
    AFList = np.unique(AFList[:, :, 0])  # list of markers
    PosList = np.char.replace(PosList, ".tif", "")  # list of positions
    OutPos = PosList

    # Format DAPI images for Cytell based imaging
    DapiList = sorted(DAPI + "/" + element for element in os.listdir(DAPI))

    # make sure the number of DAPI images equals the number of positions
    if len(DapiList) != len(PosList):
        print("Error: Dapi Image Mismatch")
        return
    OutDir = sorted(OutDir)

    # status updates
    print("Segmentation of:", SlideDir, ";", str(len(PosList)), " Positions;\n")

    # Segmentation and Quantification for each position
    for i in range(start, len(PosList)):
        print(f"{OutPos[i]}:")
        # make Stacks of AFRemoved images and Dapi if they don't exist
        if not os.path.exists(f"{OutDir[10]}{OutPos[i]}_stack.tif"):
            print(f"Stack: {OutPos[i]}")
            # form tif image stack for each position with images from each marker
            # io.imsave(f"{OutDir[10]}{OutPos[i]}_stack.tif", io.imread(DapiList[i]))
            stack = []
            stack.append(io.imread(DapiList[i]))
            for j in range(
                len(AFList)
            ):  # loop through AFRemoved images and append to tiff stack
                stack.append(
                    io.imread(f"{AFRemoved}/{AFList[j]}_AFRemoved_{OutPos[i]}.tif")
                )
            stack = np.asarray(stack)
            io.imsave(f"{OutDir[10]}{OutPos[i]}_stack.tif", stack)
        # Check for probability files
        if not os.path.exists(f"{OutDir[4]}epi_{OutPos[i]}_stack_Probabilities.png"):
            print("No Epithelial Probability File")
            continue
        if not os.path.exists(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png"):
            print("No Membrane/Nucleus Probabilty File")
            continue

        # nuclear segmentation and generate supermembrane and binary membrane mask
        if not (
            os.path.exists(f"{OutDir[7]}NucMask_{OutPos[i]}.png")
            or os.path.exists(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
            or os.path.exists(f"{OutDir[5]}MemMask_{OutPos[i]}.png")
        ):
            # read in membrane probability file
            Probs = io.imread(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png")

            # threshold with nuclear probability >0.6 for nuclear mask
            mask = np.where(Probs[:, :, 1] > 255 * 0.6, np.uint8(255), np.uint8(0))
            io.imsave(f"{OutDir[7]}NucMask_{OutPos[i]}.png", mask)
            io.imsave(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif", Probs[:, :, 0])
            # thresholding for membrane mask
            MemMask = np.where(Probs[:, :, 0] > 255 * 0.6, np.uint8(255), np.uint8(0))
            io.imsave(f"{OutDir[5]}MemMask_{OutPos[i]}.png", MemMask)
        else:
            # read files if previously generated
            mask = io.imread(f"{OutDir[7]}NucMask_{OutPos[i]}.png")
            SuperMem = io.imread(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
            MemMask = io.imread(f"{OutDir[5]}MemMask_{OutPos[i]}.png")

        mask = np.where(mask > 0, np.uint8(1), np.uint8(0))  # make nuclear mask binary

        # fill in small holes and smooth
        mask = morphology.remove_small_holes(mask, 20 ** 3)
        selem = morphology.disk(3)
        mask = morphology.binary_opening(mask, selem)

        # remove blurred nuclear regions
        mask = np.multiply(mask, blurimg2_batch(io.imread(DapiList[i])))

        s = mask.shape
        pixadj = 1
        if s[0] != 2048 or s[1] != 2048:
            pixadj = 3

        # generate epithelial mask from machine learning

        if not (os.path.exists(OutDir[3] + "EpiMask_" + OutPos[i] + ".png")):
            print("EpiMask Processing: ")
            epiMask = io.imread(
                OutDir[4] + "epi_" + OutPos[i] + "_stack_Probabilities.png"
            )
            epiMask = ML_probability(
                epiMask, pixadj * 0.01, 0.45
            )  # create epi mask from probability map
            io.imsave(
                OutDir[3] + "EpiMask_" + OutPos[i] + ".png",
                255 * np.array(epiMask, dtype=np.uint8),
            )
        else:
            epiMask = np.array(
                io.imread(OutDir[3] + "EpiMask_" + OutPos[i] + ".png"), dtype=bool
            )

        # thin membrane borders prior to initial watershed
        MemMask = morphology.thin(MemMask)

        # generate cell (re)segmentation and nuclear segmentation images
        if (not os.path.exists(f"{OutDir[8]}NucMaskFinal_{OutPos[i]}.png")) or (
            not os.path.exists(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")
        ):

            print("CellSeg;")

            if not (os.path.exists(f"{OutDir[0]}L2_{OutPos[i]}.tif")):
                L2 = np.array(np.add(util.invert(epiMask), MemMask), dtype=np.uint8)

                # watershed segmentation with nuclei as basins
                L2 = segmentation.watershed(imimposemin(L2, mask), watershed_line=True)
                L2 = np.array(L2, dtype=np.float_)

                # return cells only in epithelial mask
                L2 = np.multiply(L2, epiMask)
                io.imsave(f"{OutDir[0]}L2_{OutPos[i]}.tif", np.int16(L2))

            else:
                L2 = io.imread(f"{OutDir[0]}L2_{OutPos[i]}.tif")
            if not (os.path.exists(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")):
                MemMask = np.array(
                    io.imread(f"{OutDir[5]}MemMask_{OutPos[i]}.png"), dtype=bool
                )
                start = time.time()
                CellSeg = ReSegCells(L2, MemMask)
                end = time.time()
                print(end - start)
                io.imsave(
                    f"{OutDir[0]}CellSeg_{OutPos[i]}.tif",
                    np.array(CellSeg, dtype=np.int16),
                )
            else:
                CellSeg = io.imread(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")

            if not (os.path.exists(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")):
                CellSeg = io.imread(f"{OutDir[0]}CellSeg_{OutPos[i]}.tif")
                SuperMem = io.imread(f"{OutDir[11]}SuperMem_{OutPos[i]}.tif")
                Probs = io.imread(f"{OutDir[4]}mem_{OutPos[i]}_stack_Probabilities.png")
                # check for cells with multiple nuclei and re-segment if they exist
                (watcellseg, mask) = NucCountBatch(
                    CellSeg, mask, epiMask, MemMask, [], Probs[:, :, 1], SuperMem
                )
                watcellseg = watcellseg > 0

                filt = np.array(
                    [
                        [0, 0, 0, 0, 0],
                        [0, 0, 1, 0, 0],
                        [0, 1, 1, 1, 0],
                        [0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 0],
                    ],
                    dtype=bool,
                )

                # fill small crosses
                hitmiss = ndimage.morphology.binary_hit_or_miss(
                    watcellseg, ~filt, np.zeros((5, 5))
                )
                spots = morphology.remove_small_objects(hitmiss, 2)
                diff = hitmiss * (hitmiss ^ spots)

                diff = signal.convolve2d(np.array(diff, np.uint8), filt, mode="same")
                watcellseg = watcellseg + diff
                # set non-epithelial pixels to zero
                watcellseg[epiMask == 0] = 0
                watcellseg = morphology.remove_small_objects(watcellseg, 15)
                watcellseg = watcellseg > 0
                watcellseg = measure.label(watcellseg, connectivity=1)

                io.imsave(
                    f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif",
                    np.array(watcellseg, dtype=np.uint16),
                )
                io.imsave(
                    f"{OutDir[8]}NucMaskFinal_{OutPos[i]}.png",
                    np.array(255 * (mask > 0), dtype=np.uint8),
                )

            else:
                watcellseg = io.imread(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")
                mask = io.imread(f"{OutDir[8]}NucMaskFinal_{OutPos[i]}.png") > 0
        else:
            watcellseg = io.imread(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")
            mask = io.imread(f"{OutDir[8]}NucMaskFinal_{OutPos[i]}.png") > 0

        # Quantification if specified
        if quantify == 1:

            if os.path.exists(
                f"{OutDir[9]}PosStats_{OutPos[i]}.csv"
            ) and os.path.exists(f"{OutDir[6]}Novlp_{OutPos[i]}.png"):
                continue
            else:
                if np.max(watcellseg) == 0:
                    print("\\n")
                print("Quant; ")

                if tumor == 0:
                    (Stats, NoOvlp) = MxIF_quantify(
                        i, watcellseg, AFRemoved, AFList, PosList, mask, MemMask, OutPos
                    )

                if tumor == 1:
                    if not os.path.exists(f"{OutDir[4]}TumorMask_{OutPos[i]}.png"):
                        tumorMask = io.imread(
                            f"{OutDir[4]}tum_{OutPos[i]}_stack_Probabilities.png"
                        )
                        tumorMask = ML_probability(tumorMask, pixadj * 0.01, 0.5)
                        io.imsave(
                            f"{OutDir[4]}TumorMask_{OutPos[i]}.png",
                            np.array(255 * (tumorMask > 0), np.uint8),
                        )
                        (Stats, NoOvlp) = MxIF_quantify(
                            i,
                            watcellseg,
                            AFRemoved,
                            AFList,
                            PosList,
                            mask,
                            MemMask,
                            OutPos,
                            tumorMask,
                        )

                # format data table and write
                transposed_data = list(zip_longest(*Stats.values()))
                with open(r"{OutDir[9]}PosStats_{OutPos[i]}.csv", "w", newline="") as f:
                    writer = csv.writer(f)
                    writer.writerow(Stats.keys())
                    writer.writerows(transposed_data)

                io.imsave(r"{OutDir[6]}Novlp_{OutPos[i]}.png", NoOvlp)

        # Stromal Quantification
        if stroma == 1:
            print("Stromal quant:")
            if not os.path.exists(
                f"{OutDir[4]}str_{OutPos[i]}_stack_Probabilities.png"
            ):
                ("No epithelial probability file")
            elif not os.path.exists(
                f"{OutDir[9]}StrPosStats_{OutPos[i]}.csv"
            ) or not os.path.exists(f"{OutDir[6]}StrNovlp{OutPos[i]}.png"):
                stromal_nuclei = stromal_nuclei_segmentation(
                    io.imread(f"{OutDir[4]}str_{OutPos[i]}_stack_Probabilities.png")
                )
                stromal_nuclei[epiMask == 1] = 0
                stromal_grow = morphology.binary_dilation(
                    stromal_nuclei, morphology.square(5)
                )  # dilate nuclei
                # watershed on dilated cells with nuclei as seed points
                stromal_label = segmentation.watershed(
                    imimposemin(np.array(stromal_grow, np.uint8), stromal_nuclei),
                    watershed_line=True,
                )
                stromal_label[stromal_grow == 0] = 0
                io.imsave(
                    f"{OutDir[1]}StrCellSegFinal_{OutPos[i]}.tif",
                    np.array(stromal_label, np.uint16),
                )
                io.imsave(
                    f"{OutDir[8]}StrNucMaskFinal_{OutPos[i]}.png",
                    255 * np.array(stromal_nuclei, np.uint8),
                )

                # quantify markers in cells and write out data
                (strStats, strNoOvlp) = MxIF_quantify_stroma(
                    i,
                    stromal_label,
                    AFRemoved,
                    AFList,
                    PosList,
                    stromal_nuclei,
                    pixadj,
                    epiMask,
                    OutPos,
                )
                transposed_data = list(zip_longest(*strStats.values()))
                with open(
                    f"{OutDir[9]}strPosStats_{OutPos[i]}.csv", "w", newline=""
                ) as f:
                    writer = csv.writer(f)
                    writer.writerow(strStats.keys())
                    writer.writerows(transposed_data)

                io.imsave(f"{OutDir[6]}strNovlp_{OutPos[i]}.png", strNoOvlp)
            else:
                continue

        # Shape Pre-processing
        if shape == 1:
            # load final segmentation image, extract cells, save as npz files
            if os.path.exists(
                f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif"
            ) and not os.path.exists(f"{OutDir[2]}CellShape_{OutPos[i]}.npz"):
                print("Cell Shape Pre-Processing; ")
                CellImages = io.imread(f"{OutDir[1]}CellSegFinal_{OutPos[i]}.tif")
                CellImages = cell_shape_images(CellImages)
                np.savez_compressed(f"{OutDir[2]}CellShape_{OutPos[i]}", CellImages)
                # np.savez_compressed(OutDir[2] + 'CellShape_' + OutPos[i] + '.npz', CellImages)
            if i == (len(PosList) - 1):
                print("Training Autoencoder; ")
                # Run autoencoder with extracted cell images
                trainList = CellShapeAutoencoder(OutDir[2], 0.2)
                header = ["ID", "Pos", "Selec"]
                for k in range(1, 257):
                    header.append("Enc" + str(k))
                with open(f"{OutDir[2]}EncodedCells.csv", "w", newline="") as f:
                    writer = csv.writer(f)
                    writer.writerow(header)
                    writer.writerows(trainList)
    return None
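
Note: a hypothetical invocation of the wrapper above; the slide path is an assumption, and the flags follow the 1=yes / 0=no convention documented in the docstring:

# quantify and characterise shape, skip stroma and tumor handling, start at position 0
CellSeg("/data/slides/Slide_001", quantify=1, shape=1, stroma=0, tumor=0, start=0)
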
Example #45
0
def hole(binary, draw=False):
    out = remove_small_holes(binary, min_size=300)
    out = morEx(out)
    if draw == True:
        res_drawer.binary_single(out)
    return out
Example #46
0
def extract_binary_masks_from_structural_channel(Y,
                                                 min_area_size=30,
                                                 min_hole_size=15,
                                                 gSig=5,
                                                 expand_method='closing',
                                                 selem=np.ones((3, 3))):
    """Extract binary masks by using adaptive thresholding on a structural channel

    Inputs:
    ------
    Y:                  caiman movie object
                        movie of the structural channel (assumed motion corrected)

    min_area_size:      int
                        ignore components with smaller size

    min_hole_size:      int
                        fill in holes up to that size (donuts)

    gSig:               int
                        average radius of cell

    expand_method:      string
                        method to expand binary masks (morphological closing or dilation)

    selem:              np.array
                        morphological element with which to expand binary masks

    Output:
    -------
    A:                  sparse column format matrix
                        matrix of binary masks to be used for CNMF seeding

    mR:                 np.array
                        mean image used to detect cell boundaries
    """

    mR = Y.mean(axis=0)
    img = cv2.blur(mR, (gSig, gSig))
    img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.
    img = img.astype(np.uint8)

    th = cv2.adaptiveThreshold(img, np.max(img),
                               cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, gSig, 0)
    th = remove_small_holes(th > 0, min_size=min_hole_size)
    th = remove_small_objects(th, min_size=min_area_size)
    areas = label(th)

    A = np.zeros((np.prod(th.shape), areas[1]), dtype=bool)

    for i in range(areas[1]):
        temp = (areas[0] == i + 1)
        if expand_method == 'dilation':
            temp = dilation(temp, selem=selem)
        elif expand_method == 'closing':
            # assumes skimage.morphology.closing is imported alongside dilation
            temp = closing(temp, selem=selem)

        A[:, i] = temp.flatten('F')

    return A, mR
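
Note: a possible usage sketch for the function above. The random array stands in for a motion-corrected structural-channel movie, since anything exposing mean(axis=0) suffices for illustration, and gSig doubles as the (odd) adaptive-threshold block size:

import numpy as np

Y = np.random.randint(0, 255, (100, 64, 64)).astype(np.float32)  # frames x height x width stand-in
A, mR = extract_binary_masks_from_structural_channel(
    Y, min_area_size=30, min_hole_size=15, gSig=5, expand_method='closing')
print(A.shape, mR.shape)  # (64*64, n_components) and (64, 64)
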
Example #47
0
        alphas = np.arange(.20, 0.009, -.01)
            
        #############################################
    
        scoremap_thresholded = scoremap_roi > score_thresh
        
        scoremap_thresholded_padded = np.zeros((roi_height + 100, roi_width + 100), np.bool)
        scoremap_thresholded_padded[50:-50, 50:-50] = scoremap_thresholded[:]
#         scoremap_thresholded_padded = binary_closing(scoremap_thresholded_padded, disk(10))

#         for _ in range(3):
#             scoremap_thresholded_padded = binary_dilation(scoremap_thresholded_padded, disk(3))
#         for _ in range(3):
#             scoremap_thresholded_padded = binary_erosion(scoremap_thresholded_padded, disk(3))

        scoremap_thresholded_padded = remove_small_holes(scoremap_thresholded_padded, 1000)
#         scoremap_thresholded_padded = remove_small_objects(scoremap_thresholded_padded, 100)
        scoremap_thresholded = scoremap_thresholded_padded[50:-50, 50:-50][:]

        init_levelset = np.zeros((roi_height, roi_width))
        init_levelset[inside_points_inroi[:,1], inside_points_inroi[:,0]] = 1.
        
        t = time.time()
        
        msnake = morphsnakes.MorphACWE(scoremap_thresholded.astype(np.float), smoothing=smoothing, lambda1=1., lambda2=1.)
        
        msnake.levelset = init_levelset.copy()
        # levelset values are either 1.0 or 0.0
        
        dq = deque([None, None])
        for i in range(1000): 
Example #48
0
def post_light(mask):
    mask = remove_small_objects(mask > 0.5, 10)
    mask = remove_small_holes(mask, 1000)
    return mask
Example #49
0
    initial_mask = mask.copy()

    # The resolution is about 47 arcsec, but this is just 2 pixels across in
    # the map. I'm going to double this to only include features that are
    # clearly not noise
    beam = Beam(major=2 * 47 * u.arcsec)

    kernel = beam.as_tophat_kernel(pixscale)
    kernel_pix = (kernel.array > 0).sum()

    for i in ProgressBar(mask.shape[0]):
        mask[i] = nd.binary_opening(mask[i], kernel)
        mask[i] = nd.binary_closing(mask[i], kernel)
        mask[i] = mo.remove_small_objects(mask[i], min_size=kernel_pix,
                                          connectivity=2)
        mask[i] = mo.remove_small_holes(mask[i], min_size=kernel_pix,
                                        connectivity=2)

    # Each region must contain a point above the peak_snr
    labels, num = nd.label(mask, np.ones((3, 3, 3)))
    for n in range(1, num + 1):
        pts = np.where(labels == n)
        if np.nanmax(snr[pts]) < peak_snr:
            mask[pts] = False

    masked_cube = cube.with_mask(mask)

    # Save the cube
    masked_cube.write("{}.masked.fits".format(name))

    # Now make the moment maps from the masked cube
    reduc = Mask_and_Moments(masked_cube, scale=noise.scale)
Example #50
0
def preprocess(img, thresh):
    img = (img > (255 * thresh)).astype(np.bool)
    remove_small_objects(img, 300, in_place=True)
    remove_small_holes(img, 300, in_place=True)
    # img = cv2.dilate(img.astype(np.uint8), np.ones((7, 7)))
    return img
Example #51
0
def preprocess(img, thresh):
    img = (img > (255 * thresh)).astype(np.bool)
    remove_small_objects(img, 300, in_place=True)
    remove_small_holes(img, 300, in_place=True)
    # img = cv2.dilate(img.astype(np.uint8), np.ones((7, 7)))
    return img
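
Note: preprocess rebinds img to a fresh boolean array before cleaning it in place, so the caller's original image is untouched. A short usage sketch on a synthetic 8-bit probability image (in newer scikit-image releases in_place is deprecated in favour of the out argument):

import numpy as np

gray = (np.random.rand(256, 256) * 255).astype(np.uint8)  # stand-in probability image
mask = preprocess(gray, thresh=0.5)                        # boolean mask with specks and pinholes removed
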
Example #52
0
    def resegmentise_mask(self, img_obj, by_slice, method, settings):
        # Resegmentation of the roi map based on grey level values

        from skimage.measure import label
        from skimage.morphology import remove_small_holes

        # Skip if required voxel grids are missing
        if img_obj.is_missing or self.roi_intensity is None or self.roi_morphology is None:
            return

        ################################################################################################################
        # Resegmentation that affects both intensity and morphological maps
        ################################################################################################################

        # Initialise range
        updated_range = np.array([np.nan, np.nan])

        if bool(set(method).intersection(["threshold", "range"])):
            # Filter out voxels with intensity outside prescribed range

            # Local constant
            g_thresh = settings.roi_resegment.g_thresh  # Threshold values

            # Upper threshold
            if not np.isnan(g_thresh[1]):
                updated_range[1] = copy.deepcopy(g_thresh[1])

            # Lower threshold
            if not np.isnan(g_thresh[0]):
                updated_range[0] = copy.deepcopy(g_thresh[0])

            # Set the threshold values as g_range
            self.g_range = g_thresh

        if bool(set(method).intersection(["sigma", "outlier"])):
            # Remove voxels with outlier intensities

            # Local constant
            sigma = settings.roi_resegment.sigma
            img_voxel_grid = img_obj.get_voxel_grid()
            roi_voxel_grid = self.roi_intensity.get_voxel_grid()

            # Check if the voxel grid is not empty
            if np.any(roi_voxel_grid):

                # Calculate mean and standard deviation of intensities in roi
                mean_int = np.mean(img_voxel_grid[roi_voxel_grid])
                sd_int   = np.std(img_voxel_grid[roi_voxel_grid])

                if not np.isnan(updated_range[0]):
                    updated_range[0] = np.max([updated_range[0], mean_int - sigma * sd_int])
                else:
                    updated_range[0] = mean_int - sigma * sd_int

                if not np.isnan(updated_range[1]):
                    updated_range[1] = np.min([updated_range[1], mean_int + sigma * sd_int])
                else:
                    updated_range[1] = mean_int + sigma * sd_int

        if not np.isnan(updated_range[0]) or not np.isnan(updated_range[1]):
            # Update intensity mask
            roi_voxel_grid = self.roi_intensity.get_voxel_grid()

            if not np.isnan(updated_range[0]):
                roi_voxel_grid = np.logical_and((img_obj.get_voxel_grid() >= updated_range[0]), roi_voxel_grid)

            if not np.isnan(updated_range[1]):
                roi_voxel_grid = np.logical_and((img_obj.get_voxel_grid() <= updated_range[1]), roi_voxel_grid)

            # Set roi voxel volume
            self.roi_intensity.set_voxel_grid(voxel_grid=roi_voxel_grid)

        ################################################################################################################
        # Resegmentation that affects only morphological maps
        ################################################################################################################
        if bool(set(method).intersection("close_volume")):
            # Close internal volumes

            from scipy.ndimage import generate_binary_structure, binary_erosion

            # Read minimal volume required
            max_fill_volume = settings.roi_resegment.max_fill_volume

            # Get voxel grid of the roi morphological mask
            roi_voxel_grid = self.roi_morphology.get_voxel_grid()

            # Determine fill volume (in voxels); if max_fill_volume is less than 0.0, fill all holes
            if max_fill_volume < 0.0: fill_volume = np.prod(np.array(self.roi_morphology.size)) + 1.0
            else:                     fill_volume = np.floor(max_fill_volume / np.prod(self.roi_morphology.spacing)) + 1.0

            # If the maximum fill volume is smaller than the minimal size of a hole
            if fill_volume < 1.0: return None

            # Label all non-roi voxels and get label corresponding to voxels outside of the roi
            non_roi_label = label(np.pad(roi_voxel_grid, 1, mode="constant", constant_values=0),
                                  background=1, connectivity=3)
            outside_label = non_roi_label[0, 0, 0]

            # Crop non-roi labels and determine non-roi voxels outside of the mask
            non_roi_label = non_roi_label[1:-1, 1:-1, 1:-1]
            vox_outside = non_roi_label == outside_label

            # Determine mask of voxels which are not internal holes
            vox_not_internal = np.logical_or(roi_voxel_grid, vox_outside)

            # Check if there are any holes, otherwise continue
            if not np.any(~vox_not_internal): return None

            if by_slice:
                # 2D approach to filling holes

                for ii in np.arange(0, self.roi_morphology.size[0]):
                    # Skip slices that contain no mask voxels or have no holes
                    if not np.any(roi_voxel_grid[ii, :, :]): continue
                    if not(np.any(~vox_not_internal[ii, :, :])): continue

                    # Fill holes up to fill_volume in voxel number
                    vox_filled = remove_small_holes(vox_not_internal[ii, :, :], min_size=np.int(fill_volume), connectivity=2)

                    # Update mask by removing outside voxels from the mask
                    roi_voxel_grid[ii, :, :] = np.squeeze(np.logical_and(vox_filled, ~vox_outside[ii, :, :]))
            else:
                # 3D approach to filling holes

                # Fill holes up to fill_volume in voxel number
                vox_filled = remove_small_holes(vox_not_internal, min_size=np.int(fill_volume), connectivity=3)

                # Update mask by removing outside voxels from the mask
                roi_voxel_grid = np.logical_and(vox_filled, ~vox_outside)

            # Update voxel grid
            self.roi_morphology.set_voxel_grid(voxel_grid=roi_voxel_grid)

        if bool(set(method).intersection("remove_disconnected")):
            # Remove disconnected voxels

            # Discover prior disconnected volumes from the roi voxel grid
            vox_disconnected = label(self.roi.get_voxel_grid(), background=0, connectivity=3)
            vox_disconnected_labels = np.unique(vox_disconnected)

            # Set up an empty morphological mask
            upd_vox_mask = np.full(shape=self.roi_morphology.size, fill_value=False, dtype=np.bool)

            # Get the minimum volume fraction for inclusion as voxels
            min_vol_fract = settings.roi_resegment.min_vol_fract

            # Iterate over disconnected labels
            for curr_volume_label in vox_disconnected_labels:

                # Skip background
                if curr_volume_label == 0: continue

                # Mask only current volume, skip if empty
                curr_mask = np.logical_and(self.roi_morphology.get_voxel_grid(), vox_disconnected == curr_volume_label)
                if not np.any(curr_mask): continue

                # Find fully disconnected voxels groups and count them
                vox_mask = label(curr_mask, background=0, connectivity=3)
                vox_mask_labels, vox_label_count = np.unique(vox_mask, return_counts=True)

                # Filter out the background counts
                valid_label_id = np.nonzero(vox_mask_labels)
                vox_mask_labels = vox_mask_labels[valid_label_id]
                vox_label_count = vox_label_count[valid_label_id]

                # Normalise to maximum
                vox_label_count = vox_label_count / np.max(vox_label_count)

                # Select labels fulfilling the minimal size
                vox_mask_labels = vox_mask_labels[vox_label_count >= min_vol_fract]

                for vox_mask_label_id in vox_mask_labels:
                    upd_vox_mask += vox_mask == vox_mask_label_id

                # Update morphological voxel grid
                self.roi_morphology.set_voxel_grid(voxel_grid=upd_vox_mask > 0)
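
Note: the close_volume branch above fills only holes that are fully enclosed by the ROI and no larger than a configured volume, by labelling the padded background to separate the true outside from internal pockets. The essential trick, isolated into a small sketch:

import numpy as np
from skimage.measure import label
from skimage.morphology import remove_small_holes

def fill_internal_holes(roi, max_voxels):
    # fill only holes enclosed by the ROI and no larger than max_voxels
    padded = np.pad(roi, 1, mode="constant", constant_values=0)
    non_roi = label(padded, background=1, connectivity=3)      # label the non-ROI voxels
    outside = (non_roi == non_roi[0, 0, 0])[1:-1, 1:-1, 1:-1]  # component touching the pad
    candidate = np.logical_or(roi, outside)                    # ROI plus the true outside
    filled = remove_small_holes(candidate, area_threshold=max_voxels, connectivity=3)
    return np.logical_and(filled, ~outside)                    # drop the outside again

roi = np.ones((10, 10, 10), bool)
roi[4:6, 4:6, 4:6] = False          # a small internal cavity
assert fill_internal_holes(roi, max_voxels=27).all()
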
Example #53
0
def test_float_input_holes():
    float_test = np.random.rand(5, 5)
    with testing.raises(TypeError):
        remove_small_holes(float_test)
Example #54
0
    liver_seg = measure.label(liver_seg, 4)
    props = measure.regionprops(liver_seg)

    max_area = 0
    max_index = 0
    for index, prop in enumerate(props, start=1):
        if prop.area > max_area:
            max_area = prop.area
            max_index = index

    liver_seg[liver_seg != max_index] = 0
    liver_seg[liver_seg == max_index] = 1

    liver_seg = liver_seg.astype(np.bool)
    morphology.remove_small_holes(liver_seg,
                                  para.maximum_hole,
                                  connectivity=2,
                                  in_place=True)
    liver_seg = liver_seg.astype(np.uint8)

    # compute segmentation evaluation metrics
    liver_metric = Metirc(seg_array, liver_seg, ct.GetSpacing())

    liver_score['dice'].append(liver_metric.get_dice_coefficient()[0])
    liver_score['jacard'].append(liver_metric.get_jaccard_index())
    liver_score['voe'].append(liver_metric.get_VOE())
    liver_score['fnr'].append(liver_metric.get_FNR())
    liver_score['fpr'].append(liver_metric.get_FPR())
    liver_score['assd'].append(liver_metric.get_ASSD())
    liver_score['rmsd'].append(liver_metric.get_RMSD())
    liver_score['msd'].append(liver_metric.get_MSD())
Example #55
0
submaps = glob.glob("img/i*.jpg")
submaps.sort()
submaps.insert(0, "img/plan_general.jpg")  # add general map

image_index = []
image_name = []
blob_index = []
centroid = []
for img_ind in range(1, n_image + 1):
    img = mpimg.imread(f"data/{img_ind}_4_colors.png")
    green = (img[:, :, 1] * n_bit).astype(int)  # green channel
    bw = green == colors[1, 1]  # second label from color_quantization.py

    # Morphological cleaning
    bw = morphology.remove_small_holes(bw, area_threshold)
    bw = morphology.remove_small_objects(bw, area_threshold)
    bw = morphology.opening(bw, selem=morphology.disk(opening_radius))

    # Erosion step to disconnect close blobs
    bw = morphology.erosion(bw, selem=morphology.disk(erosion_radius))

    # Get blobs centroids
    label = measure.label(bw, connectivity=1)
    regions = measure.regionprops(label)
    for b_ind, region in enumerate(regions):
        image_index.append(img_ind)
        image_name.append(submaps[img_ind])
        blob_index.append(b_ind)
        centroid.append(region.centroid)
centroid = np.array(centroid)
Example #56
0
def get_operands(images, avoid_shaky_plus=False):
    # average over every frame and over the 3 channels to remove robot
    m_im = images.astype('long').mean(axis=0)
    m_im = m_im.mean(axis=2).astype('uint8')

    # gaussian adaptive threshold
    m_im = cv2.adaptiveThreshold(m_im,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
                cv2.THRESH_BINARY,151,20)

    # morphology: remove small holes
    m_im = morphology.remove_small_holes(m_im.astype(bool), 10)

    if avoid_shaky_plus:
        # do the same without averaging over all the frames, using only first frame
        m_im2 = images[0, :, :, :].copy()
        m_im2 = m_im2.mean(axis=2).astype('uint8')

        # gaussian adaptive threshold
        m_im2 = cv2.adaptiveThreshold(m_im2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                    cv2.THRESH_BINARY,151,20)

        # morphology: remove small holes
        m_im2 = morphology.remove_small_holes(m_im2.astype(bool), 10)

        # use both to remove both shaky plus sign and robot
        m_im[m_im != m_im2] = True

    # re-convert to uint8 with  value from 0 to 255
    m_im = m_im.astype('uint8')
    m_im[m_im == 0] = 255
    # object labeling
    labeled_img, num_lab = label(m_im, background=0, return_num=True)

    # detect objects close enough to belong to the same symbol (e.g. '=' or the division sign)
    center = np.zeros(2)
    center_comp = np.zeros(2)
    for i in range(1, num_lab + 1):
        area_i = (labeled_img == (i)).sum()
        if area_i > 500:
            labeled_img[labeled_img == (i)] = 0
            continue
        for j in range(1, num_lab + 1):
            area_i = (labeled_img == (i)).sum()
            area_j = (labeled_img == (j)).sum()
            if area_j > 500:
                labeled_img[labeled_img == (j)] = 0
                continue
            if i == j or area_i == 0 or area_j == 0:
                continue
            else:
                index = np.nonzero(labeled_img == i)
                center[0] = index[0].mean()
                center[1] = index[1].mean()
                index = np.nonzero(labeled_img == j)
                center_comp[0] = index[0].mean()
                center_comp[1] = index[1].mean()

                _distance = distance.euclidean(center, center_comp)

                if _distance < 15:
                    labeled_img[labeled_img == j] = i
        area_i = (labeled_img == (i)).sum()
        if area_i > 500 or area_i < 40:
            labeled_img[labeled_img == i] = 0

    # remove empty labels
    for i in range(1, num_lab):
        size = (labeled_img == (i)).sum()
        while size == 0 and np.any(labeled_img >= i):
            for j in range(i, num_lab):
                labeled_img[labeled_img == (j + 1)] = j
            size = (labeled_img == (i)).sum()

    # extract mini-image of each object, as well as their center
    list_objects = []
    centers = np.zeros((labeled_img.max(), 2))
    for i in range(labeled_img.max()):
        obj = labeled_img == (i + 1)  # select only object with label i+1
        index = np.nonzero(obj)  # find the index of every pixel of the object
        centers[i, 1] = index[0].mean()
        centers[i, 0] = index[1].mean()
        left = index[1].min()  # - 10 #get the bounds of the index
        right = index[1].max()  # + 10
        top = index[0].min()  # - 10
        bottom = index[0].max()  # + 10
        list_objects.append(labeled_img[top:bottom + 1, left:right + 1])
    # pad the mini-images so they all share the same 28x28 shape (the MNIST shape)

    # heights = np.zeros(len(list_objects), dtype='int')
    # widths = np.zeros(len(list_objects), dtype='int')
    # for i in range(len(list_objects)):
    #     heights[i] = list_objects[i].shape[0]
    #     widths[i] = list_objects[i].shape[1]
    height = 28  # heights.max()
    width = 28  # widths.max()
    all_objects = np.zeros((len(list_objects), height, width))
    for i in range(len(list_objects)):
        vert = height - list_objects[i].shape[0]
        horiz = width - list_objects[i].shape[1]
        if vert > 0 and vert % 2 == 0:
            if horiz > 0 and horiz % 2 == 0:
                all_objects[i, :, :] = np.pad(
                    list_objects[i], ((int(vert / 2), int(vert / 2)),
                                      (int(horiz / 2), int(horiz / 2))),
                    mode='constant')
            elif horiz > 0:
                all_objects[i, :, :] = np.pad(
                    list_objects[i], ((int(vert / 2), int(vert / 2)), (int(
                        (horiz - 1) / 2), int((horiz + 1) / 2))),
                    mode='constant')
            elif horiz == 0:
                all_objects[i, :, :] = np.pad(list_objects[i],
                                              ((int(vert / 2), int(vert / 2)),
                                               (0, 0)),
                                              mode='constant')
        elif vert > 0:
            if horiz > 0 and horiz % 2 == 0:
                all_objects[i, :, :] = np.pad(list_objects[i], ((int(
                    (vert - 1) / 2), int(
                        (vert + 1) / 2)), (int(horiz / 2), int(horiz / 2))),
                                              mode='constant')
            elif horiz > 0:
                all_objects[i, :, :] = np.pad(list_objects[i], ((int(
                    (vert - 1) / 2), int((vert + 1) / 2)), (int(
                        (horiz - 1) / 2), int((horiz + 1) / 2))),
                                              mode='constant')
            elif horiz == 0:
                all_objects[i, :, :] = np.pad(list_objects[i], ((int(
                    (vert - 1) / 2), int((vert + 1) / 2)), (0, 0)),
                                              mode='constant')
        elif vert == 0:
            if horiz > 0 and horiz % 2 == 0:
                all_objects[i, :, :] = np.pad(
                    list_objects[i],
                    ((0, 0), (int(horiz / 2), int(horiz / 2))),
                    mode='constant')
            elif horiz > 0:
                all_objects[i, :, :] = np.pad(list_objects[i], ((0, 0), (int(
                    (horiz - 1) / 2), int((horiz + 1) / 2))),
                                              mode='constant')
            elif horiz == 0:
                all_objects[i, :, :] = np.pad(list_objects[i],
                                              ((0, 0), (0, 0)),
                                              mode='constant')
    all_objects[all_objects != 0] = 255
    return all_objects, centers
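
Note: a hypothetical call to the function above, with a synthetic (frames, height, width, 3) uint8 stack standing in for the captured video:

import numpy as np

frames = np.random.randint(0, 255, (5, 240, 320, 3), dtype=np.uint8)
all_objects, centers = get_operands(frames, avoid_shaky_plus=True)
print(all_objects.shape)  # (n_objects, 28, 28)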