Example #1
def cvxhull_area(self):
    '''
    Calculate the convex hull area with the shoelace formula:
        A = 0.5 * |sum_{n=0}^{N-1} (x_{n+1}*y_n - x_n*y_{n+1})|
    where P_n = (x_n, y_n) and P_N = P_0.
    See http://en.wikipedia.org/wiki/Polygon#Area_and_centroid
    '''
    binIm = self.particuleImage > 0
    area = binIm.sum()
    print("area", area)
    contour = mahotas.bwperim(binIm)
    pointlist = mahotas.polygon.convexhull(contour)
    N = len(pointlist)
    fP = pointlist[0]
    # duplicate the first point P0 at the end such that P_N = P_0
    pointlist.append(fP)
    s = 0
    # compute the sum for n = 0 .. N-1; range(N) so the closing edge
    # P_{N-1} -> P_N is included
    for i in range(N):
        cx = pointlist[i][0]      # x of the current point
        cy = pointlist[i][1]      # y of the current point
        nx = pointlist[i + 1][0]  # x of the next point
        ny = pointlist[i + 1][1]  # y of the next point
        det = nx * cy - cx * ny
        s = s + det
    hull_area = 0.5 * abs(s)
    CvxhParticleArea_ratio = area / hull_area
    return hull_area, CvxhParticleArea_ratio
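A quick standalone check of the same shoelace computation (a sketch, not part of the original snippet; it assumes mahotas.polygon.convexhull returns the hull vertices of a binary contour image, as the snippet itself does):

import numpy as np
import mahotas

binIm = np.zeros((50, 50), bool)
binIm[10:30, 10:30] = True                  # a 20x20 square particle
contour = mahotas.bwperim(binIm)
pts = [tuple(p) for p in mahotas.polygon.convexhull(contour)]
pts.append(pts[0])                          # close the polygon: P_N = P_0
s = sum(nx * cy - cx * ny
        for (cx, cy), (nx, ny) in zip(pts[:-1], pts[1:]))
print(0.5 * abs(s))                         # close to the square's area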
Example #2
def _hull_computations(imageproc, imagehull=None):
    # Shared code between the two functions below
    if imagehull is None:
        imagehull = convexhull(imageproc > 0)

    Ahull = _bwarea(imagehull)
    Phull = _bwarea(bwperim(imagehull))

    cofy, cofx = center_of_mass(imagehull)
    hull_mu00 = imgcentmoments(imagehull, 0, 0, cofy, cofx)
    hull_mu11 = imgcentmoments(imagehull, 1, 1, cofy, cofx)
    hull_mu02 = imgcentmoments(imagehull, 0, 2, cofy, cofx)
    hull_mu20 = imgcentmoments(imagehull, 2, 0, cofy, cofx)

    # Parameters of the 'image ellipse': the constant-intensity ellipse
    # with the same mass and second-order moments as the original image.
    # From Prokop, R.J., and Reeves, A.P. 1992. CVGIP: Graphical Models
    # and Image Processing 54(5):438-460.
    hull_semimajor = sqrt((2 * (hull_mu20 + hull_mu02 +
                                sqrt((hull_mu20 - hull_mu02)**2 +
                                     4 * hull_mu11**2))) / hull_mu00)

    hull_semiminor = sqrt((2 * (hull_mu20 + hull_mu02 -
                                sqrt((hull_mu20 - hull_mu02)**2 +
                                     4 * hull_mu11**2))) / hull_mu00)

    return imagehull, Ahull, Phull, hull_semimajor, hull_semiminor
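The helpers _bwarea and imgcentmoments are not defined in this snippet (center_of_mass presumably comes from scipy.ndimage). Minimal assumed implementations, consistent with how they are called above; note the real _bwarea may use MATLAB-style pattern weighting rather than a plain pixel count:

import numpy as np

def _bwarea(img):
    # assumed: area of a binary image as a plain pixel count
    return np.sum(img != 0)

def imgcentmoments(img, p, q, cofy, cofx):
    # assumed: central image moment mu_pq about (cofy, cofx)
    ys, xs = np.indices(img.shape)
    return np.sum(((ys - cofy) ** p) * ((xs - cofx) ** q) * img)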
Example #3
def dna_size_shape(labeled, scale=1., minarea=float('-inf'), maxarea=float('+inf'), minroundness=-1.):
    '''
    positives = dna_size_shape(dnamasks, scale=1., minarea=-Inf, maxarea=+Inf, minroundness=0)

    Only accepts DNA objects that fulfil all of the following criteria:
        * area greater than minarea
        * area smaller than maxarea
        * rounder than minroundness
    '''
    nr_objects = labeled.max()
    positives = np.zeros(nr_objects + 1, bool)
    minarea /= scale
    maxarea /= scale
    for obj in range(1, nr_objects + 1):
        objimg = croptobbox(labeled == obj)
        area = objimg.sum()
        if area > maxarea or area < minarea:
            continue
        hull = convexhull(objimg)
        hullArea = hull.sum()
        hullPerim = bwperim(hull).sum()
        roundness = hullPerim**2 / (4 * np.pi * hullArea)
        if roundness < minroundness:
            continue
        positives[obj] = True
    return positives
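A minimal usage sketch. It assumes the snippet's bare helpers come from mahotas (croptobbox, bwperim) and that convexhull returns a *filled* hull image (e.g. mahotas.polygon.fill_convexhull), since the code sums it as an area:

import numpy as np
import mahotas as mh
from mahotas import croptobbox, bwperim
from mahotas.polygon import fill_convexhull as convexhull

img = np.zeros((64, 64), np.uint8)
img[10:30, 10:30] = 1                       # one square object
labeled, n_objects = mh.label(img)
print(dna_size_shape(labeled, minarea=50))  # boolean per label, index 0 is background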
Example #4
def phase_stretch_transform(img, LPF, S, W, threshold_min, threshold_max, flag):
    L = 0.5
    x = np.linspace(-L, L, img.shape[0])
    y = np.linspace(-L, L, img.shape[1])
    X1, Y1 = np.meshgrid(x, y)
    X = X1.T
    Y = Y1.T
    theta, rho = cart2pol(X, Y)
    orig = np.fft.fft2(img)
    expo = np.fft.fftshift(np.exp(-np.power((np.divide(rho, math.sqrt((LPF ** 2) / np.log(2)))), 2)))
    orig_filtered = np.real(np.fft.ifft2(np.multiply(orig, expo)))
    PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(rho, W))) - 0.5 * np.log(
        1 + np.power(np.dot(rho, W), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)), np.fft.fft2(orig_filtered))
    orig_filtered_PST = np.fft.ifft2(temp)
    PHI_features = np.angle(orig_filtered_PST)
    if flag == 0:
        out = PHI_features
    else:
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > threshold_max] = 1
        features[PHI_features < threshold_min] = 1
        features[img < (np.amax(img) / 20)] = 0

        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))
    return out, PST_Kernel
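cart2pol is not defined in this snippet; a minimal NumPy implementation consistent with its use here (and with the explicit version in Example #9 below):

import numpy as np

def cart2pol(x, y):
    # Cartesian -> polar, MATLAB-style: returns (theta, rho)
    return np.arctan2(y, x), np.hypot(x, y)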
Example #5
def find_perimeter(bim):
    pim = mahotas.bwperim(bim)
    pts = numpy.array(numpy.where(pim))
    if pts.size:  # pts has shape (2, K); test K > 0 (len(pts) is always 2)
        return pts.T[:, ::-1]
    else:
        return []
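A tiny usage check (a sketch; assumes numpy and mahotas are imported under the names the snippet uses):

import numpy
import mahotas

bim = numpy.zeros((8, 8), bool)
bim[2:6, 2:6] = True
print(find_perimeter(bim))   # (x, y) pairs of the perimeter pixels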
Example #7
def pred_to_mask(pred, wsi=None, perim=False):
    """
    Given prediction logits of size [# classes, width, height]
    and the corresponding WSI image, return the mask embedded
    onto that WSI (as a numpy array).
    """

    str_elem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

    pred = threshold_probs(pred)

    # one-hot encode the prediction, drop the background channel, scale to 8-bit
    pred = 255 * (np.eye(args.num_classes)[pred][..., 1:]).astype(np.uint8)
    wsi = np.zeros_like(pred) if wsi is None else np.array(wsi.copy())
    for cj in range(args.num_classes - 1):
        rgbcolor = [0, 0, 0]
        rgbcolor[cj] = 255

        if perim:
            pred[..., cj] = bwperim(pred[..., cj])
            pred[..., cj] = cv2.dilate(pred[..., cj], str_elem, iterations=1)

        wsi[pred[..., cj] > 0, :] = rgbcolor

    del pred

    return wsi
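threshold_probs and the global args come from the surrounding module and are not shown; a plausible stand-in (an assumption, not the original code) that reduces the logits to a per-pixel class map, which is what the np.eye indexing above expects:

import numpy as np

def threshold_probs(pred):
    # pred: [num_classes, H, W] logits -> [H, W] class indices
    return np.argmax(pred, axis=0)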
Example #8
    def extract(self):
        '''Extracts neighbour features.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in `label_image`
        '''
        # Create an empty dataset in case no objects were detected
        logger.info('extract neighbour features')
        features = list()

        for obj in self.object_ids:
            pad = max(self.neighbour_distance, self.touching_distance)
            object_image = self._get_bbox_containing_neighbours(obj, pad)

            # dilate the current object
            object_image_dilate = mh.dilate(object_image == obj,
                                            Bc=mh.disk(
                                                self.neighbour_distance))

            # mask the corresponding region of the label image
            object_image_mask = np.copy(object_image)
            object_image_mask[object_image_dilate == 0] = 0
            object_image_mask[object_image == obj] = 0
            neighbour_ids = np.unique(object_image_mask)
            unique_values = neighbour_ids[np.nonzero(neighbour_ids)].tolist()
            neighbour_count = len(unique_values)

            # save these unique values as a string
            if neighbour_count == 0:
                neighbour_string = '.'
            else:
                neighbour_string = '.'.join(str(x) for x in unique_values)

            # create an inverted image of the surrounding cells
            neighbours = np.zeros_like(object_image)
            for n in unique_values:
                neighbours += mh.dilate(object_image == n)

            # calculate the distance from each pixel of object to neighbours
            dist = ndi.morphology.distance_transform_edt(
                np.invert(neighbours > 0))

            # select perimeter pixels whose distance to neighbours is
            # less than threshold touching distance
            perimeter_image = mh.bwperim(object_image == obj)
            dist[perimeter_image == 0] = 0
            dist[dist > self.touching_distance] = 0

            fraction_touching = np.count_nonzero(dist) / float(
                np.count_nonzero(perimeter_image))

            values = [neighbour_count, neighbour_string, fraction_touching]
            features.append(values)
        return pd.DataFrame(features,
                            columns=self.names,
                            index=self.object_ids)
Example #9
def pst_algorithm(image, LPF, Phase_strength, Warp_strength, Threshold_min,
                  Threshold_max, Morph_flag):
    L = 0.5
    x = np.linspace(-L, L, image.shape[0])
    y = np.linspace(-L, L, image.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    [THETA, RHO] = [np.arctan2(Y, X),
                    np.hypot(X, Y)]  # cartesian to polar coordinates

    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = np.fft.fft2(image)
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(RHO, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))
    Image_orig_filtered = np.real(
        np.fft.ifft2((np.multiply(Image_orig_f, expo))))

    # Constructing the PST Kernel
    PST_Kernel_1 = np.multiply(
        np.dot(RHO, Warp_strength), np.arctan(np.dot(RHO, Warp_strength))
    ) - 0.5 * np.log(1 + np.power(np.dot(RHO, Warp_strength), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * Phase_strength

    # Apply the PST Kernel
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(Image_orig_filtered))
    Image_orig_filtered_PST = np.fft.ifft2(temp)

    # Calculate phase of the transformed image
    PHI_features = np.angle(Image_orig_filtered_PST)

    if Morph_flag == 0:
        out = PHI_features
        out = (out / np.max(out)) * 3
    else:
        # find image sharp transitions by thresholding the phase
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1  # Bi-threshold decision
        features[
            PHI_features <
            Threshold_min] = 1  # as the output phase has both positive and negative values
        features[image < (
            np.amax(image) / 20
        )] = 0  # Removing edges in the very dark areas of the image (noise)

        # apply binary morphological operations to clean the transformed image
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))

    return out
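A minimal usage sketch (parameter values are illustrative; the same defaults appear in the PST() wrapper in Example #20 below; assumes the snippet's own imports: numpy as np, math, mahotas as mh):

import numpy as np

img = np.random.rand(128, 128)
analog_edges = pst_algorithm(img, LPF=0.21, Phase_strength=0.48,
                             Warp_strength=12.14, Threshold_min=-1,
                             Threshold_max=0.0019, Morph_flag=0)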
Example #10
def phase_stretch_transform(img, LPF, S, W, Threshold_min, Threshold_max,
                            flag):
    L = 0.5
    x = np.linspace(-L, L, img.shape[0])
    y = np.linspace(-L, L, img.shape[1])
    X, Y = np.meshgrid(x, y)
    p, q = X.T, Y.T
    theta, rho = cart2pol(p, q)

    # Implement the PST formula term by term, from right to left.
    # Fast Fourier transform of the input image.
    orig = np.fft.fft2(img)

    # Build the localization kernel L[p, q].
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(rho, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))

    # Smooth the image.
    orig_filtered = np.real(np.fft.ifft2(np.multiply(orig, expo)))

    # Build the phase kernel.
    PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(
        rho, W))) - 0.5 * np.log(1 + np.power(np.dot(rho, W), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S

    # Multiply the filtered spectrum by the phase kernel.
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(orig_filtered))

    # Inverse fast Fourier transform.
    orig_filtered_PST = np.fft.ifft2(temp)

    # Take the angle to obtain the phase of the transformed image.
    PHI_features = np.angle(orig_filtered_PST)

    if flag == 0:
        out = PHI_features
    else:
        # Threshold the phase image.
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1
        features[PHI_features < Threshold_min] = 1
        features[img < (np.amax(img) / 20)] = 0

        # Apply binary morphological operations to clean the result.
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))
    return out, PST_Kernel
Example #11
def find_perimeter(labeled, seeds):
    """
    return [[x,y],[x,y]...]
    """
    rpts = None
    for seed in seeds:
        bim = to_binary(labeled, seed)
        pim = mahotas.bwperim(bim)
        pts = numpy.array(numpy.where(pim)).astype(numpy.float64)
        if pts.size:  # pts has shape (2, K); test K > 0 (len(pts) is always 2)
            if rpts is None:
                rpts = pts
            else:
                rpts = numpy.hstack((rpts, pts))
    if rpts is None:
        return []
    else:
        return rpts.T[:, ::-1]  # swap dimensions to get to (x, y)
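to_binary is not defined in this snippet; a plausible stand-in under the assumption that each seed is a label value in the labeled image:

def to_binary(labeled, seed):
    # assumed semantics: select the region carrying this seed's label
    return labeled == seed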
Example #12
def get_key_points_for_patch(params):
    '''
    Patches only carry an image-level label (no segmentation mask),
    so there is no need for region-level key points; instead, generate
    a roughly uniform point set.
    '''

    y_max = params.dimensions[1] // 4**params.scan_level
    x_max = params.dimensions[0] // 4**params.scan_level

    mask = np.zeros((y_max, x_max), dtype=np.uint8)

    y_min = 32
    x_min = 32

    mask[y_min:y_max - y_min, x_min:x_max - x_min] = 1

    perim = bwperim(mask)
    perim_coords = np.transpose(np.where(perim))[:, ::-1]  # (x,y) pairs
    skip = np.maximum(2, perim_coords.shape[0] // params.num_perim_points)
    perim_coords = perim_coords[::skip, :]

    kernel = np.ones((10, 10), np.uint8)

    _, center_pts, _, _ = get_key_points(cv2.erode(mask, kernel, iterations=1),
                                         1, params.num_center_points,
                                         params.num_center_points)

    center_pts -= [params.tile_w // 2, params.tile_h // 2]
    perim_coords -= [params.tile_w // 2, params.tile_h // 2]

    return {
        'cnt_xy': center_pts,
        'perim_xy': perim_coords,
        'scan_level': params.scan_level
    }
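A tiny check of the perimeter-subsampling logic above (hypothetical numbers; num_perim_points stands in for params.num_perim_points):

import numpy as np

perim_coords = np.arange(100).reshape(50, 2)   # 50 (x, y) pairs
num_perim_points = 8
skip = np.maximum(2, perim_coords.shape[0] // num_perim_points)
print(perim_coords[::skip, :].shape[0])        # 9 points (roughly 8)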
Example #13
def detect_blobs(image, mask, threshold, min_area, deblend_nthresh=500,
        deblend_cont=0):
    '''Detects blobs in `image` using an implementation of
    `SExtractor <http://www.astromatic.net/software/sextractor>`_ [1].

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which blobs should be detected
    mask: numpy.ndarray[numpy.bool]
        binary image that masks pixel regions in which no blobs should be
        detected
    threshold: int
        factor by which pixel values must be above background
        to be considered part of a blob
    min_area: int
        minimal size of a blob
    deblend_nthresh: int, optional
        number of deblending thresholds (default: ``500``)
    deblend_cont: int, optional
        minimum contrast ratio for deblending (default: ``0``)

    Returns
    -------
    Tuple[numpy.ndarray[numpy.int32]]
        detected blobs and the corresponding centroids

    References
    ----------
    .. [1] Bertin, E. & Arnouts, S. 1996: SExtractor: Software for source
    extraction, Astronomy & Astrophysics Supplement 317, 393
    '''
    sep.set_extract_pixstack(10**7)

    img = image.astype('float')

    # We pad the image with mirrored pixels to prevent border artifacts.
    pad = 50
    left = img[:, 1:pad]
    right = img[:, -pad:-1]
    detect_img = np.c_[np.fliplr(left), img, np.fliplr(right)]
    upper = detect_img[1:pad, :]
    lower = detect_img[-pad:-1, :]
    detect_img = np.r_[np.flipud(upper), detect_img, np.flipud(lower)]

    logger.info('detect blobs via thresholding and deblending')
    detection, blobs = sep.extract(
        detect_img, threshold,
        minarea=min_area, segmentation_map=True,
        deblend_nthresh=deblend_nthresh, deblend_cont=deblend_cont,
        filter_kernel=None, clean=False
    )

    centroids = np.zeros(detect_img.shape, dtype=np.int32)
    y = detection['y'].astype(int)
    x = detection['x'].astype(int)
    # In rare cases object coordinates lie outside of the image; clamp
    # them to the last valid index.
    n = len(detection)
    y[y >= detect_img.shape[0]] = detect_img.shape[0] - 1
    x[x >= detect_img.shape[1]] = detect_img.shape[1] - 1
    centroids[y, x] = np.arange(1, n + 1)

    # Remove the padded border pixels
    blobs = blobs[pad-1:-(pad-1), pad-1:-(pad-1)].copy()
    centroids = centroids[pad-1:-(pad-1), pad-1:-(pad-1)].copy()

    # Blobs detected outside of regions of interest are discarded.
    blobs[mask > 0] = 0
    blobs[mh.bwperim(np.invert(mask)) > 0] = 0
    mh.labeled.relabel(blobs, inplace=True)

    # We need to ensure that centroids are labeled the same way as blobs.
    centroids[centroids > 0] = blobs[centroids > 0]

    return (blobs, centroids)
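A quick check of the mirror padding used above, on a tiny array (pad - 1 columns are reflected on each side, excluding the border pixel itself):

import numpy as np

a = np.arange(12, dtype=float).reshape(3, 4)
pad = 3
left, right = a[:, 1:pad], a[:, -pad:-1]
b = np.c_[np.fliplr(left), a, np.fliplr(right)]
print(b.shape)   # (3, 4 + 2*(pad-1)) == (3, 8)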
Example #14
def Bfx_basicint(I, R, *args):
    """
    X, Xn = Bfx_basicint(I, R, options)

    Toolbox: Balu
        Basic intensity features

        X is the feature vector, Xn is the list of feature names
        (see the example below for how it works).

        Reference:
            Kumar, A.; Pang, G.K.H. (2002): Defect detection in textured materials
            using Gabor filters. IEEE Transactions on Industry Applications,
            38(2):425-440.

    Example:
        import numpy as np
        from mahotas.colors import rgb2gray
        from balu.ImageProcessing import Bim_segbalu
        from balu.ImagesAndData import balu_imageload

        options = {'show': True, 'mask': 5}  # Gauss mask for gradient computation and display results
        I = balu_imageload('testimg1.jpg')   # input image
        R, _, _ = Bim_segbalu(I)             # segmentation

        X, Xn = Bfx_basicint(I, R, options)  # basic intensity features

    See also Bfx_haralick, Bfx_clp, Bfx_gabor, Bfx_fourier, Bfx_dct, Bfx_lbp.

    (c) D.Mery, PUC-DCC, 2010
    http://dmery.ing.puc.cl

    With collaboration from:
    Jose Miguel Arrieta Ramos ([email protected]) -> Translated implementation into python (2017)
    """

    if len(args) == 0:
        options = {'show': False}
    else:
        options = args[0]

    if options['show']:
        print('--- extracting Basic intensity features...')

    if 'mask' not in options:
        options['mask'] = 15

    E = bwperim(R, n=4)
    ii = R == 1
    jj = np.where(R.ravel() == 0)[0]
    kk = E == 1

    I = I.astype(float)

    I1, _, _ = Bim_d1(I, options['mask'])
    I2 = Bim_d2(I)

    if len(jj) > 0:
        C = np.mean(np.abs(I1[kk]))
    else:
        C = -1

    J = I[ii]
    G = np.mean(J)
    S = np.std(J)
    K = st.kurtosis(J, fisher=False)
    Sk = st.skew(J)
    D = np.mean(I2[ii])

    X = np.array([[G, S, K, Sk, D, C]])

    Xn = [
        'Intensity Mean', 'Intensity StdDev', 'Intensity Kurtosis',
        'Intensity Skewness', 'Mean Laplacian', 'Mean Boundary Gradient'
    ]

    return X, Xn
Example #15
def _compare_slow(img):
    for n in (4, 8):
        assert np.all(_slow_bwperim(img, n) == bwperim(img, n))
Example #16
im = np.uint8(heatmap/255 >= 0.9)

im = cv2.morphologyEx(
	im,
	cv2.MORPH_OPEN,
	kernel=np.ones((30, 30))
)

heatmap_orig = heatmap

heatmap = heatmap * im
heatmap = np.repeat(heatmap[..., np.newaxis], 3, 2)


tb_perim = cv2.morphologyEx(
	bwperim(chull(im)).astype(np.uint8),
	cv2.MORPH_DILATE,
	kernel=np.ones((20, 20))
)

overlay = 0.65 * wsi + 0.35 * heatmap
yy, xx = np.where(tb_perim)
overlay[yy, xx, ...] = 0
overlay = np.uint8(overlay)

overlay_image = Image.fromarray(overlay).resize((x//4, y//4))
overlay_image.save('overlay_tumor_bed.png')


wsi = Image.fromarray(wsi).resize((x//4, y//4))
wsi.save('wsi.png')
Example #19
def predict_wsis(model, dataset, ep):
    '''
    Given directory svs_path, the current model goes through each
    WSI (svs) and generates a prediction mask embedded onto the WSI.
    sequential: tiles images and generates batches of size 1
    parallel: uses a preallocated dataset and batches >> 1
    '''

    os.makedirs('{}/{}'.format(args.val_save_pth, ep), exist_ok=True)

    model.eval()

    with torch.no_grad():
        ' go through each svs and make a pred. mask'
        ious_tb = 0
        for key in dataset.wsis:
            'create prediction template'
            pred = np.zeros(
                (args.num_classes, *dataset.wsis[key]['iterator'].dataset.scan.
                 level_dimensions[args.scan_level][::-1]),
                dtype=float)
            'slide over wsi'
            for batch_x, batch_y, batch_image in dataset.wsis[key]['iterator']:
                batch_image = batch_image.cuda()

                pred_src = model(batch_image)
                if args.scan_resize != 1:
                    pred_src = torch.nn.functional.interpolate(
                        pred_src, (args.tile_h * args.scan_resize,
                                   args.tile_w * args.scan_resize))
                pred_src = pred_src.cpu().numpy()

                for bj in range(batch_image.size(0)):
                    tile_x, tile_y = int(batch_x[bj]), int(batch_y[bj])
                    pred[:, tile_y:tile_y + dataset.params.ph,
                         tile_x:tile_x + dataset.params.pw] += pred_src[bj]

            'post process wsi (throw out non-foreground tissue)'
            scan = openslide.OpenSlide(dataset.wsis[key]['wsipath'])
            mask = Image.open(dataset.wsis[key]['wsipath'] +
                              '_find_nuclei.png')
            mask = np.asarray(mask)

            'downsample pred'
            pred_ = np.zeros(
                (args.num_classes, *scan.level_dimensions[2][::-1]))
            for ij in range(args.num_classes):
                pred_[ij, ...] = cv2.resize(pred[ij, ...],
                                            scan.level_dimensions[2])
            pred = pred_
            del pred_

            'calculate score'
            if os.path.exists(dataset.wsis[key]['wsipath'] + '_mask.png'):

                gt = Image.open(dataset.wsis[key]['wsipath'] + '_mask.png')
                gt = gt.resize(pred.shape[1:][::-1])
                gt = np.array(gt)

                p = np.argmax(pred, 0)
                '''
                get tumor bed
                opening removes small, possibly
                mis-classified regions
                '''
                # tb = (p.astype(np.uint8) == 3).astype(np.uint8)
                tb = (p.astype(np.uint8) >= 2).astype(np.uint8)
                tb = cv2.morphologyEx(tb, cv2.MORPH_OPEN,
                                      np.ones((20, 20), dtype=np.uint8))
                tb_pred = chull(tb)

                tb = bwperim(tb_pred).astype(np.uint8)
                tb = cv2.dilate(tb,
                                np.ones((20, 20), dtype=np.uint8),
                                iterations=1)
                tb = np.nonzero(tb)
                '''
				use gt tumor bed
				'''
                tb_pth = dataset.wsis[key]['wsipath'] + '_tumor_bed.png'
                if os.path.exists(tb_pth):
                    tb_gt = Image.open(tb_pth).convert('L')
                    tb_gt = (np.array(tb_gt) > 0).astype(np.uint8)

                    iou_tb = (tb_gt *
                              tb_pred).sum() / (args.epsilon +
                                                (tb_gt | tb_pred).sum())
                    ious_tb += iou_tb

                acc = (p == gt)
                acc = acc[gt > 0]
                acc = np.mean(acc)
                s = 1 - np.sum(np.abs(p - gt)) / np.sum(
                    np.maximum(np.abs(gt - 0), np.abs(gt - 3.0)) *
                    (1 - (1 - (p > 0)) * (1 - gt > 0)))

                p = mask * p
                acc_masked = (p == gt)
                acc_masked = acc_masked[gt > 0]
                acc_masked = np.mean(acc_masked)
                s_masked = 1 - np.sum(np.abs(p - gt)) / np.sum(
                    np.maximum(np.abs(gt - 0), np.abs(gt - 3.0)) *
                    (1 - (1 - (p > 0)) * (1 - gt > 0)))

                'only detect foreground vs background'
                iou_fg = ((p > 0) * (gt > 0)).sum() / (args.epsilon +
                                                       ((p > 0) |
                                                        (gt > 0)).sum())

                print('{}, '
                      '{:.3f}({:.3f}),'
                      ' {:.3f}({:.3f}),'
                      ' {:.3f},'
                      ' tb iou: {:.3f} '.format(
                          dataset.wsis[key]['wsipath'].split('/')[-1],
                          s_masked, s, acc_masked, acc, iou_fg,
                          iou_tb if os.path.exists(tb_pth) else -1))
                del p
                del gt

            'save color mask'
            pred_image = np.expand_dims(mask,
                                        -1) * preprocessing.pred_to_mask(pred)
            pred_image[tb] = [255, 255, 255]
            pred_image = Image.fromarray(pred_image)
            pred_image.resize((dataset.wsis[key]['scan'].level_dimensions[2][0] // 2,
                               dataset.wsis[key]['scan'].level_dimensions[2][1] // 2)). \
                save('{}/{}/{}_{}.png'.format(args.val_save_pth, ep, key, args.tile_stride_w))

            del pred
            del pred_image

        print('Average tb iou: {:.3f}'.format(ious_tb / len(dataset.wsis)))

    model.train()
Example #20
def PST(I,
        LPF=0.21,
        Phase_strength=0.48,
        Warp_strength=12.14,
        Threshold_min=-1,
        Threshold_max=0.0019,
        Morph_flag=1):
    # I: image
    # Gaussian Low Pass Filter
    #	LPF = 0.21
    # PST parameters:
    # 	Phase_strength = 0.48
    #	Warp_strength = 12.14
    # Thresholding parameters (for post processing after the edge is computed)
    #	Threshold_min = -1
    #	Threshold_max = 0.0019
    # To compute analog edge, set Morph_flag = 0 and to compute digital edge, set Morph_flag = 1
    # 	Morph_flag = 1
    I_initial = I
    if (len(I.shape) == 3):
        I = I.mean(axis=2)

    L = 0.5
    x = np.linspace(-L, L, I.shape[0])
    y = np.linspace(-L, L, I.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    [THETA, RHO] = cart2pol(X, Y)

    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = ((np.fft.fft2(I)))
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(RHO, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))
    Image_orig_filtered = np.real(
        np.fft.ifft2((np.multiply(Image_orig_f, expo))))
    # Constructing the PST Kernel
    PST_Kernel_1 = np.multiply(
        np.dot(RHO, Warp_strength), np.arctan(np.dot(RHO, Warp_strength))
    ) - 0.5 * np.log(1 + np.power(np.dot(RHO, Warp_strength), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(Image_orig_filtered))
    Image_orig_filtered_PST = np.fft.ifft2(temp)

    # Calculate phase of the transformed image
    PHI_features = np.angle(Image_orig_filtered_PST)

    if Morph_flag == 0:
        out = PHI_features
        return out
    else:
        #   find image sharp transitions by thresholding the phase
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1  # Bi-threshold decision
        features[
            PHI_features <
            Threshold_min] = 1  # as the output phase has both positive and negative values
        features[I < (
            np.amax(I) / 20
        )] = 0  # Removing edges in the very dark areas of the image (noise)

        # apply binary morphological operations to clean the transformed image
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))

        Overlay = mh.overlay(I, out)
        return (out, Overlay)
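Minimal usage sketch (Morph_flag=0 returns the analog phase map and skips the binary morphology / overlay branch; assumes numpy, math, mahotas as mh, and a cart2pol helper are available as in the snippet):

import numpy as np

I = np.random.rand(128, 128)
analog = PST(I, Morph_flag=0)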
Example #21
labels = labels.resize((x, y))

image = np.asarray(image)
labels = np.asarray(labels)

metadata = {}

for tile_id in range(labels.max()):

    label_patch = labels == tile_id
    n, center_pts, out_image, foreground_indices = regiontools.get_key_points(
        label_patch, us_kmeans, dhr.HR_NUM_CNT_SAMPLES, dhr.HR_NUM_CNT_SAMPLES)

    perim_coords = np.zeros([0, 2])
    if dhr.HR_NUM_PERIM_SAMPLES > 0:
        perim = bwperim(label_patch)
        perim_coords = np.transpose(np.where(perim))[:, ::-1]  # (x,y) pairs
        skip = np.maximum(2, perim_coords.shape[0] // dhr.HR_NUM_PERIM_SAMPLES)
        perim_coords = perim_coords[::skip, :]

    metadata[tile_id] = {
        'cnt_xy': center_pts,
        'perim_xy': perim_coords,
        'wsipath': svspth,
        'scan_level': scan_level,
        'foreground_indices': foreground_indices,
        'tile_id': tile_id,
    }
'''
evaluation stage
'''
Example #23
def test_n5():
    img = np.zeros((8, 8), bool)
    img[3:7, 3:7] = 1
    bwperim(img, 5)
Example #24
# assign a unique ID for each cell
cellID = []
for imageNumber in range(len(filenames)):
    cellID.append(str(uuid.uuid4()))
filename = "file.csv"
df = pd.DataFrame(cellID, columns=["CellID"])
df.to_csv(filename, index=False)


# find the list of pixels on each cell boundary
cell_boundaries_list = []
filenames = list(glob.glob('./cropped_and_resized/*'))  # load images
for idx, filename in enumerate(filenames):
    targetCell = skimage.io.imread(filename)
    dimension = targetCell.shape
    # calculate the cell boundary
    boolean_border = mh.bwperim(targetCell, n=4)
    cell_boundaries = np.ravel_multi_index(np.nonzero(boolean_border), dimension)
    # join the flat indices into one space-separated string so they stay
    # distinctly recognizable in the CSV
    boundaries = ' '.join(str(x) for x in cell_boundaries)
    cell_boundaries_list.append(boundaries)

csv = "file.csv"
df = utils.read_csv(csv)
print(df.shape)
df['cell boundary'] = pd.Series(cell_boundaries_list)
df['FileName'] = pd.Series(filenames)
df.to_csv(csv, index=False)
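np.ravel_multi_index flattens (row, col) coordinate pairs into single indices over the image shape; they can be recovered with np.unravel_index:

import numpy as np

flat = np.ravel_multi_index(([1, 2], [3, 0]), (4, 5))
print(flat)                             # [ 8 10]
print(np.unravel_index(flat, (4, 5)))   # (array([1, 2]), array([3, 0]))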
Example #25
file = "part3.png"
complete_path = os.path.join(workdir, file)
if __name__ == "__main__":
    im = readmagick.readimg(complete_path)
    im0 = np.copy(im)

    hip = pymorph.subm(im, nd.gaussian_filter(im, 5))
    hip0 = np.copy(hip)
    im = mahotas.thin(im)
    #im = mahotas.bwperim(im > 0)
    hip = mahotas.thin(hip)
    #print(im.dtype)  # uint16
    p1 = particle(im)
    print("particle 1", p1.cvxhull_area())
    p2 = particle(hip)
    contour = mahotas.bwperim(im > 0)
    p3 = particle(contour)
    print("particle 1 hi pass", p2.cvxhull_area())
    theta1, rosedesvents, VImage = p1.orientationByErosion(5)
    theta2, rdv, VHip = p2.orientationByErosion(5)
    x = rosedesvents[0, :]
    y = rosedesvents[1, :]
    xh = rdv[0, :]
    yh = rdv[1, :]
    pylab.subplot(321, frameon=False, xticks=[], yticks=[])
    pylab.gray()
    pylab.imshow(im0)
    pylab.subplot(322, frameon=False, xticks=[], yticks=[])
    pylab.imshow(hip0)
    pylab.subplot(323)
    pylab.title("Resistance to vertical erosion")
Example #27
    cx, cy = centers[tile_id, :]

    label_patch = labels == tile_id
    area = np.count_nonzero(label_patch)
    num_clusters = dhr.HR_NUM_CNT_SAMPLES  #6 + int(area / (0.01 * gt.size))

    if (w * h)/gt.size <= 0.005:

        n, cnt_pts, out_image, foreground_indices = regiontools.get_key_points(label_patch, us_kmeans, dhr.HR_NUM_CNT_SAMPLES, dhr.HR_NUM_CNT_SAMPLES)

        if n is not None:

            label_patch = Image.fromarray(label_patch.astype(np.uint8))
            x, y = label_patch.size
            label_patch = label_patch.resize((x // us_kmeans, y // us_kmeans))
            perim = bwperim(np.asarray(label_patch))
            coords_ = np.transpose(np.where(perim))[:, ::-1]  # (x,y) pairs
            cvh = concaveHull(coords_, 3)
            coords = esp(cvh, dhr.HR_NUM_PERIM_SAMPLES) * us_kmeans

            plt.plot(cnt_pts[:, 0], cnt_pts[:, 1], 'wo', ms=ms)
            plt.plot(coords[:, 0], coords[:, 1], 'ko', ms=ms)

    else:

        min_center_large = int((w * h)/(gt.size * 0.01))
        min_center_large = np.maximum(min_center_large, 5)
        min_center_large = num_clusters

        n, cnt_pts, out_image, foreground_indices = regiontools.get_key_points(label_patch, us_kmeans, min_center_large, min_center_large)
Example #28
    label_patch = labels == tile_id

    area = np.count_nonzero(label_patch)
    num_clusters = 2 + int(area / (0.01 * labels.size))

    n, center_pts, out_image, foreground_indices = regiontools.get_key_points(
        label_patch, us_kmeans, num_clusters, num_clusters)

    'get width & height'
    indices = np.where(label_patch)
    h = 1 + indices[0].max() - indices[0].min()
    w = 1 + indices[1].max() - indices[1].min()

    if n is not None and (w * h) / labels.size <= 0.05:
        perim = bwperim(label_patch)
        perim_coords = np.transpose(np.where(perim))[:, ::-1]  # (x,y) pairs
        skip = np.maximum(2, perim_coords.shape[0] // dhr.HR_NUM_PERIM_SAMPLES)
        perim_coords = perim_coords[::skip, :]

        metadata[patch_id] = {
            'cnt_xy': center_pts,
            'perim_xy': perim_coords,
            'wsipath': svspth,
            'scan_level': scan_level,
            'foreground_indices': foreground_indices,
            'tile_id': patch_id,
        }
        patch_id = patch_id + 1

    elif n is not None: