Example 1
 def __call__(self, img_small):
     m = morphology.square(self.square_size)
     img_th = morphology.black_tophat(img_small, m)
     img_sob = abs(filters.sobel_v(img_th))
     img_closed = morphology.closing(img_sob, m)
     threshold = filters.threshold_otsu(img_closed)
     return img_closed > threshold
Example 2
def getImage(i, imOriginal, mean):
    if i == 0:  #return a central crop of 99x99 pixels
        resized = cv2.resize(imOriginal, (128, 128))
        resized = resized * 256.0 - mean
        return resized[14:113, 14:113]
    rot = np.random.uniform(0, 360, 1).astype(int)[0]  # random rotation angle (overridden on the next line)
    rot = 90 * np.random.uniform(0, 4, 1).astype(int)[0]  # restrict the rotation to multiples of 90 degrees
    #    rot = 0
    im_size = imOriginal.shape[0]
    if (np.random.rand() > 0.5):
        if (np.random.rand() > 0.5):
            imOriginal = cv2.flip(imOriginal, 0)
        else:
            imOriginal = cv2.flip(imOriginal, 1)
    scale = np.random.uniform(0.9, 1.1)
    mat = cv2.getRotationMatrix2D((im_size / 2, im_size / 2), rot, scale=scale)
    # note: the source of the warp is imOriginal (img_out does not exist yet at this point)
    resized = cv2.warpAffine(imOriginal,
                             mat, (im_size, im_size),
                             borderValue=(255, 255, 255))
    img_out = np.zeros((resized.shape[0], resized.shape[1], 3), dtype=np.uint8)
    img_orig = resized[:, :, 0]
    img_btop = 255 - black_tophat(img_orig, selem)  # selem is assumed to be defined at module scope
    img_wtop = 255 - white_tophat(img_orig, selem)
    img_out[:, :, 1] = img_btop
    img_out[:, :, 2] = img_wtop

    resized = cv2.resize(img_out, (128, 128))

    resized = resized * 256.0 - mean  # values drop sharply to around 4 if the mean is not subtracted
    #    offsetX = np.random.uniform(10,18,1).astype(int)[0]
    #    offsetY = np.random.uniform(10,18,1).astype(int)[0]
    offsetX = np.random.uniform(0, 28, 1).astype(int)[0]  # random crop offsets
    offsetY = np.random.uniform(0, 28, 1).astype(int)[0]
    return resized[offsetY:(99 + offsetY), offsetX:(99 + offsetX)]
Example 3
def detectCentroid(input):
    # Centroid Detection and contouring
    #Utilizing the Hough-Transform
    kernel = np.ones((4, 4), np.float32) / 25  # note: normalized by 25 although the 4x4 kernel has 16 elements
    input = cv2.filter2D(input, -1, kernel)
    output = black_tophat(input, disk(15))
    blur = cv2.GaussianBlur(output, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 254, 255, 0)
    threshold = threshold.astype('uint8')
    centroid = cv2.HoughCircles(threshold, cv2.HOUGH_GRADIENT, 1, 100, param1=5, param2=10, minRadius=9, maxRadius=15)
    # ensure at least some circles were found
    a = b = None  # fall back to None when no centroid is detected (avoids a NameError at return)
    if centroid is not None:
        # Convert the circle parameters a, b and r to integers.
        centroid = np.uint16(np.around(centroid))
        for pt in tqdm(centroid[0, :]):
            a, b, r = pt[0], pt[1], pt[2]
            # Draw the circumference of the circle.
            cv2.circle(threshold, (a, b), r, (78, 55, 128), 2)
            # Draw a small circle (of radius 1) to show the center.
            cv2.circle(threshold, (a, b), 1, (0, 0, 0), 3)
    print(centroid)
    cv2.destroyWindow("Original Image")
    cv2.imshow("Centroid", threshold)
    cv2.waitKey(0)
    return threshold, a, b
Example 4
def black_top_func(filename):

    image = asarray(Image.open(filename))
    image = rgb2gray(image)
    image = black_tophat(image)
    plt.imsave(filename, image, cmap='gray')
    return filename
Example 5
def subtract_background(image, elem='disk', radius=50, light_bg=False):
    """Background substraction using structure element.
    Slightly adapted from: https://forum.image.sc/t/background-subtraction-in-scikit-image/39118/4

    :param image: input image
    :type image: NumPy.Array
    :param elem: type of the structure element, defaults to 'disk'
    :type elem: str, optional
    :param radius: size of structure element [pixel], defaults to 50
    :type radius: int, optional
    :param light_bg: light background, defaults to False
    :type light_bg: bool, optional
    :return: image with background subtracted
    :rtype: NumPy.Array
    """
    # use 'ball' here to get a slightly smoother result at the cost of increased computing time
    if elem == 'disk':
        str_el = disk(radius)
    elif elem == 'ball':
        str_el = ball(radius)

    if light_bg:
        img_subtracted = black_tophat(image, str_el)
    else:
        img_subtracted = white_tophat(image, str_el)

    return img_subtracted
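A minimal usage sketch for the function above; the file name "cells.tif" and the parameter values are placeholder assumptions, not part of the original example:

from skimage import io

img = io.imread("cells.tif")  # grayscale image assumed to be available locally
cleaned = subtract_background(img, elem='disk', radius=50, light_bg=False)
io.imsave("cells_cleaned.tif", cleaned)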
Example 6
def norm_image(path):
    image = np.array(cv2.imread(path))
    temp_img = gray_img = np.uint8(rgb2gray(image) * 255)  # rgb2gray returns floats in [0, 1], so scale back to 0-255 before casting
    selem = disk(6)
    b_tophat = black_tophat(gray_img, selem)
    resultant_img = b_tophat + gray_img
    median_img = cv2.medianBlur(resultant_img, 5)
    gaussian_img = scipy.ndimage.filters.gaussian_filter(median_img,
                                                         sigma=1.90,
                                                         order=0,
                                                         output=None,
                                                         mode='reflect',
                                                         cval=0.0,
                                                         truncate=4.0)
    xc, yc, r = center(gaussian_img)
    R = radius(gaussian_img)
    theta = np.arange(0.00, np.pi * 2, 0.01)  #theta
    rng = np.arange(0, 100)
    norm_img = np.zeros((rng.size, theta.size))
    for t in theta:
        for rc in rng:
            mc = (R - r) * (rc) / 100 + r
            x = int(xc + mc * np.cos(t))
            y = int(yc + mc * np.sin(t))
            try:
                norm_img[rc, np.where(theta == t)] = temp_img[x, y]
            except Exception as e:
                pass
    return norm_img
Example 7
def Analyze(input):
    # perhaps we want to first normalize the image so the maximum pixel is 255, and minimum is 0
    img = pydicom.dcmread(input)
    normalizedImg = rescale(img.pixel_array)
    normalizedImgO = normalizedImg.copy()
    normalizedImg = (black_tophat(normalizedImg, disk(12)))
    normalizedImg = cv2.GaussianBlur(normalizedImg, (5, 5), 3)
    # apply hough transform
    circles = cv2.HoughCircles(normalizedImg,
                               cv2.HOUGH_GRADIENT,
                               1,
                               100,
                               param1=100,
                               param2=5,
                               minRadius=3,
                               maxRadius=10)
    # place circles and center rectangle on image
    if circles is not None:
        # Convert the circle parameters a, b and r to integers.
        circles = np.uint16(np.around(circles))
        for pt in tqdm(circles[0, :]):
            a, b, r = pt[0], pt[1], pt[2]
            # Draw the circumference of the circle.
            cv2.circle(normalizedImgO, (a, b), r, (0, 0, 0), 2)
    print(circles)
    return normalizedImgO
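Analyze depends on a rescale helper that is not shown in this example. Going by the comment at the top of the function, a plausible sketch that maps the DICOM pixel array onto the 0-255 uint8 range expected by cv2.GaussianBlur and cv2.HoughCircles (an assumption, not the original implementation):

import numpy as np

def rescale(pixel_array):
    # Hypothetical helper: stretch the input so its minimum becomes 0 and its maximum 255.
    arr = pixel_array.astype(float)
    arr -= arr.min()
    if arr.max() > 0:
        arr /= arr.max()
    return (arr * 255).astype(np.uint8)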
Example 9
    def insert_db(self, mode, image, label, features, channel_no, inverse):
        if inverse:
            image_ubyte = 255 - img_as_ubyte(image)
        else:
            image_ubyte = img_as_ubyte(image)

        image_ubyte = numpy.transpose(image_ubyte, (2, 0, 1))
                
        image_string = image_ubyte.tostring()
        
        if features is not None:
            delimiter = '!@#$'
            self.datum.data = image_string + delimiter + features
        elif channel_no > 3:
            selem = disk(6)
            w_tophat = white_tophat(image_ubyte, selem)
            b_tophat = black_tophat(image_ubyte, selem)
            self.datum.data = image_string + w_tophat.tostring() + b_tophat.tostring()
        else:
            self.datum.data = image_string
            
        if label is not None:
            self.datum.label = int(label)                
    
        serialized = self.datum.SerializeToString()
        
        if mode == 'train':
            self.train_batch.Put("%08d" % self.train_no, serialized)                    
            self.train_no += 1
        elif mode == 'valid':
            self.valid_batch.Put("%08d" % self.valid_no, serialized)                    
            self.valid_no += 1
        elif mode == 'test':
            self.test_batch.Put("%08d" % self.test_no, serialized)                    
            self.test_no += 1
Example 10
def image_process(im):
    '''Average the image stack along axis 0 and binarize the average with a black top-hat filter.'''
    strel = disk(5) 
    im_wavg = np.mean(im,0)
    im_binary = black_tophat(im_wavg, strel)
    im_binary = im_binary > np.max(im_binary)/2
    im_binary = im_binary.astype('float')
    return im_wavg, im_binary
Example 11
def rolling_disk(image, radius=50, light_bg=False):
    from skimage.morphology import white_tophat, black_tophat, disk 
    str_el = disk(radius)
    print(image.shape)
    if light_bg:
        return black_tophat(image, str_el)
    else:
        return white_tophat(image, str_el)
Example 12
def remove_uneven_illumination(data):
    s = np.shape(data)
    data_new = np.zeros(s, dtype=float)

    for i in range(s[2]):
        data_new[:, :, i] = 255 - black_tophat(data[:, :, i], disk(31))

    return data_new
Example 13
def subtract_background(image, radius, light_bg=False):
    # you can also use 'ball' here to get a slightly smoother result at the
    # cost of increased computing time
    str_el = disk(radius)
    if light_bg:
        return black_tophat(image, str_el)
    else:
        return white_tophat(image, str_el)
Example 14
def run(img):
    if len(img.shape) > 2 and img.shape[2] == 4:
        img = color.rgba2rgb(img)
    if len(img.shape) == 2:
        img = color.gray2rgb(img)
    img = color.rgb2grey(img)
    img = black_tophat(img, disk(1))
    return to_base64(img)
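run returns to_base64(img), a helper that is not part of this snippet. A possible sketch, assuming it is meant to encode the filtered image as a base64 PNG string (an illustration, not the original helper):

import base64
from io import BytesIO
import numpy as np
from PIL import Image
from skimage import img_as_ubyte

def to_base64(img):
    # Hypothetical helper: encode a 2-D float image in [0, 1] as a base64-encoded PNG.
    buf = BytesIO()
    Image.fromarray(img_as_ubyte(np.clip(img, 0, 1))).save(buf, format='PNG')
    return base64.b64encode(buf.getvalue()).decode('ascii')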
Example 15
def morpho_split(data_dcm_directory, Image_file, Image, Is_a_file, sig, diam):

    if Is_a_file:

        # Read the pixel data from the DICOM file
        Lec = sitk.ReadImage(os.path.join(data_dcm_directory, Image_file))

        # Convert to an array
        Image = sitk.GetArrayFromImage(Lec[:, :, 0])

    # Convert the image from unsigned 16-bit to float
    Image = Image.astype(float)
    # Smooth the image with a Gaussian filter
    IG8 = Image
    #IG8 = filters.gaussian(Image,sigma=sig)
    # Increase the contrast of the filtered image
    IG8 = IG8 / (0.7 * np.max(IG8))

    indices = np.unravel_index(np.argmax(IG8, axis=None), IG8.shape)
    max_y = int(np.median(indices[0], overwrite_input=True))
    max_x = int(np.median(indices[1], overwrite_input=True))
    margin_y = int(2 * (3 / 2) * diam /
                   0.336)  # safety margin of twice the diameter
    margin_x = int(2 * (3 / 2) * diam / 0.336)  # in +/-x and +/-y

    print(max_y, margin_y, max_x, margin_x)

    # Define the kernel for the morphological filtering
    Kerb = morphology.disk(40)
    Field = IG8.copy()
    Filled_Field = IG8.copy()
    #Kerf = morphology.disk(60)
    #Field = morphology.white_tophat(IG8,Kerf)    # A priori there is no need for a top-hat to obtain the field. Mathematically, to
    #                                             # remove the bead it is enough to add the bottom-hat to the top-hat, which here is the base image.
    #                                             # This may be wrong, so beware!
    Ball = morphology.black_tophat(
        IG8[max_y - margin_y:max_y + margin_y,
            max_x - margin_x:max_x + margin_x], Kerb)

    B = np.zeros((1280, 1280))
    B[max_y - margin_y:max_y + margin_y,
      max_x - margin_x:max_x + margin_x] = Ball
    Filled_Field[max_y - margin_y:max_y + margin_y,
                 max_x - margin_x:max_x + margin_x] += Ball

    Ball = B
    # Control figures

    # plt.figure()
    # plt.subplot(131)
    # plt.imshow(Filled_Field)
    # plt.subplot(132)
    # plt.imshow(Field)
    # plt.subplot(133)
    # plt.imshow(Ball)

    return (Filled_Field, Ball)
Example 16
def remove_hair(im):
    """
    Segmentation of the hairs in an image in order to remove them.

    Usage:
     im = remove_hair(im)

    Parameters
    ----------
    im: ndarray, shape (width, height, channels)
      Image that contains hairs.

    Returns
    -------
    new_im: ndarray (width, height, channels)
      An image with hairs removed.
    """

    n, m, _ = im.shape

    # step 1: looking for the mask using a threshold on the RGB image
    bool_im = np.zeros((n, m))

    for i in range(n):
        for j in range(m):
            if im[i, j, 0] < 20 or im[i, j, 1] < 20 or im[i, j, 2] < 20:
                bool_im[i, j] = 1
            else:
                bool_im[i, j] = 0

    # step 2: looking for the mask using a morphological operation and then a threshold
    gray_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    tophat = black_tophat(gray_im, disk(20)) / 255
    tophat = (tophat > 0.15).astype(int)  # 0.2 is used in the paper

    # step 3: take the union of both results
    mask = dilation((bool_im.astype(bool) + tophat.astype(bool)) * 1)

    # step 4: if there are not enough hairs, no need to preprocess
    if 100 * sum(sum(mask)) / (n * m) < 5:
        return im

    # step 5: we remove the hairs as indicated in the paper
    new_im = im.copy()

    for i in range(n):
        for j in range(m):
            if mask[i, j] == 1:
                i_index = [max(i - 30, 0), min(i + 30, n)]
                j_index = [max(j - 30, 0), min(j + 30, m)]
                temp = im[i_index[0]:i_index[1], j_index[0]:j_index[1]]

                new_im[i, j, 0] = np.median(temp[:, :, 0])
                new_im[i, j, 1] = np.median(temp[:, :, 1])
                new_im[i, j, 2] = np.median(temp[:, :, 2])

    return new_im
Example 17
def black_tophat(img):
    orig_img = util.img_as_ubyte(img)
    image = orig_img.copy()
    image[340:350, 200:210] = 255
    image[100:110, 200:210] = 0
    selem = morphology.disk(6)
    b_tophat = morphology.black_tophat(image, selem)
    plot_comparison(orig_img, b_tophat, 'Black Tophat')
    plt.tight_layout()
    plt.show()
Example 18
    def _label_augment(g):
        """Augment the 3rd class: attaching cell borders """
        se = square(3)
        g_tophat = black_tophat(g, se)
        g_dilation = dilation(g_tophat, se)
        g_aug = g + (np.max(g) + 1) * g_dilation
        g_aug[g_aug == 3.0] = 2.0
        g_aug[find_boundaries(g)] = 2.0  # highlight all borders as the 3rd label

        return g_aug
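A small usage sketch for _label_augment; the toy mask below is an illustration (the leading underscore suggests the function is normally called from inside a training pipeline, but here it is invoked directly):

import numpy as np
from skimage.morphology import square, dilation, black_tophat
from skimage.segmentation import find_boundaries

g = np.zeros((64, 64), dtype=np.uint8)
g[10:30, 10:40] = 1  # a single rectangular "cell"
aug = _label_augment(g)
print(np.unique(aug))  # expected: [0 1 2] (background, cell interior, cell border)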
Example 19
    def find_void_radii(self):

        # create compensated top-hat filter
        deg_per_pix = 20 / self.img_smooth.shape[0]
        kernel_width_pix = int(self.kernel_width /
                               deg_per_pix)  # convert deg to pix
        self.cth_rad = np.array([
            int(2 * kernel_width_pix),
            int(2 * kernel_width_pix * np.sqrt(2))
        ])

        selem = (2 * np.pad(
            disk(self.cth_rad[0]),
            self.cth_rad[1] - self.cth_rad[0],
            mode="constant",
            constant_values=0,
        ) + disk(self.cth_rad[1]) * -1)

        # apply compensated top-hat filter
        self.img_cth = black_tophat(self.img_smooth, selem)

        # create marker for watershedding
        marker = np.zeros(self.img_smooth.shape)
        for src in range(len(self.pos)):
            marker[self.pos[src, 1], self.pos[src, 0]] = src + 1
        self.marker = marker

        # create mask for watershedding
        mask = np.zeros(self.img_cth.shape)
        limit = np.percentile(self.img_cth.flatten(), 80)
        mask[limit <= self.img_cth] = 1
        self.mask = mask[::-1]

        # create image for watershedding
        distance = ndi.distance_transform_edt(mask)

        # watershedding
        labels = watershed(
            -distance,  # map_lowres_smooth,
            markers=marker,
            mask=mask,
            watershed_line=True,
        )

        self.labels = labels

        rad = np.zeros(len(self.pos))
        for src in range(len(self.pos)):
            lab = labels[self.pos[src, 1], self.pos[src, 0]]
            area_pix = len(labels[labels == lab])
            rad[src] = np.sqrt(area_pix / np.pi)

        self.radii = rad
Example 20
def black_tophat():
    image = io.imread("pics/Cosmos.jpg")
    blacked = morphology.black_tophat(image)
    fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(8, 3),
                                sharex=True, sharey=True)

    ax0.imshow(image, cmap=plt.cm.gray)
    ax0.axis('off')
    ax1.imshow(blacked, cmap=plt.cm.gray)
    ax1.axis('off')

    plt.show()
Example 21
def calc_msi(raster_input, s_min, s_max, s_delta):
    """ Calculates morphological shadow index (MSI) per Huang et al. (2015)
    Assumes use of linear structuring element
    MSI = sum_{d,s}(black_tophat(morphological profiles)) / (D*S)
    d = degrees (0,45, 90, 135)
    D = 4
    s = size of linear structuring element (pixels)
    S = (s_max-s_min)/s_delta + 1

    Parameters
    ------------
    raster_input : 3-dim raster (tif) stack for calculating brightness
    s_min, s_max, s_delta: numerical input (integer)

    Returns
    ------------
    Calculated values for MSI in a numpy array
    """
    # Calculate brightness for cloud masked stack
    brightness = np.nanmax(raster_input, axis=0)

    # Cap brightness values at a max of 1. Replace all values greater than 1 with a value of 1
    brightness_cap = np.where(brightness > 1, 1, brightness)

    # Initialize inputs for MSI calculation
    selem = selemline(0, 0)
    b_tophat_array_sum = black_tophat(brightness_cap, selem)

    # Loop and sum black tophat morphological profiles for MSI calculation
    for i in range(s_min, s_max + s_delta, s_delta):
        for x in range(0, 4):
            selem = selemline(i, 45 * x)
            b_tophat = black_tophat(brightness_cap, selem)
            b_tophat_array_sum = b_tophat_array_sum + b_tophat

    D = 4
    S = ((s_max - s_min) / s_delta) + 1
    msi = b_tophat_array_sum / (D * S)

    return msi
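calc_msi relies on a selemline helper that is not included in this example. A minimal sketch of what such a linear structuring element generator might look like, assuming it takes a length in pixels and an orientation in degrees (this is an assumption, not the original helper):

import numpy as np
from skimage.draw import line

def selemline(length, degrees):
    # Hypothetical helper: a 1-pixel-wide line of roughly `length` pixels at `degrees`.
    theta = np.deg2rad(degrees)
    half = int(round(length / 2))
    dr = int(round(half * np.sin(theta)))
    dc = int(round(half * np.cos(theta)))
    size = 2 * max(abs(dr), abs(dc)) + 1
    center = size // 2
    selem = np.zeros((size, size), dtype=np.uint8)
    rr, cc = line(center - dr, center - dc, center + dr, center + dc)
    selem[rr, cc] = 1
    return selem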
Example 22
def black_top_hat(image, k=2, plot=False, verbose=True):
    if verbose:
        print('\t - Removing black holes using disk of radius %d' % k)
    res = black_tophat(image, disk(k))
    if plot:
        d.compare3(image,
                   res,
                   image + res,
                   title1='Original',
                   title2='Black Top Hat',
                   title3='Complementary')

    return image + res
Example 23
    def _try_black_tophat(self, roi, cur_text, cur_mrz):
        roi_b = morphology.black_tophat(roi, morphology.disk(5))
        new_text = ocr(roi_b)  # There are some examples where this line basically hangs for an undetermined amount of time.
        new_mrz = MRZ.from_ocr(new_text)
        if new_mrz.valid_score > cur_mrz.valid_score:
            new_mrz.aux['method'] = 'black_tophat'
            cur_text, cur_mrz = new_text, new_mrz

        new_text, new_mrz = self._try_larger_image(roi_b, cur_text, cur_mrz)
        if new_mrz.valid_score > cur_mrz.valid_score:
            new_mrz.aux['method'] = 'black_tophat(rescaled(3))'
            cur_text, cur_mrz = new_text, new_mrz

        return cur_text, cur_mrz
Example 24
def Calvo_segmentation(img,Th,Tw,elem,threshold,dilation):
    elem_estru = np.ones((elem,elem))
    Image = img[:,:,1]
    
    Image = morph.black_tophat(Image,elem_estru)
    
    #Image = ndi.median_filter(Image,3)

    arr_sigma = np.linspace(0.1, 3, 6)
    Image = frangi_2d(Image, arr_sigma)

    Image_array = Image.flatten()
    Image_array = np.sort(Image_array, axis=None)
    Tw = np.percentile(Image_array, Tw)
    Th = np.percentile(Image_array, Th)
    #  Tw_zip = zip(percentile_array,Tw)
    #  print(list(Tw_zip))

    Image = skifilter.apply_hysteresis_threshold(Image, Tw, Th)

    L,N = ndi.label(Image)
    T = ndi.sum(Image,L, range(1,N+1))
    comp2remove = np.nonzero(T<threshold)[0] + 1

    num_rows,num_cols = L.shape

    Image = np.copy(Image)

    for row in range(num_rows):
        for col in range(num_cols):
            label =  L[row,col]
            if label in comp2remove:
                Image[row,col] = 0

    for i in range(dilation):
        Image = morph.binary_dilation(Image)

    for i in range(dilation):
        Image = morph.binary_erosion(Image)

    #plot_image(Image,"Result4")
    

    return Image
Example 25
def detect_dust(image, drawable, isnegative=True, sensitivity=85, spot_size=9):
    """
    Detects dust particles.
    """
    # image = gimp.image_list()[0]
    sensitivity = 1.0 * sensitivity / 100
    spot_size = int(spot_size)

    H = pdb.gimp_image_height(image)
    W = pdb.gimp_image_width(image)
    # first let's get the pixel values
    layer = pdb.gimp_image_get_active_layer(image)

    raster_image = channelData(layer).reshape([H, W, 3])
    bpp = 8  #TODO get this
    MAX = 2**bpp - 1

    # to grayscale
    raster_image_gray = color.rgb2gray(raster_image)
    if isnegative:
        mask_level = raster_image_gray > sensitivity
        # white top hat to avoid uniform burned areas
        strel = morpho.square(spot_size)
        mask_th = morpho.white_tophat(raster_image_gray, selem=strel)
        # take only (very) outliers
        mask_th = mask_th > mask_th.mean() + 3 * mask_th.std()
    else:  # positive
        mask_level = raster_image_gray < 1 - sensitivity
        # black top hat to avoid uniform dark areas
        strel = morpho.square(spot_size)
        mask_th = morpho.black_tophat(raster_image_gray, selem=strel)
        # take only (very) outliers
        mask_th = mask_th > mask_th.mean() + 3 * mask_th.std()

    strel = morpho.disk(1)
    mask_th = morpho.opening(mask_th, selem=strel)

    # mask composition
    mask = mask_th & mask_level

    # morphological dilation
    strel = morpho.disk(3)
    mask = morpho.dilation(mask, selem=strel)

    # layer composition
    mask_list = [mask * MAX for _ in range(4)]
    dust_mask_rgba = np.stack(mask_list, axis=2)

    createMaskLayer(image, "dust", dust_mask_rgba)
Example 26
def conjugate_tophats(img, structures):
    """Funtion which performs a sum of conjugate top-hats of an image with the chose structuring elements

    Parameters
    ----------
    img : np.ndarray<float>
        The input image
    structures : np.ndarray<float>
        The structuring elements

    Returns
    -------
    img_th : np.ndarray<float>
        The output image
    """
    img_th = np.zeros(img.shape)
    for struct in structures:
        img_th += black_tophat(img, struct)
    return img_th
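A brief usage sketch for conjugate_tophats; the sample image and the disk radii are arbitrary choices for illustration:

from skimage import data
from skimage.morphology import disk, black_tophat

structures = [disk(3), disk(7), disk(15)]  # structuring elements of increasing size
img = data.coins().astype(float)
img_th = conjugate_tophats(img, structures)
print(img_th.shape, img_th.max())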
Example 27
def black_tophat_(image, selem=None, out=None):
    orig_phantom = img_as_ubyte(image)
    fig, ax = plt.subplots()
    ax.imshow(orig_phantom, cmap=plt.cm.gray)

    def plot_comparison(original, filtered, filter_name):
        fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True,
                                       sharey=True)
        ax1.imshow(original, cmap=plt.cm.gray)
        ax1.set_title('original')
        ax1.axis('off')
        ax2.imshow(filtered, cmap=plt.cm.gray)
        ax2.set_title(filter_name)
        ax2.axis('off')

    phantom = orig_phantom.copy()
    phantom[340:350, 200:210] = 255
    phantom[100:110, 200:210] = 0

    b_tophat = black_tophat(phantom, selem)
    plot_comparison(phantom, b_tophat, 'black tophat')
    plt.show()
Example 28
def other_water(image):
    b_tophat = morphology.black_tophat(image, disk(4))
    thresholded_image = threshold_image(b_tophat)

    distance = ndi.distance_transform_edt(thresholded_image)
    local_maxi = peak_local_max(distance,
                                indices=False,
                                footprint=np.ones((3, 3)),
                                labels=image)

    markers = ndi.label(local_maxi)[0]

    labels = watershed(thresholded_image, markers)
    label_image = skimage.morphology.remove_small_objects(labels, min_size=100)

    counter = 1
    config = '/Users/reganlamoureux/other_watershed_images/watershed_image{}.png'.format(
        counter)
    while os.path.exists(config):
        counter += 1
        config = '/Users/reganlamoureux/new_watershed_images/watershed_image{}.png'.format(
            counter)
    plt.imsave(config, label_image)
Example 29
 def analyse(self):
    all_wells=[]
    for well in range(self.img_set.sizes['v']):
       self.img_set.default_coords['v'] = well
       res=[]
       if self.mask[well] is not None:
          for img in self.img_set:
             passed=[]
             im=difference_of_gaussians(img, 1, 5)
             blur=gaussian(im, sigma=2)
             black=black_tophat(blur, selem=disk(4))
             thresh=threshold_otsu((black))
             black=black*self.mask[well].astype(bool).astype(int)
             objects=regionprops(ndi.label(black>thresh)[0])
             for obj in objects:
                if obj.area>self.min_area and obj.area<self.max_area:
                   passed.append(obj.area)
             res.append(len(passed))
       else:
          res.append([])
       all_wells.append(res)
    with open("out.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(all_wells)  # write the counts collected for every well, not just the last one
Example 30
plot_comparison(phantom, w_tophat, 'white tophat')

######################################################################
# As you can see, the 10-pixel wide white square is highlighted since it is
# smaller than the structuring element. Also, the thin, white edges around
# most of the ellipse are retained because they're smaller than the
# structuring element, but the thicker region at the top disappears.
#
# Black tophat
# ============
#
# The ``black_tophat`` of an image is defined as its morphological **closing
# minus the original image**. This operation returns the *dark spots of the
# image that are smaller than the structuring element*.

b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')

######################################################################
#As you can see, the 10-pixel wide black square is highlighted since
#it is smaller than the structuring element.
#
#**Duality**
#
#As you should have noticed, many of these operations are simply the reverse
#of another operation. This duality can be summarized as follows:
#
# 1. Erosion <-> Dilation
#
# 2. Opening <-> Closing
#
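######################################################################
# The same duality links the two top-hats: ``black_tophat`` applied to an
# image equals ``white_tophat`` applied to its complement. A quick sketch to
# check this (a verification added here, not part of the original gallery
# excerpt; it uses a built-in sample image rather than the phantom above):

import numpy as np
from skimage import data
from skimage.morphology import disk, white_tophat, black_tophat

img = data.camera()                   # 8-bit grayscale sample image
b = black_tophat(img, disk(6))
w = white_tophat(255 - img, disk(6))  # complement of a uint8 image
print(np.array_equal(b, w))           # expected: True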
Example 31
            newField = ogr.FieldDefn("SandMask", ogr.OFTInteger)
            outLayer.CreateField(newField)
            gdal.Polygonize(band, band, outLayer, 0, [], callback=None)
            outDatasource.Destroy()
            sourceRaster = None
            band = None

# =============================================================================
# Segment
# =============================================================================

print("\nBeginning segmentation")

# tophat edges
print("Black tophat edge detection")
tophat = morph.black_tophat(gray, selem=morph.selem.disk(1))
tophat = tophat < np.percentile(tophat, tophat_th * 100)
tophat = morph.remove_small_holes(tophat, area_threshold=5, connectivity=2)
if not np.sum(tophat) == 0:
    foo = func.featAND_fast(ignore_mask, tophat)
    ignore_mask = np.logical_and(foo, ignore_mask)
# canny edges
print("Canny edge detection")
canny = feat.canny(gray, sigma=canny_sig)
canny = np.invert(canny)
foo = func.featAND_fast(ignore_mask, canny)
ignore_mask = np.logical_and(foo, ignore_mask)
# sobel edges
print("Sobel edge detection")
sobel = filt.sobel(gray)
sobel = sobel < np.percentile(sobel, sobel_th * 100)
Example 32
#     is passed, a new array will be allocated.
#
# Returns
# -------
# opening : uint8 array
#    The result of the black top filter.

# <codecell>

# We will be working with phantom.png for this function.
# First defining the structuring element as a disk using disk()
#selem = disk(3);
selem = disk(5)
#selem = disk(10);
# phantom is assumed to have been loaded from phantom.png in an earlier cell
phantom[150:160, 200:210] = 255
b_tophat = black_tophat(phantom, selem)

# Displaying the original and eroded image
# 'plt.figure() can be used for showing multiple images together
plt.figure(1)
io.imshow(phantom)
plt.figure(2)
io.imshow(b_tophat)
#plt.figure(3)
#io.imshow(opening(phantom, selem))
plt.show()

# <markdowncell>

# **Comments** :
#
Example 33
        writeImg(outPath, img_org, kind, file, mani_num) #Original
        rots = np.random.uniform(0,360,10).astype(int) #10 random rotations
        for i, rot in enumerate(rots):
            im_size = img_org.shape[0]
            if (np.random.rand() > 0.5):
                if (np.random.rand() > 0.5):
                    img_org = cv2.flip(img_org,0)
                else:
                    img_org = cv2.flip(img_org,1)
            scale = np.random.uniform(0.7,1.3)
            mat = cv2.getRotationMatrix2D((im_size / 2, im_size / 2), rot, scale=scale)
            img_rotated = cv2.warpAffine(img_org, mat, (im_size, im_size), flags=cv2.INTER_LINEAR, borderValue=(255,255,255))

            img_out = np.zeros((img_rotated.shape[0], img_rotated.shape[1], 3), dtype=np.uint8)
            img_orig = img_rotated[:,:,0]
            img_btop = 255-black_tophat(img_orig, selem)
            img_wtop = 255-white_tophat(img_orig, selem)
            img_out[:, :, 1] = img_btop
            img_out[:, :, 2] = img_wtop

            img_rotated = img_out

            if show:
              cv2.imshow('Rot_' + str(i), img_rotated)
            writeImg(outPath, img_rotated, kind, file, i+1) #Original
        if show:
            cv2.waitKey(20000 )
            lineNum += 1
            if (lineNum > 20):
                break
Example 34
 def func(frame):
     return mor.black_tophat(frame, mor.disk(size))
Example 35
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from PIL import Image

from skimage.morphology import erosion, dilation, opening, closing
from skimage.morphology import black_tophat, white_tophat
from skimage.morphology import disk, diamond, rectangle, square, star

def my_imshow(arr, filter_name):
    f, ax = plt.subplots()
    ax.set_title(filter_name)
    #ax.axis('off')
    ax.set_adjustable('box-forced')
    ax.imshow(arr, cmap=cm.gray, interpolation='none')
    plt.show()
    
#Import an image.
image = np.array(Image.open('Butterfly.jpg').convert('L'))  # grayscale, so the 2-D structuring element below applies
my_imshow(image, 'original')

#White tophat
fly = image.copy()
fly[340:350, 200:210] = 255
fly[100:110, 200:210] = 0

selem = disk(6)  # structuring element (radius chosen arbitrarily for this demo)
w_tophat = white_tophat(fly, selem)
my_imshow(w_tophat, 'white-tophat')

#Black tophat
b_tophat = black_tophat(fly, selem)
my_imshow(b_tophat, 'black-tophat')
Example 36
    def convert_test_data(self, data_set_folder, min_pixel, test_db_name, test_output_pickle_path, 
                          inverse, channel_no = 1):
        self.remove_folder(test_db_name)
            
        test_db = leveldb.LevelDB(test_db_name)
        
        pickleTestX = test_output_pickle_path + "/testX_size_" + str(min_pixel) + ".pickle"
        pickleFileNames = test_output_pickle_path + "/fileNames.pickle"
        
        if not os.path.exists(test_output_pickle_path):
            os.makedirs(test_output_pickle_path)

        numberofImages = 0    
    
        datum = caffe.proto.caffe_pb2.Datum()
        datum.channels = channel_no
        datum.width = min_pixel
        datum.height = min_pixel
        
        test_batch = leveldb.WriteBatch()
    
        print "Load test dataset from image files"
    
        for fileNameDir in os.walk(data_set_folder):   
            for index, fileName in enumerate(fileNameDir[2]):
                if fileName[-5:] != ".JPEG":
                  continue
                numberofImages += 1
        
        imageSize = min_pixel * min_pixel
        num_rows = numberofImages # one row for each image in the test dataset

        batch_size = 10000    
        data_size = min(batch_size, numberofImages)
        testX = numpy.zeros((data_size, channel_no, imageSize), dtype=numpy.uint8)
        
        files = []
        db_index = 0
        pickle_index = 0
        batch_no = 1
        
        print "Reading images"
        for fileNameDir in os.walk(data_set_folder):   
            for index, fileName in enumerate(fileNameDir[2]):
                if fileName[-5:] != ".JPEG":
                  continue
                
                nameFileImage = "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)            
                org_image = Image.open(nameFileImage)
                files.append(fileName)
                
                image = org_image.resize((min_pixel, min_pixel), Image.ANTIALIAS)
    
                """
                print fileName
                
                plt.figure(1, figsize=(1, 1), dpi=100)
                plt.gray();                
                plt.subplot(1, 1, 1)
                plt.imshow(image)
                plt.show()
                """
    
                if inverse:
                    image_ubyte = 255 - img_as_ubyte(image)
                else:
                    image_ubyte = img_as_ubyte(image)
                
                if channel_no > 1:
                    selem = disk(6)
                    w_tophat = white_tophat(image_ubyte, selem)
                    b_tophat = black_tophat(image_ubyte, selem)
                    datum.data = image_ubyte.tostring() + w_tophat.tostring() + b_tophat.tostring()
                    image_output = numpy.concatenate((image_ubyte, w_tophat, b_tophat), axis=1)
                else:
                    datum.data = image_ubyte.tostring()
                    image_output = image_ubyte
                
                    
                test_batch.Put("%08d" % db_index, datum.SerializeToString())
    
                testX[pickle_index] = numpy.reshape(image_output, (channel_no, imageSize))
    
                db_index += 1
                pickle_index += 1
                
                if db_index % 1000 == 0:
                    test_db.Write(test_batch, sync = True)
                    del test_batch
                    test_batch = leveldb.WriteBatch()
                    print 'Processed %i test images.' % db_index
    
                if pickle_index % batch_size == 0:
                    pickle_file_name = pickleTestX + "_" + str(batch_no)
                    with open(pickle_file_name,'wb') as fp:
                        cPickle.dump(testX, fp)
                        print "pickled %s" % pickle_file_name
                        data_size = min(batch_size, numberofImages - batch_size * batch_no)
                        testX = numpy.zeros((data_size, channel_no, imageSize), dtype=numpy.uint8)
                        batch_no += 1
                        pickle_index = 0
                
                report = [int((j+1)*num_rows/20.) for j in range(20)]
                if db_index in report: print numpy.ceil(db_index *100.0 / num_rows), "% done"
    
    
        # Write last batch of images
        if db_index % 1000 != 0:
            test_db.Write(test_batch, sync = True)
    
        if pickle_index % batch_size > 0:
            pickle_file_name = pickleTestX + "_" + str(batch_no)
            with open(pickle_file_name,'wb') as fp:
                cPickle.dump(testX, fp)
                print "pickled %s" % pickle_file_name
                        
        with open(pickleFileNames,'wb') as fp:
            cPickle.dump(files, fp)
    
        print 'Processed a total of %i images.' % db_index