Code example #1
def enhance(image):
    image = image.copy()
    # Convert to grayscale and smooth while keeping edges sharp.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.bilateralFilter(image, 9, 175, 175)
    # Emphasise structures brighter than their neighbourhood (white top-hat)
    # and suppress darker ones (black top-hat).
    image_top = morphology.white_tophat(image, size=400)
    image_bottom = morphology.black_tophat(image, size=80)
    image = cv2.add(image, image_top)
    image = cv2.subtract(image, image_bottom)
    # Local contrast enhancement with CLAHE.
    clahe_obj = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    image = clahe_obj.apply(image)
    return image
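A minimal usage sketch for enhance(), assuming morphology refers to scipy.ndimage (whose white_tophat/black_tophat accept a size= argument) and cv2 is OpenCV; the file names are placeholders, not from the original project:

import cv2
from scipy import ndimage as morphology  # provides white_tophat / black_tophat with size=

image = cv2.imread("radiograph.png")     # hypothetical BGR input image
enhanced = enhance(image)
cv2.imwrite("radiograph_enhanced.png", enhanced)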
Code example #2
def top_hat_transform(img):
    """Calculates the top-hat transformation of a given image.
    This transformation enhances the brighter structures in the image.

    Args:
        img: A grayscale dental x-ray image.

    Returns:
        The top-hat transformation of the input image.

    """
    return morphology.white_tophat(img, size=400)
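The docstring above matches the definition of the white top-hat: the image minus its grey-scale opening, so only structures brighter than their surroundings and smaller than the structuring element remain. A minimal check of that identity with scipy.ndimage on a synthetic array (not the dental data from the original project):

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
img = rng.random((64, 64))
s = 9  # structuring-element size; the snippet above uses 400 on full radiographs
tophat = ndimage.white_tophat(img, size=s)
opened = ndimage.grey_opening(img, size=s)
assert np.allclose(tophat, img - opened)  # white top-hat == image minus its opening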
Code example #3
    def top_hat_transform(image):
        """
            Method calculates the top hat transformation of a given image
        """

        #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Structuring element; defined here but not passed to white_tophat below.
        structure = np.array([[1., 1., 2., 5., 2., 1.],
                              [1., 2., 5., 5., 5., 1.],
                              [1., 5., 5., 10., 5., 1.],
                              [1., 1., 5., 5., 5., 1.],
                              [1., 1., 2., 5., 2., 1.]])

        return morphology.white_tophat(image, size=400)
Code example #4
    def top_hat_transform(image):
        """
            Method calculates the top hat transformation of a given image
        """

        #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        structure = np.array([[1., 1., 2., 5., 2., 1.],
                              [1., 2., 5., 5., 5., 1.],
                              [1., 5., 5., 10., 5., 1.],
                              [1., 1., 5., 5., 5., 1.],
                              [1., 1., 2., 5., 2., 1.]])

        return morphology.white_tophat(image, size=400)
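In both of the snippets above, the structure array is built but never used; white_tophat is called with size=400 instead. If the element were meant to be used, scipy.ndimage's white_tophat accepts a grey-level structuring element through its structure= keyword. A small sketch of that (an assumption about intent, not what the original project does):

import numpy as np
from scipy import ndimage as morphology

structure = np.array([[1., 1., 2., 5., 2., 1.],
                      [1., 2., 5., 5., 5., 1.],
                      [1., 5., 5., 10., 5., 1.],
                      [1., 1., 5., 5., 5., 1.],
                      [1., 1., 2., 5., 2., 1.]])

image = np.random.default_rng(0).random((128, 128))  # placeholder for a grayscale radiograph
result = morphology.white_tophat(image, structure=structure)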
Code example #5
def peak_find(image,
              best_size="auto",
              refine_positions=False,
              sensitivity_threshold=33,
              start_search=3,
              end_search="auto",
              progress_object=None):
    """
    
    Parameters
    ----------
    refine_position : bool
        ddf
            
    """
    # TODO: best_size needs its auto-estimation routine
    trial_size = get_trial_size(best_size)

    # Removes slowly varying background from image to simplify Gaussian fitting.
    input_offset = white_tophat(image, 2*trial_size)

    # image dimension sizes, used for loop through image pixels
    m, n = get_data_shape(image)

    big = get_end_search(image, end_search)
            
    # Create blank arrays.
    heights = np.empty(image.shape, dtype=np.float32)
    spreads = np.empty(image.shape, dtype=np.float32)
    xs = np.empty(image.shape, dtype=np.float32)
    ys = np.empty(image.shape, dtype=np.float32)

    # Half of the trial size, equivalent to the border that will not be inspected.
    test_box_padding = int(( trial_size - 1 ) / 2.)

    # Coordinate set for X and Y fitting.  
    base_axis = np.arange(-test_box_padding, test_box_padding+1., dtype=np.float32)
    # Followed by the restoration progress bar:
    if progress_object is not None:
        progress_object.set_title("Identifying Image Peaks...")
        progress_object.set_position(0)
    for i in range(test_box_padding + 1, m - (test_box_padding + 1)):
        currentStrip = input_offset[i - test_box_padding:i + test_box_padding + 1]
        for j in range(test_box_padding + 1, n - (test_box_padding + 1)):
            I = currentStrip[:, j - test_box_padding:j + test_box_padding + 1]
            y, x, height, spread = fit_block(I, base_axis)
            ys[i, j] = y
            xs[i, j] = x
            heights[i, j] = height
            spreads[i, j] = spread
            
            if progress_object is not None:
                percentage_refined = (((trial_size-3.)/2.) / ((big-1.)/2.)) +  (((i-test_box_padding) / (m - 2*test_box_padding)) / (((big-1)/2)))  # Progress metric when using a looping peak-finding waitbar.
                progress_object.set_position(percentage_refined)
    # normalize peak heights
    heights = heights / ( np.max(input_offset) - np.min(input_offset) ) 
    # normalize fitted Gaussian widths
    spreads = spreads / trial_size
    offset_radii = np.sqrt(ys**2 + xs**2)  # Calculate offset radii.
    return filter_peaks(heights, spreads, offset_radii, trial_size, sensitivity_threshold)
Code example #6
File: ranger.py  Project: dpfhty/Absolute_Integrator
def peak_find(image,
              best_size="auto",
              refine_positions=False,
              sensitivity_threshold=33,
              start_search=3,
              end_search="auto",
              progress_object=None):
    """
    
    Parameters
    ----------
    refine_position : bool
        ddf
            
    """
    # TODO: best_size needs its auto-estimation routine
    trial_size = get_trial_size(best_size)

    # Removes slowly varying background from image to simplify Gaussian fitting.
    input_offset = white_tophat(image, 2 * trial_size)

    # image dimension sizes, used for loop through image pixels
    m, n = get_data_shape(image)

    big = get_end_search(image, end_search)

    # Create blank arrays.
    heights = np.empty(image.shape, dtype=np.float32)
    spreads = np.empty(image.shape, dtype=np.float32)
    xs = np.empty(image.shape, dtype=np.float32)
    ys = np.empty(image.shape, dtype=np.float32)

    # Half of the trial size, equivalent to the border that will not be inspected.
    test_box_padding = int((trial_size - 1) / 2.)

    # Coordinate set for X and Y fitting.
    base_axis = np.arange(-test_box_padding,
                          test_box_padding + 1.,
                          dtype=np.float32)
    # Followed by the restoration progress bar:
    if progress_object is not None:
        progress_object.set_title("Identifying Image Peaks...")
        progress_object.set_position(0)
    for i in range(test_box_padding + 1, m - (test_box_padding + 1)):
        currentStrip = input_offset[i - test_box_padding:i + test_box_padding +
                                    1]
        for j in range(test_box_padding + 1, n - (test_box_padding + 1)):
            I = currentStrip[:, j - test_box_padding:j + test_box_padding + 1]
            y, x, height, spread = fit_block(I, base_axis)
            ys[i, j] = y
            xs[i, j] = x
            heights[i, j] = height
            spreads[i, j] = spread

            if progress_object is not None:
                percentage_refined = (
                    ((trial_size - 3.) / 2.) / ((big - 1.) / 2.)
                ) + (
                    ((i - test_box_padding) /
                     (m - 2 * test_box_padding)) / (((big - 1) / 2))
                )  # Progress metric when using a looping peak-finding waitbar.
                progress_object.set_position(percentage_refined)
    # normalize peak heights
    heights = heights / (np.max(input_offset) - np.min(input_offset))
    # normalize fitted Gaussian widths
    spreads = spreads / trial_size
    offset_radii = np.sqrt(ys**2 + xs**2)  # Calculate offset radii.
    return filter_peaks(heights, spreads, offset_radii, trial_size,
                        sensitivity_threshold)
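fit_block is not shown in any of these examples, so it is unclear exactly how each trial box is reduced to (y, x, height, spread). Purely as an illustration of what such a helper could return, here is a hypothetical moment-based sketch; it is not the actual Absolute_Integrator code, which may fit a true 2-D Gaussian instead:

import numpy as np

def fit_block(block, base_axis):
    # Hypothetical: estimate peak parameters from intensity moments of the block.
    block = block.astype(np.float32)
    block = block - block.min()          # remove the local offset
    total = block.sum()
    if total == 0:                       # flat block: no peak to report
        return 0.0, 0.0, 0.0, 0.0
    height = block.max()
    # Intensity-weighted centroid, expressed on the centred base_axis.
    y = (block.sum(axis=1) * base_axis).sum() / total
    x = (block.sum(axis=0) * base_axis).sum() / total
    # Spread as an r.m.s. radius about the centroid.
    yy, xx = np.meshgrid(base_axis, base_axis, indexing="ij")
    spread = np.sqrt((block * ((yy - y) ** 2 + (xx - x) ** 2)).sum() / total)
    return y, x, height, spread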
Code example #7
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 14:02:54 2017

@author: aliTakin
"""

#this is task three

import numpy as np
from scipy.ndimage import white_tophat
import matplotlib.pyplot as plt

import matplotlib.image as img

temp = img.imread(r"C:\Users\aliTakin\Desktop\4.92\sgn_41007\oulu.jpg")
plt.imshow(temp)
print(np.shape(temp))
tempR = temp[:, :, 0]
tempG = temp[:, :, 1]
tempB = temp[:, :, 2]

mean_whole_image = np.mean(temp)
meanR = np.mean(tempR, axis=0)
meanG = np.mean(tempG, axis=0)
meanB = np.mean(tempB, axis=0)

plt.figure()
plt.imshow(white_tophat(temp, size=10))
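Note that np.mean(tempR, axis=0) in the snippet above yields one mean per image column; a single scalar per colour channel comes from averaging over both spatial axes, as code example #11 below does. A short continuation reusing the variables already defined (the grayscale conversion is only an illustrative choice):

# scalar mean per channel, cf. code example #11
print(np.mean(temp, axis=(0, 1)))

# apply the top-hat to a single grayscale plane instead of the raw RGB stack
gray = np.mean(temp, axis=2)
plt.figure()
plt.imshow(white_tophat(gray, size=10), cmap="gray")
plt.show()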
Code example #8
def whitehat(image, size=400):
    return morphology.white_tophat(image, size)
Code example #9
def topHatVolume(x):
    for i in range(x.shape[2]):
        x[:, :, i] = white_tophat(x[:, :, i], structureSize)
    return x
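structureSize in the snippet above is taken from the enclosing scope. A self-contained variant (a sketch assuming scipy.ndimage's white_tophat and an arbitrary structure size, and writing to a copy rather than modifying the input in place):

import numpy as np
from scipy.ndimage import white_tophat

def top_hat_volume(volume, structure_size=15):
    """Apply a 2-D white top-hat to every slice along the last axis."""
    out = np.empty_like(volume)
    for i in range(volume.shape[2]):
        out[:, :, i] = white_tophat(volume[:, :, i], size=structure_size)
    return out

vol = np.random.default_rng(1).random((32, 32, 8))  # synthetic test volume
filtered = top_hat_volume(vol)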
Code example #10
def top_hat_transform(img):
    return morphology.white_tophat(img, size=400)
Code example #11
    plt.figure()
    plt.imshow(im)

    # check type and shape
    print(type(im))
    print(im.shape)

    # mean of all image
    print(np.mean(im))

    # mean of each channel (RGB)
    print(np.mean(im, axis=(0, 1)))

    # apply white top-hat transform
    plt.figure()
    plt.imshow(white_tophat(im, size=10))

    """
    Question 4 & 5
    """
    # read the data (requires: from scipy.io import loadmat)
    mat = loadmat("twoClassData.mat")
    print(mat.keys())
    X = mat["X"]
    y = mat["y"].ravel()

    # plot two class data
    plt.figure()
    plt.plot(X[y == 0, 0], X[y == 0, 1], "ro")
    plt.plot(X[y == 1, 0], X[y == 1, 1], "bo")
    plt.show()
Code example #12
def split(radiograph, interval=50, show=False):
    img = cv2.cvtColor(radiograph, cv2.COLOR_BGR2GRAY)
    img = morphology.white_tophat(img, size=400)

    height, width = img.shape
    mask = 255 - img
    filt = gaussian_filter(450, width)
    if width % 2 == 0:
        filt = filt[:-1]
    mask = np.multiply(mask, filt)

    minimal_points = []
    for x in range(interval, width, interval):
        hist = []
        for y in range(int(height * 0.4), int(height * 0.7), 1):
            hist.append((np.sum(mask[y][x - interval:x + interval + 1]), x, y))

        fft = scipy.fftpack.rfft([intensity for (intensity, _, _) in hist])
        fft[30:] = 0
        smoothed = scipy.fftpack.irfft(fft)

        indices = scipy.signal.argrelmax(smoothed)[0]
        minimal_points_width = []
        for idx in indices:
            minimal_points_width.append(hist[idx])
        minimal_points_width.sort(reverse=True)

        count = 0
        to_keep = []
        for min_point in minimal_points_width:
            _, _, d = min_point
            if all(abs(b - d) > 150 for _, _, b in to_keep) and count < 4:
                count += 1
                to_keep.append(min_point)
        minimal_points.extend(to_keep)

    edges = []
    for _, x, y in minimal_points:
        min_intensity = float('inf')
        min_coords = (-1, -1)
        for _, u, v in minimal_points:
            intensity = _edge_intensity(mask, (x, y), (u, v))
            if x < u and intensity < min_intensity and abs(v - y) < 0.1 * height:
                min_intensity = intensity
                min_coords = (u, v)
        if min_coords != (-1, -1):
            edges.append([(x, y), min_coords])

    paths = []
    for edge in edges:
        new_path = True

        for path in paths:
            if path.edges[-1] == edge[0]:
                new_path = False
                path.extend(edge)
        if new_path:
            paths.append(Path([edge[0], edge[1]]))

    mask2 = mask * (255 / mask.max())
    mask2 = mask2.astype('uint8')

    for path in paths:
        path.trim(mask2)
    paths = remove_short_paths(paths, width, 0.3)

    best_path = min(paths, key=lambda p: p.intensity(img) / p.length())

    if show:
        plotting_code.plot_jaw_split(img, minimal_points, paths, best_path)

    return best_path
Code example #13
def tophat(image, size):
    return white_tophat(image, size=size)
Code example #14
def feature_find(image,
                 best_size,
                 sensitivity_threshold=34,
                 start_search=3,
                 end_search="auto",
                 progress_object=None):
    """
    A one-line summary needed to explain what function does.

    Several sentances providing extended description.

    Parameters
    ----------
    image: np.array
    Peak_find assumes a dark-field image where features are white and
    background is black.
    *Note: If you wish to use this function on bright-field images simply
    invert the image before using the function.
    best_size :  int
    An odd integer 3 or larger which is smaller than the width of the image.
    If this is unknown the get_trial_size() function needs to be run to
    determine the best feature spacing.
    sensitivity_threshold :
    start_search :
    end_search :
    progress_object :

    Returns
    -------
    list: x, y coordinates of peak location.

    Examples
    --------

    """

    # Removes slowly varying background from image to simplify Gaussian fitting.
    input_offset = white_tophat(image, 2 * best_size)
    # image dimension sizes, used for loop through image pixels
    m, n = get_data_shape(image)
    #print (m, n)
    big = get_end_search(image, end_search)

    # Create blank arrays.
    heights = np.empty(image.shape, dtype=np.float32)
    spreads = np.empty(image.shape, dtype=np.float32)
    xs = np.empty(image.shape, dtype=np.float32)
    ys = np.empty(image.shape, dtype=np.float32)

    # Half of the trial size, equivalent to the border that will not be inspected.
    test_box_padding = int((best_size - 1) / 2.)

    # Coordinate set for X and Y fitting.
    base_axis = np.arange(-test_box_padding,
                          test_box_padding + 1.,
                          dtype=np.float32)
    # Followed by the restoration progress bar:
    if progress_object is not None:
        progress_object.set_title("Identifying Image Peaks...")
        progress_object.set_position(0)

    for i in range(test_box_padding + 1, m - (test_box_padding + 1)):
        currentStrip = input_offset[i - test_box_padding:i + test_box_padding + 1]
        for j in range(test_box_padding + 1, n - (test_box_padding + 1)):
            I = currentStrip[:, j - test_box_padding:j + test_box_padding + 1]
            y, x, height, spread = fit_block(I, base_axis)
            ys[i, j] = y
            xs[i, j] = x
            heights[i, j] = height
            spreads[i, j] = spread

            if progress_object is not None:
                percentage_refined = (
                    ((best_size - 3.) / 2.) / ((big - 1.) / 2.)
                ) + (
                    ((i - test_box_padding) /
                     (m - 2 * test_box_padding)) / (((big - 1) / 2))
                )  # Progress metric when using a looping peak-finding waitbar.
                progress_object.set_position(percentage_refined)
    # normalize peak heights
    heights = heights / (np.max(input_offset) - np.min(input_offset))
    # normalize fitted Gaussian widths
    spreads = spreads / best_size
    offset_radii = np.sqrt(ys**2 + xs**2)  # Calculate offset radii.
    return filter_peaks(heights, spreads, offset_radii, best_size,
                        sensitivity_threshold)