def show_outline(src):
    contours = cv2.Canny(image=src, threshold1=125, threshold2=350)
    plt_save(255 - contours, title='Canny Contours')

    image_gray = cv2.cvtColor(src=src, code=cv2.COLOR_BGR2GRAY)

    contours = cv2.Canny(image=image_gray, threshold1=125, threshold2=350)
    plt_save(255 - contours, title='Canny Contours Gray')

    # Hough transform for line detection
    theta = np.pi / 180
    threshold = 50
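    # Probabilistic Hough transform on the edge map: rho and theta set the accumulator
    # resolution (1 pixel, 1 degree); `threshold` is the minimum number of votes for a line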
    lines = cv2.HoughLinesP(image=contours, rho=1, theta=theta, threshold=threshold)

    if lines is not None:
        src_clone = src.copy()

        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(img=src_clone, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=2)
            pass
        pass

        plt_save(src_clone, title='Lines with HoughP, threshold: ' + str(threshold))
    pass

    # Detect circles
    # blur =  cv2.GaussianBlur(src=image_gray, ksize=(5, 5), sigmaX=1.5)
    threshold = 200
    min_votes = 100
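    # HOUGH_GRADIENT runs Canny internally: param1 is the upper Canny threshold,
    # param2 the accumulator threshold for circle centers, dp the inverse accumulator
    # resolution, and minDist the minimum distance between detected centers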

    circles = cv2.HoughCircles(image=image_gray, method=cv2.HOUGH_GRADIENT, dp=2, minDist=20,
                               param1=threshold, param2=min_votes, minRadius=15, maxRadius=50)

    if circles is not None:
        src_clone = src.copy()
        for circle in circles:
            for x1, y1, r in circle:
                cv2.circle(img=src_clone, center=(int(x1), int(y1)), radius=int(r), color=(0, 255, 0), thickness=2)
            pass

        plt_save(src_clone, title='Circles with Hough, threshold: ' + str(threshold) + ', min_votes=' + str(min_votes))

    pass

    # Get the contours
    src_clone = src.copy()
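    # findContours treats every non-zero pixel as foreground (a binary image is the usual input);
    # RETR_LIST returns all contours without hierarchy, CHAIN_APPROX_NONE keeps every contour point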
    contours, _ = cv2.findContours(image=image_gray, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_NONE)
    image_contours = cv2.drawContours(image=src_clone, contours=contours, contourIdx=-1, color=(255, 255, 255),
                                      thickness=2)

    plt_save(image_contours, title='Contours with RETR_LIST')
Example #2
import cv2

from image_filtering import show_filtering
from image_outline import show_outline
from image_transformation import show_transformation
from utils import plt_save
from image_color import show_hsv
from image_enhancement import show_enhancement

if __name__ == '__main__':
    file_path = './images/000000507081.jpg'

    origin = cv2.imread(file_path)
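    # cv2.imread returns BGR; reorder the channels to RGB so matplotlib displays the colors correctly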
    origin = origin[:, :, [2, 1, 0]]

    # x, y = origin.shape[0:2]
    # origin = cv2.resize(origin, (int(y / 3), int(x / 3)))
    plt_save(image=origin, title='Origin')

    # -------------------- Image color --------------------
    # Convert to the HSV color space
    show_hsv(origin)

    # -------------------- Image transformation --------------------
    show_transformation(origin)

    # -------------------- Image filtering --------------------
    show_filtering(origin)

    # -------------------- Extract lines, contours and regions --------------------
    show_outline(origin)

    # -------------------- Image enhancement: white balance, etc. --------------------
    show_enhancement(origin)
Example #3
def show_filtering(src):
    # Low-pass filtering
    # Blur the image with a mean filter
    blur = cv2.blur(src=src, ksize=(5, 5))
    plt_save(image=blur, title='Mean filtered (5x5)')

    # Blur the image with a mean filter 9x9
    blur = cv2.blur(src=src, ksize=(9, 9))
    plt_save(image=blur, title='Mean filtered (9x9)')

    blur = cv2.GaussianBlur(src=src, ksize=(9, 9), sigmaX=1.5)
    plt_save(image=blur, title='Gaussian filtered Image (9x9)')

    gauss = cv2.getGaussianKernel(ksize=9, sigma=1.5, ktype=cv2.CV_32F)
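    # getGaussianKernel returns the ksize x 1 column of filter coefficients; a non-positive
    # sigma makes OpenCV derive it from ksize as 0.3*((ksize-1)*0.5 - 1) + 0.8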
    print('GaussianKernel 1.5 = [', end='')
    for item in gauss:
        print(item, end='')
    pass
    print(']')

    gauss = cv2.getGaussianKernel(ksize=9, sigma=-1, ktype=cv2.CV_32F)
    print('GaussianKernel -1 = [', end='')
    for item in gauss:
        print(item, end='')
    pass
    print(']')

    # Downsample the image
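    # Blur first to suppress aliasing, then shrink to a quarter of the size with cubic interpolation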
    blur = cv2.GaussianBlur(src=src, ksize=(11, 11), sigmaX=1.75)
    resized1 = cv2.resize(src=blur,
                          dsize=(0, 0),
                          fx=0.25,
                          fy=0.25,
                          interpolation=cv2.INTER_CUBIC)
    plt_save(image=resized1, title='resize CUBIC 0.25')

    # resizing with NN
    resized2 = cv2.resize(src=resized1,
                          dsize=(0, 0),
                          fx=4,
                          fy=4,
                          interpolation=cv2.INTER_NEAREST)
    plt_save(image=resized2, title='resize NEAREST x4')

    # resizing with bilinear
    resized3 = cv2.resize(src=resized1,
                          dsize=(0, 0),
                          fx=4,
                          fy=4,
                          interpolation=cv2.INTER_LINEAR)
    plt_save(image=resized3, title='resize LINEAR x4')

    # Median filtering
    median_blur = cv2.medianBlur(src=src, ksize=5)
    plt_save(image=median_blur, title='Median filtered')

    # Directional filters (Sobel derivatives)
    image_gray = cv2.cvtColor(src=src, code=cv2.COLOR_BGR2GRAY)

    # Compute Sobel X derivative
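    # (ddepth=CV_8U would clip negative responses, so delta=128 shifts the result to mid-grey
    #  and scale=0.4 damps it for display)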
    sobel_x = cv2.Sobel(src=image_gray,
                        ddepth=cv2.CV_8U,
                        dx=1,
                        dy=0,
                        ksize=3,
                        scale=0.4,
                        delta=128,
                        borderType=cv2.BORDER_DEFAULT)
    plt_save(image=sobel_x, title='Sobel X')

    # Compute Sobel Y derivative
    sobel_y = cv2.Sobel(src=image_gray,
                        ddepth=cv2.CV_8U,
                        dx=0,
                        dy=1,
                        ksize=3,
                        scale=0.4,
                        delta=128,
                        borderType=cv2.BORDER_DEFAULT)
    plt_save(image=sobel_y, title='Sobel Y')

    # Compute norm of Sobel
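    # (CV_16S keeps the signed derivative values so the magnitude can be formed without clipping)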
    sobel_x = cv2.Sobel(src=image_gray, ddepth=cv2.CV_16S, dx=1, dy=0)
    sobel_y = cv2.Sobel(src=image_gray, ddepth=cv2.CV_16S, dx=0, dy=1)

    sobel_1 = abs(sobel_x) + abs(sobel_y)
    plt_save(image=sobel_1, title='abs Sobel X+Y')

    sobel_2 = cv2.convertScaleAbs(sobel_x) + cv2.convertScaleAbs(sobel_y)
    plt_save(image=sobel_2, title='cv2.convertScaleAbs Sobel X+Y')

    # Compute Sobel X derivative (7x7)
    sobel_x_7x7 = cv2.Sobel(src=image_gray,
                            ddepth=cv2.CV_8U,
                            dx=1,
                            dy=0,
                            ksize=7,
                            scale=0.001,
                            delta=128)
    plt_save(image=sobel_x_7x7, title='Sobel X (7x7)')

    uint8_sobel_1 = np.uint8(sobel_1)
    plt_save(image=uint8_sobel_1, title='uint8 sobel_1')

    uint8_sobel_2 = np.uint8(sobel_2)
    plt_save(image=uint8_sobel_2, title='uint8 sobel_2')

    int8_sobel_1 = np.int8(sobel_1)
    plt_save(image=int8_sobel_1, title='int8 sobel_1')

    int8_sobel_2 = np.int8(sobel_2)
    plt_save(image=int8_sobel_2, title='int8 sobel_2')

    # Apply threshold to Sobel norm (low threshold value)
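    # (note: with thresh=255 and THRESH_BINARY no 8-bit pixel exceeds the threshold, so this
    #  mask comes out empty; a lower value keeps the stronger edges)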
    _, thresh_binary = cv2.threshold(src=uint8_sobel_1,
                                     thresh=255,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=thresh_binary,
             title='Binary Sobel (low)  cv2.threshold uint8_sobel_1')

    _, thresh_binary = cv2.threshold(src=uint8_sobel_2,
                                     thresh=255,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=thresh_binary,
             title='Binary Sobel (low)  cv2.threshold uint8_sobel_2')

    # Apply threshold to Sobel norm (high threshold value)
    _, thresh_binary = cv2.threshold(src=uint8_sobel_1,
                                     thresh=190,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=thresh_binary,
             title='Binary Sobel Image (high)  cv2.threshold uint8_sobel_1')

    _, thresh_binary = cv2.threshold(src=uint8_sobel_2,
                                     thresh=190,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=thresh_binary,
             title='Binary Sobel Image (high)  cv2.threshold uint8_sobel_2')

    add_weighted_sobel = cv2.addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0)
    plt_save(image=add_weighted_sobel, title='cv2.addWeighted abs')

    # down-sample and up-sample the image
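    # (pyrDown Gaussian-blurs and halves the image; pyrUp doubles it back, so the round trip loses fine detail)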
    reduced = cv2.pyrDown(src=src)
    rescaled = cv2.pyrUp(src=reduced)
    plt_save(image=rescaled, title='Rescaled')

    # down-sample and up-sample the image
    reduced = cv2.pyrDown(src=image_gray)
    rescaled = cv2.pyrUp(src=reduced)

    h, w = src.shape[0:2]
    rescaled = cv2.resize(rescaled, (w, h))

    plt_save(image=rescaled, title='Rescaled (gray)')

    subtract = cv2.subtract(src1=rescaled, src2=image_gray)
    subtract = np.uint8(subtract)
    plt_save(image=subtract, title='cv2.subtract')

    gauss05 = cv2.GaussianBlur(src=image_gray, ksize=(0, 0), sigmaX=0.5)
    gauss15 = cv2.GaussianBlur(src=image_gray, ksize=(0, 0), sigmaX=1.5)
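    # Difference of Gaussians (DoG): subtracting two blurs with nearby sigmas approximates
    # the Laplacian of Gaussian and highlights edges at that scale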
    subtract = cv2.subtract(src1=gauss15, src2=gauss05, dtype=cv2.CV_16S)
    subtract = np.uint8(subtract)
    plt_save(image=subtract, title='cv2.subtract gauss15 - gauss05')

    gauss20 = cv2.GaussianBlur(src=image_gray, ksize=(0, 0), sigmaX=2.0)
    gauss22 = cv2.GaussianBlur(src=image_gray, ksize=(0, 0), sigmaX=2.2)
    subtract = cv2.subtract(src1=gauss22, src2=gauss20, dtype=cv2.CV_32F)
    subtract = np.uint8(subtract)
    plt_save(image=subtract, title='cv2.subtract gauss22 - gauss20')
Example #4

# (snippet truncated: the beginning of this plotting call is missing)
#         title='Test Set Visualization',
#         xlabel='Latent dim 1',
#         ylabel='Latent dim 2')

print('\nCorresponding mapping coordinates in the latent space - one image per digit:\n')
encoded_x_test = encoder.predict(x_test)
digits = {}
i = 0
while len(digits) < 10:
    digits[y_test[i]] = encoded_x_test[i]
    i += 1

ut.output('g.c', pd.DataFrame(digits))

# (d)
z_sample = np.array([[-2.5, 0.55]])
decoded_x = generator.predict(z_sample)
ut.image(decoded_x)
ut.plt_save('g.d')

# (e)
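# Linearly interpolate between the latent codes of digit 3 and digit 5 and decode
# each intermediate point to visualize the transition.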
source, target = digits[3], digits[5]
(x1, y1), (x2, y2) = source, target
a = (y2 - y1) / (x2 - x1)
b = y1 - (a * x1)
f = lambda _x_: a * _x_ + b
x_samples = np.linspace(x1, x2, num=10)
samples = [np.array([[_x, f(_x)]]) for _x in x_samples]
ut.images(map(generator.predict, samples), map(str, x_samples), n_cols=3)
ut.plt_save('g.e')
Example #5
# (snippet truncated: the beginning of this plotting call is missing)
#         title='Test Set Visualization',
#         xlabel='Latent dim 1',
#         ylabel='Latent dim 2')

print('\nf. Corresponding mapping coordinates in the latent space - one image per digit:\n')
encoded_x_test = encoder.predict(x_test)
digits = {}
i = 0
while len(digits) < 10:
    digits[y_test[i]] = encoded_x_test[i]
    i += 1

ut.output('f.c', pd.DataFrame(digits))

# (d)
z_sample = np.array([[-2.5, 0.55]])
decoded_x = generator.predict(z_sample)
ut.image(decoded_x)
ut.plt_save('f.d')

# (e)
source, target = digits[3], digits[5]
(x1, y1), (x2, y2) = source, target
a = (y2 - y1) / (x2 - x1)
b = y1 - (a * x1)
f = lambda _x_: a * _x_ + b
x_samples = np.linspace(x1, x2, num=10)
samples = [np.array([[_x, f(_x)]]) for _x in x_samples]
ut.images(map(generator.predict, samples), map(str, x_samples), n_cols=3)
ut.plt_save('f.e')
Example #6
def show_transformation(src):
    # Erode and dilate the image with morphological filters
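    # (grayscale erosion takes the local minimum under the kernel, dilation the local maximum)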
    element_3x3 = np.ones((3, 3), np.uint8)

    # Erode with a 3x3 kernel
    eroded = cv2.erode(src=src, kernel=element_3x3)
    plt_save(image=eroded, title='eroded')

    # Dilate 3x3, 3 iterations
    dilated = cv2.dilate(src=src, kernel=element_3x3, iterations=3)
    plt_save(image=dilated, title='dilated 3 times')

    # Erode with a 7x7 kernel
    element_7x7 = np.ones((7, 7), np.uint8)
    eroded_7x7 = cv2.erode(src=src, kernel=element_7x7, iterations=1)
    plt_save(image=eroded_7x7, title='eroded 7x7')

    # Erode 3x3, 3 iterations
    eroded_3 = cv2.erode(src=src, kernel=element_3x3, iterations=3)
    plt_save(image=eroded_3, title='eroded 3 times')

    # Open and close the image with morphological filters
    image_gray = cv2.cvtColor(src=src, code=cv2.COLOR_RGB2GRAY)
    # plt_save(image=image_gray, title='image_gray')

    # Close the image
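    # (closing = dilation then erosion, fills small dark gaps; opening = erosion then dilation,
    #  removes small bright specks)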
    element_5x5 = np.ones((5, 5), np.uint8)

    closed = cv2.morphologyEx(src=image_gray,
                              op=cv2.MORPH_CLOSE,
                              kernel=element_5x5)
    plt_save(image=closed, title='closed')

    # Open the image
    opened = cv2.morphologyEx(src=image_gray,
                              op=cv2.MORPH_OPEN,
                              kernel=element_5x5)
    plt_save(image=opened, title='opened')

    closed = cv2.morphologyEx(src=image_gray,
                              op=cv2.MORPH_CLOSE,
                              kernel=element_5x5)
    closed_opened = cv2.morphologyEx(src=closed,
                                     op=cv2.MORPH_OPEN,
                                     kernel=element_5x5)
    plt_save(image=closed_opened, title='Closed -> Opened')

    opened = cv2.morphologyEx(src=image_gray,
                              op=cv2.MORPH_OPEN,
                              kernel=element_5x5)
    opened_closed = cv2.morphologyEx(src=opened,
                                     op=cv2.MORPH_CLOSE,
                                     kernel=element_5x5)
    plt_save(image=opened_closed, title='Opened -> Closed')

    # Apply morphological operations to the grayscale image
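    # (the morphological gradient, dilation minus erosion, leaves the object outlines)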
    edge = cv2.morphologyEx(src=image_gray,
                            op=cv2.MORPH_GRADIENT,
                            kernel=element_3x3)
    plt_save(image=255 - edge, title='Gradient | Edge')

    # Apply threshold to obtain a binary image
    threshold = 80
    _, thresh_binary = cv2.threshold(src=edge,
                                     thresh=threshold,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=thresh_binary,
             title='Gradient | Edge -> Thresh Binary | Edge')

    # 7x7 Black Top-hat Image
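    # (black top-hat = closing minus source: highlights dark details smaller than the 7x7 element)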
    black_hat = cv2.morphologyEx(src=image_gray,
                                 op=cv2.MORPH_BLACKHAT,
                                 kernel=element_7x7)
    plt_save(image=255 - black_hat, title='7x7 Black Top-hat')

    # Apply threshold to obtain a binary image
    threshold = 25
    _, thresh_binary = cv2.threshold(src=black_hat,
                                     thresh=threshold,
                                     maxval=255,
                                     type=cv2.THRESH_BINARY)
    plt_save(image=255 - thresh_binary,
             title='7x7 Black Top-hat -> Thresh Binary | Edge')

    # Apply the black top-hat transform using a 7x7 structuring element
    closed = cv2.morphologyEx(src=thresh_binary,
                              op=cv2.MORPH_CLOSE,
                              kernel=element_7x7)
    plt_save(image=255 - closed, title='7x7 Black Top-hat -> Closed')

    pass
def show_enhancement(src):
    balance_img1 = white_balance(src)
    plt_save(balance_img1, 'White Balance 1')

    # Grey-world algorithm
    balance_img2 = grey_world(src)
    plt_save(balance_img2, 'White Balance 2')

    # Histogram equalization
    balance_img3 = his_equl_color(src)
    plt_save(balance_img3, 'White Balance 3')

    # Retinex (retina-and-cortex) enhancement algorithms
    # Single-scale Retinex
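    # (SSR estimates illumination with a Gaussian blur and keeps the reflectance,
    #  roughly R = log(I) - log(I * G_sigma); the 300 below is presumably that sigma)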
    ssr1 = single_scale_retinex(src, 300)
    ssr1[ssr1 < 0] = 0
    plt_save(ssr1, 'Single Scale Retinex 1')

    ssr2 = s_s_r(src)
    plt_save(ssr2, 'Single Scale Retinex 2')

    # Multi-scale Retinex
    msr1 = multi_scale_retinex(src, [15, 80, 250])
    msr1[msr1 < 0] = 0
    plt_save(msr1, 'Multi Scale Retinex 1')

    msr2 = m_s_r(src, sigma_list=[15, 80, 250])
    plt_save(msr2, 'Multi Scale Retinex 2')

    msrcr1 = m_s_r_c_r(src, sigma_list=[15, 80, 250])
    plt_save(msrcr1, 'Multi Scale Retinex With Color Restoration 1')

    # Automatic white balance (AWB)
    awb = automatic_white_balance(src)
    plt_save(awb, 'Automatic White Balance')

    # Automatic color equalization (ACE)
    balance_img4 = automatic_color_equalization(src)
    plt_save(balance_img4, 'Automatic Color Equalization')