예제 #1
0
def adaptive_thresholding():
    '''
    Instead of using one fixed value for image thresholding we can use an
    algorithm that adjusts the threshold value for small regions of the image.
    This helps when there are different lighting conditions across an image.

    Reads img/adaptive2.png as grayscale and plots a global threshold next to
    mean and Gaussian adaptive thresholds for comparison.
    '''

    image_src = cv2.imread("img/adaptive2.png", cv2.IMREAD_GRAYSCALE)

    threshold = 127

    # Plain global threshold, used as the baseline for comparison.
    ret, thresh1 = cv2.threshold(image_src, threshold, 255, cv2.THRESH_BINARY)

    '''
    For every pixel in the input image look at the neighbourhood of size 11x11 pixels.
    Calculate the mean value of the pixels in the block and subtract the constant 2 from that mean.
    Set the pixel value to 255 if its value in the original image is greater than the subtracted mean.
    Otherwise set it to 0.
    '''
    thresh2 = cv2.adaptiveThreshold(image_src, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 11, 2)

    '''
    Similar approach:
    For pixels in the block calculate a weighted sum, where the weights are Gaussian distributed.
    The pixel in the center of the block has the largest weight, pixels on the edge of the block have the smallest weight.
    Divide the weighted sum by the pixel count and use that value as the threshold.
    '''
    thresh3 = cv2.adaptiveThreshold(image_src, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 11, 2)

    # Original line ended with a stray trailing comma, which silently wrapped
    # the call's result in a 1-tuple; removed.
    plot_images_list([[image_src, thresh1], [thresh2, thresh3]])
예제 #2
0
def global_thresholding():
    '''
    Demonstrate the five basic global thresholding modes of cv2.threshold
    on a grayscale image and plot the results side by side.

    Note: OpenCV compares each pixel with `value > threshold` (strictly
    greater), and THRESH_TRUNC / THRESH_TOZERO* ignore the maxval argument.
    '''
    # Important: thresholding works on grayscale images.
    image_src = cv2.imread("img/opencv.png", cv2.IMREAD_GRAYSCALE)

    threshold = 20

    # Every pixel with value > threshold is set to 255, all others to 0.
    ret, thresh1 = cv2.threshold(image_src, threshold, 255, cv2.THRESH_BINARY)

    # Every pixel with value > threshold is set to 0, all others to 255.
    ret, thresh2 = cv2.threshold(image_src, threshold, 255,
                                 cv2.THRESH_BINARY_INV)

    # Every pixel with value > threshold is truncated to the threshold value,
    # all others stay the same (the 255 maxval argument is ignored).
    ret, thresh3 = cv2.threshold(image_src, threshold, 255, cv2.THRESH_TRUNC)

    # Every pixel with value > threshold stays the same, all others become 0.
    ret, thresh4 = cv2.threshold(image_src, threshold, 255, cv2.THRESH_TOZERO)

    # Every pixel with value > threshold is set to 0, all others stay the same.
    ret, thresh5 = cv2.threshold(image_src, threshold, 255,
                                 cv2.THRESH_TOZERO_INV)

    plot_images_list([[image_src, thresh1, thresh2],
                      [thresh3, thresh4, thresh5]])
예제 #3
0
def test_morphology_operations(image):
    '''
    Binarize the given grayscale image and compare the four basic
    morphological operators (erode, dilate, open, close) side by side.
    '''
    _, binary = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)

    # 3x3 cross-shaped structuring element shared by all four operations.
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    eroded = cv2.erode(binary, cross, iterations=1)
    dilated = cv2.dilate(binary, cross, iterations=1)
    opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, cross)
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, cross)

    plot_images_list([[image, binary], [eroded, dilated], [opened, closed]])
예제 #4
0
def check_cube_rgb_channels():
    '''
    Split the light and dark cube images into their B, G, R channels and
    plot the originals next to the individual channels.

    Observations:
    - In the B channel blues are similar to whites.
    - In the G channel greens, yellows and whites are similar.
    - In the R channel reds, oranges and yellows are similar.
    - The darker image has lower intensities across all channels when
      compared to the lighter image.
    '''
    plot_images_list([light_bgr, dark_bgr])

    b_g_r_light = cv2.split(light_bgr)
    b_g_r_dark = cv2.split(dark_bgr)

    plot_images_list([[light_bgr] + b_g_r_light,
                      [dark_bgr] + b_g_r_dark])
예제 #5
0
def test_image_padding(image, border=50):
    '''
    Convolutions with 'same' mode are implemented using image padding (adding values on image borders).
    The chosen padding method affects output results near the image border.
    OpenCV uses cv2.BORDER_REFLECT_101 by default in cv2.filter2D.

    https://docs.opencv.org/master/d2/de8/group__core__array.html#ga2ac1049c2c3dd25c2b41bffe17658a36
    https://numpy.org/doc/stable/reference/generated/numpy.pad.html

    :param image: input image to pad
    :param border: padding width in pixels applied on every side
    '''
    # BUGFIX: the border value must be passed as the keyword `value=`.
    # Passing 0 positionally after borderType lands in the `dst` parameter
    # slot of copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]).
    image_pad_constant = cv2.copyMakeBorder(image, border, border, border, border, cv2.BORDER_CONSTANT, value=0)
    image_pad_replicate = cv2.copyMakeBorder(image, border, border, border, border, cv2.BORDER_REPLICATE)
    image_pad_reflect = cv2.copyMakeBorder(image, border, border, border, border, cv2.BORDER_REFLECT)
    image_pad_reflect101 = cv2.copyMakeBorder(image, border, border, border, border, cv2.BORDER_REFLECT_101) # default
    image_pad_wrap = cv2.copyMakeBorder(image, border, border, border, border, cv2.BORDER_WRAP)

    plot_images_list([[image, image_pad_constant, image_pad_replicate], [image_pad_reflect, image_pad_reflect101, image_pad_wrap]])
예제 #6
0
def check_cube_lab_channels():
    '''
    Convert the light and dark cube images to the Lab color space, split the
    channels and plot the originals next to the individual channels.

    L - Lightness (intensity)
    A - Color component with range Green to Magenta
    B - Color component with range Blue to Yellow
    We have a dedicated channel for the intensity of color in this colorspace.
    '''

    # Convert images from BGR to Lab color space.
    light_lab = cv2.cvtColor(light_bgr, cv2.COLOR_BGR2LAB)
    dark_lab = cv2.cvtColor(dark_bgr, cv2.COLOR_BGR2LAB)

    light_channels = cv2.split(light_lab)
    dark_channels = cv2.split(dark_lab)

    plot_images_list([[light_bgr]+light_channels, [dark_bgr]+dark_channels])

# BUGFIX: removed a stray unmatched ''' that followed this function. It opened
# a string literal that consumed the next function's `def` line (pairing with
# the opening quotes of that function's docstring), leaving the rest of the
# file syntactically broken.
def image_translate(image_path):
    '''
    Translate the image from the origin (0, 0) by one quarter of its height
    and width in the positive axes direction.
    The shape of the first resulting image is unchanged; the second output
    doubles the canvas so the translated content is not clipped.

    :param image_path: path of the image file to load and translate
    '''

    image_src = cv2.imread(image_path)
    (image_src_h, image_src_w, _) = image_src.shape

    translate_height = image_src_h / 4
    # BUGFIX: was image_src_h / 4 (copy-paste) — the horizontal shift must be
    # derived from the image width.
    translate_width = image_src_w / 4

    # Build the affine translation matrix: start from identity and set the
    # tx (column 2, row 0) and ty (column 2, row 1) offsets.
    translation_transform = identity_transform.copy()
    translation_transform[0, 2] = translate_width
    translation_transform[1, 2] = translate_height

    # Transform the src image using the transformation matrix and set the
    # size of the output image.
    img_translation = cv2.warpAffine(image_src, translation_transform,
                                     (image_src_w, image_src_h))
    img_translation2 = cv2.warpAffine(image_src, translation_transform,
                                      (image_src_w * 2, image_src_h * 2))

    plot_images_list([[image_src], [img_translation], [img_translation2]])
예제 #8
0
def check_cube_hsv_channels():
    '''
    Convert the light and dark cube images to the HSV color space, split the
    channels and plot the originals next to the individual channels.

    H - Hue (dominant wavelength)
    S - Saturation (how vivid the color is)
    V - Value (intensity - lightness or darkness of the color)
    We have a dedicated channel for the intensity of color in this colorspace.
    '''

    # BUGFIX comment: convert images from BGR to HSV color space
    # (the original comment incorrectly said "bgr to lab").
    light_hsv = cv2.cvtColor(light_bgr, cv2.COLOR_BGR2HSV)
    dark_hsv = cv2.cvtColor(dark_bgr, cv2.COLOR_BGR2HSV)

    light_channels = cv2.split(light_hsv)
    dark_channels = cv2.split(dark_hsv)

    plot_images_list([[light_bgr]+light_channels, [dark_bgr]+dark_channels])

    # We can see that the main difference between the light and dark image is
    # in the Value channel.
    # The Hue channel contains the information on the dominant wavelength of
    # the color, so we should look at it to segment the images.
    # What happened to red? Hue is an angle on a circle with values from
    # 0-360 degrees and red colors are in the ranges [0, 60] and [300, 360].
    # NOTE: OpenCV stores hue as H/2 in [0, 180) for 8-bit images, so red
    # wraps around both ends of that range.