Example #1
def erosion(img, show=True):
	kernel = np.ones((3,3), np.uint8)

	erosion_img = cv2.erode(img, kernel, iterations=1)
	if show:
		show_pictures([img, erosion_img], titles=['Original', 'Erosion'])
	return erosion_img
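Erosion keeps a pixel white only if every pixel under the 3x3 kernel is white, so isolated white specks disappear and the foreground shrinks. A minimal usage sketch (the path and threshold value are just examples):

# Usage sketch: threshold a grayscale image first, then erode the binary result
gray = cv2.imread('../images/cctv_example1.jpg', 0)            # example path
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
eroded = erosion(binary)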
Example #2
def dilation(img, show=True):
	kernel = np.ones((3,3), np.uint8)

	dilation_img = cv2.dilate(img, kernel, iterations=1)
	if show:
		show_pictures([img, dilation_img], titles=['Original', 'Dilation'])
	return dilation_img
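Dilation is the dual operation: a pixel turns white if any pixel under the kernel is white, so the foreground grows and small holes are filled. Running the two helpers back to back reproduces what the opening in Example #6 does (that helper uses a 5x5 kernel instead of 3x3):

# Sketch: erosion followed by dilation is one pass of morphological opening
eroded = erosion(binary, show=False)     # 'binary' from the erosion sketch above
opened = dilation(eroded, show=False)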
Example #3
def contour():
	img = cv2.imread('../images/cctv_example1.jpg')
	img1 = img.copy()
	img2 = img.copy()
	img3 = img.copy()
	img4 = img.copy()
	gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	ret, thresh = cv2.threshold(gray_img, 127, 255, 0)
	# Note: this 3-value unpacking is the OpenCV 3.x findContours signature;
	# OpenCV 4.x returns only (contours, hierarchy).
	th, contours_tree, hierarchy1 = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
	th, contours_external, hierarchy2 = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	th, contours_list, hierarchy3 = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
	th, contours_ccomp, hierarchy4 = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

	print(type(contours_tree), type(hierarchy1))
	print('Tree : ', len(contours_tree), len(hierarchy1))
	print('External : ', len(contours_external), len(hierarchy2))
	print('List : ', len(contours_list), len(hierarchy3))
	print('ccomp : ', len(contours_ccomp), len(hierarchy4))

	cv2.drawContours(img1, contours_tree, -1, (0, 255, 0), 1)
	cv2.drawContours(img2, contours_external, -1, (0, 255, 0), 1)
	cv2.drawContours(img3, contours_list, -1, (0, 255, 0), 1)
	cv2.drawContours(img4, contours_ccomp, -1, (0, 255, 0), 1)
	show_pictures(plist=[gray_img, thresh, img1, img2, img3, img4], 
		titles=['Gray Img', 'Thresh', 'Contour Tree', 'Contour External', 'Contour List', 'Contour ccomp'])
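The four retrieval modes differ only in which contours come back and how the hierarchy is organized: RETR_EXTERNAL keeps only the outermost contours, RETR_LIST returns every contour without hierarchy, RETR_CCOMP builds a two-level hierarchy, and RETR_TREE keeps the full nesting tree. A common follow-up step, which could be appended at the end of contour() (it reuses the local names above), is to keep only the largest outer contour:

	# Sketch: draw only the largest external contour (e.g. the dominant blob in the frame)
	largest = max(contours_external, key=cv2.contourArea)
	cv2.drawContours(img2, [largest], -1, (0, 0, 255), 2)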
Example #4
def threshing():
	img = cv2.imread('cctv_example1.jpg', 0)

	ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
	th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
	th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
	ret2, th4 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	blur = cv2.GaussianBlur(img, (5, 5), 0)
	ret3, th5 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	# titles = ['original', 'Global Threshing (v=127)', 'Adaptive Mean', 'Adaptive Gaussian']
	# plist = [img, th1, th2, th3]
	plist = [img, th4, blur, th5]
	titles = ['Original', 'Otsu', 'Gaussian Blur', 'Gaussian Blur + Otsu']

	show_pictures(plist, titles=titles)
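With the THRESH_OTSU flag, cv2.threshold ignores the passed-in threshold and derives one from the image histogram; the chosen value comes back as the first return value. A self-contained sketch (example path as above):

# Sketch: print the threshold Otsu selected automatically
gray = cv2.imread('cctv_example1.jpg', 0)
otsu_val, otsu_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print('Otsu picked threshold:', otsu_val)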
Example #5
def closing(img, show=True, iterations=1):
	"""
	Closing 기법 : Dilation -> Erosion 적용
	물체 내부의 노이즈가 많을때 적합
	"""
	kernel = np.ones((5,5), np.uint8)
	
	closing_list = []
	closing_list.append(img)
	titles = []
	titles.append('Original')

	for i in range(1, iterations+1):
		closing_list.append(cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=i))
		titles.append('Closing Iter='+str(i))

	if show:
		show_pictures(closing_list, titles=titles)
	
	return closing_list
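A usage sketch for the helper above, assuming a binary image such as the one produced in the erosion sketch (Example #1):

# Sketch: fill small black holes inside the foreground with two closing passes
closed_list = closing(binary, show=True, iterations=2)
result = closed_list[-1]                 # output of the strongest iteration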
Example #6
def opening(img, show=True, iterations=1):
	"""
	Opening 기법 : Erosion -> Dilation 적용
	물체 외부의 노이즈가 많을때 적합
	"""
	kernel = np.ones((5,5), np.uint8)
	
	opening_list = []
	opening_list.append(img)
	titles = []
	titles.append('Original')

	for i in range(1, iterations+1):
		opening_list.append(cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=i))
		titles.append('Opening Iter='+str(i))

	if show:
		show_pictures(opening_list, titles=titles)
	
	return opening_list
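Opening and closing are complementary: opening removes white specks outside the objects, closing fills black holes inside them. A sketch comparing one pass of each on the same binary image (again reusing 'binary' from the earlier sketch):

# Sketch: side-by-side comparison of one opening pass and one closing pass
opened_list = opening(binary, show=False, iterations=1)
closed_list = closing(binary, show=False, iterations=1)
show_pictures([binary, opened_list[-1], closed_list[-1]],
	titles=['Binary', 'Opening', 'Closing'])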
Example #7
def bit_operation(xpos, ypos):
    img_back = cv2.imread('cute.png')
    img_logo = cv2.imread('opencv-logo.png')

    # cv2.imshow('window', img_back)
    # cv2.imshow('window2', img_logo)

    row, col, ch = img_logo.shape
    # Region of the background that the logo will occupy:
    # xpos is the starting row, ypos the starting column.
    roi = img_back[xpos:xpos + row, ypos:col + ypos]

    # mask is white where the logo has content (grayscale value > 10);
    # mask_inv is white where the logo image is black.
    logo2gray = cv2.cvtColor(img_logo, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(logo2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # cv2.imshow('window1', mask)
    # cv2.imshow('window2', mask_inv)
    # cv2.moveWindow('window2', 200,0)

    # Black out the logo-shaped area in the background ROI, and keep only
    # the logo pixels (the logo's black background is removed).
    img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_fg = cv2.bitwise_and(img_logo, img_logo, mask=mask)

    # cv2.imshow('window3', img_bg)
    # cv2.moveWindow('window3', 400,0)
    # cv2.imshow('window4', img_fg)
    # cv2.moveWindow('window4', 600,0)

    # The two images are non-zero in disjoint regions, so adding them pastes the logo onto the ROI.
    dst = cv2.add(img_bg, img_fg)
    # cv2.imshow('window5', dst)
    # cv2.moveWindow('window5', 800, 0)

    img_back[xpos:xpos + row, ypos:col + ypos] = dst

    # cv2.imshow('window6', img_back)
    # cv2.moveWindow('window6', 1000, 0)

    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    plist = [mask, mask_inv, img_bg, img_fg, dst]
    show_pictures(plist,
                  titles=['mask', 'mask_inv', 'img_bg', 'img_fg', 'dst'])
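A usage sketch: xpos selects the starting row and ypos the starting column of the region the logo is pasted into, so the only requirement is that the logo stays inside the background image (the numbers here are placeholders):

# Sketch: paste the logo with its top-left corner at row 50, column 100
bit_operation(50, 100)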
Example #8
import numpy as np
import cv2
from my_cv_lib import show_pictures



if __name__ == '__main__':
	img = cv2.imread('../images/bolt.png')
	print(img.shape)
	img = np.float32(img) / 255.0

	# Calculate gradient
	gx = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=1)
	gy = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=1)

	# Calculate magnitude and direction ( in degrees )
	mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)

	"""
	The gradient image removed a lot of non-essential information ( e.g. constant colored background ), 
	but highlighted outlines. 
	In other words, you can look at the gradient image and still easily say there is a person in the picture.
	"""

	show_pictures([np.absolute(gx), np.absolute(gy), mag, img],
		titles=['Absolute value of gx', 'Absolute value of gy', 'Magnitude', 'Original'])
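	# gx, gy, mag and angle all keep the three color channels of the input, because
	# Sobel and cartToPolar work per channel. A sketch of one common next step:
	# reduce the magnitude to a single map by taking the per-pixel maximum over the
	# B, G, R channels (the convention used when building a HOG descriptor for color images).
	mag_max = np.max(mag, axis=2)
	show_pictures([img, mag_max], titles=['Original', 'Max-channel magnitude'])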