def LK_Optical_Flow(image, p0, mask=None):
    # Generator: yields the annotated frame and receives the next frame via send().
    # Assumes a module-level `color` palette array; `warn` is warnings.warn.
    lk_params = dict(winSize=(50, 50), maxLevel=5,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    image_old = cv2.add(image, np.zeros(np.shape(image), dtype=np.uint8), mask=mask)
    linemask = np.zeros_like(image)
    while True:
        p1, st, err = cv2.calcOpticalFlowPyrLK(image_old, image, p0, None, **lk_params)
        try:
            good_new = p1[st == 1]
            good_old = p0[st == 1]
        except TypeError:
            warn("Lost track")
            return
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            # Drawing functions require integer coordinates.
            linemask = cv2.line(linemask, (int(a), int(b)), (int(c), int(d)),
                                color[i].tolist(), 2)
            image = cv2.circle(image, (int(a), int(b)), 5, color[i].tolist(), -1)
        img = cv2.add(image, linemask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        image_old = image.copy()
        p0 = good_new.reshape(-1, 1, 2)
        image = (yield img)
def random_bright_image(self, image, brightness_range):
    """
    Randomly brighten the given image.

    The intent is to allow a model to generalize across images
    taken at different lighting levels.

    Parameters
    ----------
    image : np.array
        Image to be brightened.
    brightness_range : tuple of ints
        Range from which the brightness offset (in pixel values) is chosen.

    Returns
    -------
    Brightened image as np.array.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    h, s, v = cv2.split(hsv)
    start_range, end_range = brightness_range
    rand_val = random.randint(start_range, end_range)
    # cv2.add saturates uint8 arithmetic, so the result is already clipped to [0, 255].
    v = cv2.add(v, rand_val)
    final_hsv = cv2.merge((h, s, v))
    image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB)
    return np.copy(image)
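# A minimal usage sketch for random_bright_image. The method ignores `self`, so it
# is called unbound with None here, and the random array is a stand-in for a real
# RGB image.
import random

import cv2
import numpy as np

rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in RGB image
brightened = random_bright_image(None, rgb, (10, 60))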
def fit_colors2():
    for c in range(6):
        cara = ncara[c]
        for npunto in range(1, 9):
            punto = cv2.imread(r'Fotos_caras\Puntos\cara_' + ncara[c]
                               + r'\C_' + ncara[c] + '_P_' + str(npunto) + '.jpg')
            # cv2.imshow('Input image', punto)
            frameHSV = cv2.cvtColor(punto, cv2.COLOR_BGR2HSV)
            # As the HSV image shows, the red hue range wraps around,
            # so two masks are combined.
            maskRed1 = cv2.inRange(frameHSV, R2_rojo[0], R2_rojo[1])
            maskRed2 = cv2.inRange(frameHSV, R2_rojo[2], R2_rojo[3])
            maskRed = cv2.add(maskRed1, maskRed2)
            maskBlue = cv2.inRange(frameHSV, R_azul[0], R_azul[1])
            maskOrange = cv2.inRange(frameHSV, R_naranja[0], R_naranja[1])
            maskGreen = cv2.inRange(frameHSV, R_verde[0], R_verde[1])
            maskYellow = cv2.inRange(frameHSV, R_amarillo[0], R_amarillo[1])
            cv2.imshow('Azul', maskBlue)
            cv2.imshow('Verde', maskGreen)
            cv2.imshow('Amarillo', maskYellow)
            cv2.imshow('Rojo', maskRed)
            cv2.imshow('Naranja', maskOrange)
            c_azul = contorno(maskBlue, Azul)
            c_verde = contorno(maskGreen, Verde)
            c_rojo = contorno(maskRed, Rojo)
            c_naranja = contorno(maskOrange, Naranja)
            c_amarillo = contorno(maskYellow, Amarillo)
            color_cuadro = [c_azul, c_naranja, c_rojo, c_verde, c_amarillo]
            colores = [Azul, Naranja, Rojo, Verde, Amarillo]
            for t in range(len(color_cuadro)):
                if color_cuadro[t]:
                    caras.modificar_caras(cara, npunto, colores[t])
def addTwoImgs(self, img1_RGBA, face_param):
    # Paste the sticker onto the image.
    self.img = cv2.imread(self.path)
    self.img = cv2.resize(self.img,
                          (int(face_param[2]), int(face_param[2])),
                          interpolation=cv2.INTER_CUBIC)
    try:
        self.rows, self.cols = self.img.shape[:2]
    except AttributeError:  # imread returned None
        NoteLabel.config(text='Fail in loading sticker!')
    self.getStickerPosition(face_param)
    if (self.x1 >= 0 and self.x2 <= img1_RGBA.shape[1]
            and self.y1 >= 0 and self.y2 <= img1_RGBA.shape[0]):
        # Build the mask.
        roi = img1_RGBA[self.y1:self.y2, self.x1:self.x2]
        sticker_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(sticker_gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGBA)
        dst = cv2.add(img1_bg, self.img)
        img1_RGBA[self.y1:self.y2, self.x1:self.x2] = dst
        return True, img1_RGBA
    else:
        NoteLabel.config(text="Not enough space for the sticker!")
        return False, None
def __image_preprocessing(self, im):
    # im --> PIL image obtained from the pdf; save it as a JPEG first.
    im.save(directory + '\\Text\\TextD\\image.jpg', 'JPEG')
    # Read the saved image back with OpenCV. Note that imread's second argument
    # is an IMREAD_* flag, not a color-conversion code, so the image is loaded
    # in BGR and converted explicitly below.
    img = cv2.imread(directory + '\\Text\\TextD\\image.jpg')
    # Convert to grayscale, i.e. reduce from 3 channels to 1.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu binarization automatically chooses a threshold: pixel values below it
    # become 0 and the rest become the maximum (255).
    img = cv2.threshold(img, 100, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)[1]
    # Morphological filters remove noise and smooth the image; a (4, 4)
    # rectangular structuring element is used.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
    # The top-hat result is added back to join broken parts and smooth the edges.
    tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
    # blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
    add = cv2.add(img, tophat)
    # sub = cv2.subtract(add, blackhat)
    if add.shape != (2200, 1700):
        add = cv2.resize(add, (1700, 2200), interpolation=cv2.INTER_AREA)
    return add
def top_hat_demo(image):
    # Top-hat = original - opening: isolates bright noise and fine detail.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
    dst = cv.morphologyEx(gray, cv.MORPH_TOPHAT, kernel)
    # Add a constant gray level so the result is easier to see.
    cimage = np.full(gray.shape, 100, dtype=np.uint8)
    dst = cv.add(dst, cimage)
    cv.imshow("result", dst)
def black_hat_demo(image):
    # Black-hat = closing - original: reveals small holes and dark spots.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
    dst = cv.morphologyEx(gray, cv.MORPH_BLACKHAT, kernel)
    # Add a constant gray level so the result is easier to see.
    cimage = np.full(gray.shape, 100, dtype=np.uint8)
    dst = cv.add(dst, cimage)
    cv.imshow("result", dst)
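# A minimal usage sketch for the two demos above; 'input.jpg' is a placeholder path.
import cv2 as cv
import numpy as np

src = cv.imread('input.jpg')
top_hat_demo(src)
cv.waitKey(0)
black_hat_demo(src)
cv.waitKey(0)
cv.destroyAllWindows()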
def _merge_pics(original_frame, solution_warped):
    # Binarize the warped solution to get a mask of the drawn pixels.
    ret, warp = cv2.threshold(solution_warped, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(warp)
    # Black out the solution area in the original frame.
    background = cv2.bitwise_and(original_frame, original_frame, mask=mask_inv)
    # Colorize the solution and composite it over the frame.
    foreground = cv2.cvtColor(solution_warped, cv2.COLOR_GRAY2BGR)
    foreground[solution_warped > 0] = (255, 55, 0)
    dst = cv2.add(background, foreground)
    return dst
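# A minimal usage sketch for _merge_pics with synthetic inputs: a gray BGR frame
# and a single-channel image containing the drawn solution.
import cv2
import numpy as np

frame = np.full((100, 100, 3), 80, dtype=np.uint8)
solution = np.zeros((100, 100), dtype=np.uint8)
cv2.line(solution, (10, 10), (90, 90), 255, 3)
merged = _merge_pics(frame, solution)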
def overlay_images(img1, img2, mask):
    # img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img1_bg = cv2.bitwise_and(img1, img1, mask=mask_inv)
    img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
    dst = cv2.add(img1_bg, img2_fg)
    return dst
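# A minimal usage sketch for overlay_images with synthetic inputs; the mask marks
# the overlay's opaque pixels.
import cv2
import numpy as np

base = np.full((100, 100, 3), 40, dtype=np.uint8)
logo = np.zeros((100, 100, 3), dtype=np.uint8)
cv2.circle(logo, (50, 50), 30, (0, 0, 255), -1)
logo_gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
_, logo_mask = cv2.threshold(logo_gray, 10, 255, cv2.THRESH_BINARY)
combined = overlay_images(base, logo, logo_mask)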
def decrypt(share_1: np.ndarray,
            share_2: np.ndarray,
            denoise: bool = True,
            fix_proportions: bool = True):
    result: np.ndarray = cv2.add(share_1, share_2)
    if denoise:
        _denoise_decrypted_image(result)
    if fix_proportions:
        result = _fix_decrypted_image_proportion(result)
    return result
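# A minimal usage sketch for decrypt with synthetic shares; the denoise and
# proportion helpers live elsewhere in the project, so they are disabled here.
import cv2
import numpy as np

share_a = np.random.choice([0, 255], size=(64, 64)).astype(np.uint8)
share_b = cv2.bitwise_not(share_a)
revealed = decrypt(share_a, share_b, denoise=False, fix_proportions=False)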
def find_junctions(skeleton):
    """Find the junction points of a skeletonized binary image.

    Parameters
    ----------
    skeleton : numpy.ndarray
        A binary image of the skeleton, with 0 representing the background
        and 1 representing the object.

    Returns
    -------
    junction_image : numpy.ndarray
        A binary image with the found junction points set to 255.
    """
    # Kernels for hit-and-miss; k1, k2, k3 and k4 identify intersections.
    k1 = np.array([[-1, 1, -1],
                   [1, 1, 1],
                   [-1, -1, -1]], dtype=int)
    k2 = np.array([[1, -1, 1],
                   [-1, 1, -1],
                   [1, -1, -1]], dtype=int)
    k3 = np.array([[1, -1, 1],
                   [0, 1, 0],
                   [0, 1, 0]], dtype=int)
    k4 = np.array([[-1, 1, -1],
                   [1, 1, 0],
                   [-1, 0, 1]], dtype=int)
    # k5 identifies corners.
    k5 = np.array([[-1, -1, 0, 0, 0],
                   [-1, -1, 1, 0, 0],
                   [-1, -1, 0, 1, 0],
                   [-1, -1, -1, -1, -1],
                   [-1, -1, -1, -1, -1]], dtype=int)
    skeleton = (skeleton > 0).astype(np.uint8) * 255
    # dst accumulates all matches.
    dst = np.zeros(skeleton.shape, dtype=np.uint8)
    # Do hit-and-miss for all possible directions (0, 90, 180, 270 degrees).
    for _ in range(4):
        dst = cv2.add(dst, cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, k1))
        dst = cv2.add(dst, cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, k2))
        dst = cv2.add(dst, cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, k3))
        dst = cv2.add(dst, cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, k4))
        dst = cv2.add(dst, cv2.morphologyEx(skeleton, cv2.MORPH_HITMISS, k5))
        # Rotate the kernels for the next direction.
        k1 = np.rot90(k1)
        k2 = np.rot90(k2)
        k3 = np.rot90(k3)
        k4 = np.rot90(k4)
        k5 = np.rot90(k5)
    return dst
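# A minimal usage sketch for find_junctions: a synthetic T-shaped skeleton whose
# center pixel is a junction.
import cv2
import numpy as np

skel = np.zeros((9, 9), dtype=np.uint8)
skel[4, 1:8] = 1   # horizontal stroke
skel[4:8, 4] = 1   # vertical stroke down from the center
junctions = find_junctions(skel)
print(np.argwhere(junctions == 255))  # expected to include (4, 4)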
def maximizeContrast(imgGrayscale):
    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)
    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
    return imgGrayscalePlusTopHatMinusBlackHat
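# A minimal usage sketch for maximizeContrast on a synthetic low-contrast
# grayscale patch.
import cv2
import numpy as np

gray = np.random.randint(100, 156, (64, 64), dtype=np.uint8)
enhanced = maximizeContrast(gray)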
def circle_mask(
    img: np.ndarray,
    color,
    size: int = 0,
    antialiasing: float = 2,
):
    if img.shape[0] != img.shape[1]:
        raise Exception(
            f"Image is non-square ({img.shape[0]}x{img.shape[1]}), "
            + "cannot apply circle mask.")
    size = size if size > 0 else img.shape[0]
    aa_size = int(antialiasing * size)
    img = cv2.resize(img, (aa_size, aa_size))

    # convert to 4-channel image (including alpha)
    if img.shape[2] < 4:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)

    # create mask to extract face with
    mask = np.zeros(img.shape, img.dtype)
    cv2.circle(
        img=mask,
        center=(int(mask.shape[0] / 2), int(mask.shape[1] / 2)),
        radius=int(mask.shape[0] / 2),
        color=(255, 255, 255, 255),
        # thickness -1: fill inner circle
        thickness=-1,
    )
    img = cv2.bitwise_and(img, mask)

    # create background and cut out circle
    background = np.full(img.shape, color, img.dtype)
    cv2.circle(
        img=background,
        center=(int(img.shape[0] / 2), int(img.shape[1] / 2)),
        radius=int(img.shape[0] / 2),
        color=(0, 0, 0),
        thickness=-1,
    )

    # add background to face
    img = cv2.add(img, background)
    img = cv2.resize(img, (size, size))
    return img
def optical_flow(imgs, dst='./capture_folder'):
    for idx, file in enumerate(imgs):
        copyfile(file, dst + '/' + str(idx) + '.bmp')
    # Parameters for Shi-Tomasi corner detection.
    feature_params = dict(maxCorners=100, qualityLevel=0.3,
                          minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow.
    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    cap = cv2.VideoCapture(dst + "/%01d.bmp")
    color = np.random.randint(0, 255, (100, 3))
    # Take the first frame and find corners in it.
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing purposes.
    mask = np.zeros_like(old_frame)
    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Calculate optical flow.
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        # Select good points.
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # Draw the tracks (drawing functions need integer coordinates).
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                            color[i].tolist(), 2)
            frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        # k = cv2.waitKey(30) & 0xff
        # if k == 27:
        #     break
        # Now update the previous frame and previous points.
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
    cv2.destroyAllWindows()
    cap.release()
    return mask
def draw_emoji(frame, emoji_index, emoji_pos):
    real_emoji, inverse_mask = EMOJI_DICT.get(emoji_index)
    x, y, r = emoji_pos
    top = y - r
    bottom = y + r
    left = x - r
    right = x + r
    emoji = cv2.resize(real_emoji, (right - left, bottom - top))
    inverse_mask = cv2.resize(inverse_mask, (right - left, bottom - top))
    # Black out the emoji area in the frame, then add the emoji on top.
    overlap_area = frame[top:bottom, left:right]
    overlap_area = cv2.bitwise_and(overlap_area, overlap_area, mask=inverse_mask)
    overlap_area = cv2.add(overlap_area, emoji)
    frame[top:bottom, left:right] = overlap_area
    return frame
def img_calc(img1, img2, method):
    if method == "add":
        return cv.add(img1, img2)
    elif method == "sub":
        return cv.subtract(img1, img2)
    elif method == "multi":
        return cv.multiply(img1, img2)
    elif method == "divide":
        return cv.divide(img1, img2)
    elif method == "and":
        return cv.bitwise_and(img1, img2)
    elif method == "or":
        return cv.bitwise_or(img1, img2)
    elif method == "not":
        # bitwise_not is unary; its second parameter is an output buffer,
        # so only img1 is inverted here.
        return cv.bitwise_not(img1)
    else:
        return False
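# A minimal usage sketch for img_calc with synthetic inputs; note that "add"
# saturates at 255 rather than wrapping like plain NumPy uint8 addition.
import cv2 as cv
import numpy as np

a = np.full((4, 4), 200, dtype=np.uint8)
b = np.full((4, 4), 100, dtype=np.uint8)
print(img_calc(a, b, "add")[0, 0])  # 255: cv.add saturates
print((a + b)[0, 0])                # 44: NumPy uint8 addition wraps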
def _overlap_shares(share_1: np.ndarray, share_2: np.ndarray, value: int):
    # Stack the two shares on a white canvas twice the height of one share,
    # with share_2 shifted up by `value` pixels, and add them where they overlap.
    background: np.ndarray = np.full(
        shape=[share_1.shape[0] * 2, share_1.shape[1]],
        dtype=np.uint8,
        fill_value=255)
    background[share_1.shape[0] - value:share_1.shape[0] * 2 - value,
               0:share_2.shape[1]] = share_2
    background[0:share_1.shape[0], 0:share_1.shape[1]] = share_1
    background_part = background[share_1.shape[0] - value:share_1.shape[0] * 2 - value,
                                 0:share_2.shape[1]]
    added_shares = cv2.add(background_part, share_2)
    background[share_1.shape[0] - value:share_1.shape[0] * 2 - value,
               0:share_2.shape[1]] = added_shares
    return background
def combination(self, bag_img, bbox_img, bbox_mask, bag_json_file, emblem_json):
    bag_img = cv.cvtColor(bag_img, cv.COLOR_RGB2RGBA)
    x, width, y, height = self.return_bbox(bag_json_file)
    bbag_img = self.bbox(bag_img, bag_json_file)
    # Composite the emblem onto the bag crop.
    roi, axis = self.roi_setting(bbag_img, bbox_img)
    bbox_inv = cv.bitwise_not(bbox_mask)
    fg = cv.bitwise_and(bbox_img, bbox_img, mask=bbox_mask)
    bg = cv.bitwise_and(roi, roi, mask=bbox_inv)
    combination_img = cv.add(fg, bg)
    bbag_img[axis[0]:axis[2], axis[1]:axis[3]] = combination_img
    bag_img[int(x):int(width), int(y):int(height)] = bbag_img
    # Compute the offsets that must be added to the json coordinates.
    plus_x = int(x) + axis[0]
    plus_y = int(y) + axis[1]
    new_json = self.modify_json(emblem_json, plus_x, plus_y, bag_json_file)
    return bag_img, new_json
def addTwoImgs(self, img1_RGBA, face_param):
    # Paste the sticker onto the image.
    self.img = cv2.imread(self.path)
    self.rows, self.cols = self.img.shape[:2]
    self.getStickerPosition(face_param)
    if self.x1 >= 0 and self.x2 >= 0 and self.y1 >= 0 and self.y2 >= 0:
        # Build the mask.
        roi = img1_RGBA[self.y1:self.y2, self.x1:self.x2]
        sticker_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(sticker_gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGBA)
        dst = cv2.add(img1_bg, self.img)
        img1_RGBA[self.y1:self.y2, self.x1:self.x2] = dst
        return True, img1_RGBA
    else:
        print("Not enough space for the sticker!")
        return False, None
def cleanImage(image, stage=0):
    V = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # applying topHat/blackHat operations
    topHat = cv2.morphologyEx(V, cv2.MORPH_TOPHAT, kernel)
    blackHat = cv2.morphologyEx(V, cv2.MORPH_BLACKHAT, kernel)
    # add and subtract the morphological results to stretch contrast
    add = cv2.add(V, topHat)
    subtract = cv2.subtract(add, blackHat)
    if stage == 1:
        return subtract
    # threshold_local comes from skimage.filters
    T = threshold_local(subtract, 29, offset=35, method="gaussian", mode="mirror")
    thresh = (subtract > T).astype("uint8") * 255
    if stage == 2:
        return thresh
    # invert image
    thresh = cv2.bitwise_not(thresh)
    return thresh
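# A minimal usage sketch for cleanImage; 'plate.jpg' is a placeholder path, and
# stage=1 stops before the skimage local threshold so only OpenCV is required.
import cv2

plate = cv2.imread('plate.jpg')
contrast_stretched = cleanImage(plate, stage=1)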
def foo(image):
    blank = np.zeros((100, 100, 3), dtype='uint8')
    blank[0:100, 0:100] = 255, 255, 255
    # I want to put the logo in the top-left corner, so I create a ROI
    rows, cols, channels = image.shape
    roi = blank[0:rows, 0:cols]
    # Now create a mask of the logo and create its inverse mask also
    img2gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Now black-out the area of the logo in the ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    # Take only the region of the logo from the logo image
    img2_fg = cv2.bitwise_and(image, image, mask=mask)
    result_image = cv2.add(img1_bg, img2_fg)
    return result_image
import cv2

img1 = cv2.imread('messi5.jpg')
img1 = cv2.resize(img1, (512, 512))
img2 = cv2.imread('WindowsLogo.jpg')
img2 = cv2.resize(img2, (512, 512))

# Saturating per-pixel sum of the two images.
add_img = cv2.add(img1, img2)
cv2.imshow('image', add_img)

# Weighted blend: 0.3 * img1 + 0.7 * img2.
add_weighted = cv2.addWeighted(img1, .3, img2, .7, 0)
cv2.imshow('image2', add_weighted)

cv2.waitKey(0)
cv2.destroyAllWindows()
def rmReflection(self, bhImage, gsInvertedImg):
    img = cv2.add(gsInvertedImg, bhImage)
    return cv2.equalizeHist(cv2.medianBlur(img, 5))
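# A minimal usage sketch for rmReflection with synthetic stand-ins for the
# black-hat image and the inverted grayscale image; `self` is unused, so the
# call is made unbound with None.
import cv2
import numpy as np

bh = np.random.randint(0, 40, (64, 64), dtype=np.uint8)
inv = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
cleaned = rmReflection(None, bh, inv)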
def imagePyramidImg(self):
    # Resize both inputs to 450x450. (Image.ANTIALIAS was removed in Pillow 10;
    # Image.LANCZOS is the equivalent filter.)
    imageFirst = Image.open(self.filename)
    imageLast = imageFirst.resize((450, 450), Image.LANCZOS)
    imageLast.save('img/dist/temp1.jpg')
    imageFirst2 = Image.open(self.filename2)
    imageLast2 = imageFirst2.resize((450, 450), Image.LANCZOS)
    imageLast2.save('img/dist/temp2.jpg')
    A = cv2.imread('img/dist/temp1.jpg')
    B = cv2.imread('img/dist/temp2.jpg')

    # Build Gaussian pyramids for A and B.
    G = A.copy()
    gpA = [G]
    for i in range(6):
        G = cv2.pyrDown(G)
        gpA.append(G)
    G = B.copy()
    gpB = [G]
    for i in range(6):
        G = cv2.pyrDown(G)
        gpB.append(G)

    # Build Laplacian pyramids from the Gaussian ones.
    lpA = [gpA[5]]
    for i in range(6, 0, -1):
        GE = cv2.pyrUp(gpA[i])
        GE = cv2.resize(GE, gpA[i - 1].shape[-2::-1])
        L = cv2.subtract(gpA[i - 1], GE)
        lpA.append(L)
    lpB = [gpB[5]]
    for i in range(6, 0, -1):
        GE = cv2.pyrUp(gpB[i])
        GE = cv2.resize(GE, gpB[i - 1].shape[-2::-1])
        L = cv2.subtract(gpB[i - 1], GE)
        lpB.append(L)

    # Join the left half of A with the right half of B at every level.
    LS = []
    lpAc = []
    for i in range(len(lpA)):
        b = cv2.resize(lpA[i], lpB[i].shape[-2::-1])
        lpAc.append(b)
    for la, lb in zip(lpAc, lpB):
        rows, cols, dpt = la.shape
        ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
        LS.append(ls)

    # Reconstruct the blended image from the joined pyramid.
    ls_ = LS[0]
    for i in range(1, 6):
        ls_ = cv2.pyrUp(ls_)
        ls_ = cv2.resize(ls_, LS[i].shape[-2::-1])
        ls_ = cv2.add(ls_, LS[i])

    # Direct half-and-half join of the originals, kept for comparison.
    B = cv2.resize(B, A.shape[-2::-1])
    real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))
    cv2.imwrite('img/dist/pyramid.jpg', ls_)
# library imports
import cv2 as cv

# load the images
b1 = cv.imread('../../images/moto.jpg')
b2 = cv.imread('../../images/dolphin.jpg')

# add the images together (only valid when their sizes match)
if b1.shape[:2] == b2.shape[:2]:
    sum_img = cv.add(b1, b2)
    cv.imshow('Summed Images', sum_img)
    cv.waitKey(0)
    cv.destroyAllWindows()

# scalar addition: a bare int would be treated as the scalar (200, 0, 0, 0) and
# brighten only the first channel, so one value is given per channel
scaled_img = cv.add(b1, (200, 200, 200, 0))
cv.imshow('Scalar Addition', scaled_img)
cv.waitKey(0)
cv.destroyAllWindows()
        if cv2.waitKey(25) & 0xFF == ord('Q'):
            break
    # Break the loop when no frame is returned
    else:
        break

for i in range(0, 420):
    foreground = img1_array[i]
    background = cv2.imread("Blending/girl.jpg")
    alpha = cv2.imread("Blending/mask.png")

    # Convert uint8 to float
    foreground = foreground.astype(float)
    background = background.astype(float)

    # Normalize the alpha mask to keep intensity between 0 and 1
    alpha = alpha.astype(float) / 255

    # Multiply the foreground with the alpha matte
    foreground = cv2.multiply(alpha, foreground)

    # Multiply the background with (1 - alpha)
    background = cv2.multiply(1.0 - alpha, background)

    # Add the masked foreground and background
    outImage = cv2.add(foreground, background)

    # Display image
    cv2.imshow("outImg", outImage / 255)
    print(i)
    if cv2.waitKey(25) & 0xFF == ord('Q'):
        break

cap.release()
# Closes all the frames
cv2.destroyAllWindows()
# coding: utf-8
import cv2
import numpy as np

img_1 = cv2.imread(r'pictures\opencv.png')
rows, cols = img_1.shape[0:2]
img_2 = cv2.imread(r'pictures\cat.jpg')
roi = img_2[0:rows, 0:cols]

img_1_gray = cv2.cvtColor(img_1, cv2.COLOR_BGR2GRAY)
ret, img_1_thres = cv2.threshold(img_1_gray, 200, 255, cv2.THRESH_BINARY_INV)
# cv2.add with a mask writes only where the mask is non-zero; adding an image
# to itself doubles (and saturates) the masked pixels.
img_1_foreground = cv2.add(img_1, img_1, mask=img_1_thres)
print(img_1.shape, roi.shape)
img_1_thres_inv = cv2.bitwise_not(img_1_thres)  # invert the mask
roi_background = cv2.add(roi, roi, mask=img_1_thres_inv)
img_add = cv2.addWeighted(img_1_foreground, 0.6, roi_background, 1, 0)
img_2[0:rows, 0:cols] = img_add

cv2.imshow("gray", img_1_gray)
cv2.imshow("thres", img_1_thres)
cv2.imshow("fg", img_1_foreground)
cv2.imshow("tinv", img_1_thres_inv)
cv2.imshow("roi_bg", roi_background)
cv2.imshow("img_add", img_add)
cv2.imshow("img_2", img_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
for i in range(5, 0, -1):
    orange_extended = cv2.pyrUp(gp_orange[i])
    laplacian = cv2.subtract(gp_orange[i - 1], orange_extended)
    lp_orange.append(laplacian)
    # cv2.imshow(str(i), orange_copy)

# Now join the left and right halves of the images at each level
apple_orange_pyramid = []
for apple_lap, orange_lap in zip(lp_apple, lp_orange):
    rows, cols, ch = apple_lap.shape  # shape is (rows, cols, channels)
    laplacian = np.hstack(
        (apple_lap[:, 0:int(cols / 2)], orange_lap[:, int(cols / 2):]))
    apple_orange_pyramid.append(laplacian)

# Reconstructing the image
apple_orange_reconstruct = apple_orange_pyramid[0]
for i in range(1, 6):
    apple_orange_reconstruct = cv2.pyrUp(apple_orange_reconstruct)
    apple_orange_reconstruct = cv2.add(apple_orange_pyramid[i],
                                       apple_orange_reconstruct)

cv2.imshow('Apple', apple)
cv2.imshow('Orange', orange)
cv2.imshow('apple_orange', apple_orange)
cv2.imshow('apple_orange_reconstruct', apple_orange_reconstruct)
cv2.waitKey(0)
cv2.destroyAllWindows()
def of_demo():
    pixels_cut = 50
    pixels_cut_left = 100
    cap = cv2.VideoCapture('rally.avi')
    # cap = cv2.VideoCapture('input.mp4')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000, qualityLevel=0.2,
                          minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    lk_params = dict(winSize=(35, 35), maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))
    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_frame = old_frame[:-pixels_cut, pixels_cut_left:, :]
    out = cv2.VideoWriter('output2.avi', fourcc, 30.0,
                          (old_frame.shape[1], old_frame.shape[0]))
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    frno = 0
    restart = False
    while True:
        frno += 1
        ret, frame = cap.read()
        if not ret or frno >= 70:
            break
        frame = frame[:-pixels_cut, pixels_cut_left:, :]
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if restart:
            p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
            restart = False
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        successful = (st == 1)
        if np.sum(successful) == 0:
            restart = True
        # Select good points
        good_new = p1[successful]
        good_old = p0[successful]
        # draw the tracks of points that actually moved
        count_of_moved = 0
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            velocity = np.sqrt((a - c) ** 2 + (b - d) ** 2)
            if velocity > 1:
                # Drawing functions require integer coordinates.
                mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                color[i].tolist(), 2)
                frame = cv2.circle(frame, (int(a), int(b)), 4, color[i].tolist(), -1)
                count_of_moved += 1
        img = cv2.add(frame, mask)
        # Fade the drawn tracks over time. (np.float was removed from NumPy;
        # the builtin float is equivalent here.)
        mask = np.round(mask.astype(float) / 1.1).astype(np.uint8)
        cv2.imshow('frame', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
        out.write(img)
    cv2.destroyAllWindows()
    cap.release()
    out.release()
    lpB.append(L)

# Now join the left and right halves of the images at each level
LS = []
for la, lb in zip(lpA, lpB):
    rows, cols, dpt = la.shape
    ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
    # cv2.imshow('ls', ls)
    LS.append(ls)

# now reconstruct
ls_ = LS[0]
for i in range(1, 6):
    ls_ = cv2.pyrUp(ls_)
    # cv2.resize expects (width, height), i.e. the reverse of shape[:2]
    ls_ = cv2.resize(ls_, LS[i].shape[1::-1])
    ls_ = cv2.add(ls_, LS[i])

# image with direct connection of each half, for comparison
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))

cv2.imshow('apple', A)
cv2.imshow('orange', B)
cv2.imshow('blend direct', real)
cv2.imshow('pyramids blend', ls_)

# hold window
cv2.waitKey(0)
cv2.destroyAllWindows()