def get_frame(self):
    ret, frame = self.video.read()
    frame = cv2.flip(frame, 1)
    cv2.copyTo(guide, mask, frame)
    ret, jpeg = cv2.imencode('.jpeg', frame)
    return jpeg.tobytes()
def getEdgeMap2x2(image):
    '''detect edges with HED by resizing the image to 960x640, running HED on 4 separate
    480x320 windows, and then stitching everything back to 960x640
    '''
    w, h = 960, 640
    image_resized = cv2.resize(image, (960, 640))
    pad = 32
    img00 = image_resized[pad:320 + pad, pad:480 + pad]
    img01 = image_resized[pad:320 + pad, 480 - pad:960 - pad]
    img10 = image_resized[320 - pad:640 - pad, pad:480 + pad]
    img11 = image_resized[320 - pad:640 - pad, 480 - pad:960 - pad]
    edgemap00 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap01 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap10 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    edgemap11 = np.zeros((h - 2 * pad, w - 2 * pad), np.uint8)
    ww, hh = 480, 320
    edgemap00[0:hh, 0:ww] = cv2.copyTo(getEdgeMap(img00), mask=None)
    edgemap01[0:hh, ww - 2 * pad:] = cv2.copyTo(getEdgeMap(img01), mask=None)
    edgemap10[hh - 2 * pad:, 0:ww] = cv2.copyTo(getEdgeMap(img10), mask=None)
    edgemap11[hh - 2 * pad:, ww - 2 * pad:] = cv2.copyTo(getEdgeMap(img11), mask=None)
    e0 = cv2.max(edgemap00, edgemap01)
    e1 = cv2.max(edgemap10, edgemap11)
    edgemap = cv2.max(e0, e1)
    edgemap_resized = cv2.resize(edgemap, (960, 640))
    return edgemap_resized
def fill_polyline_transparent(image, pnts, color, opacity, thickness=-1):
    blk = np.zeros(image.shape, np.uint8)
    cv2.drawContours(blk, pnts, -1, color, -1)
    if thickness >= 0:
        cv2.polylines(image, pnts, True, color=color, thickness=thickness)
    # blend the filled overlay into the image with the requested opacity
    res = cv2.addWeighted(image, 1.0, blk, opacity, 0)
    cv2.copyTo(res, None, image)
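# A minimal usage sketch (not part of the original snippet above); the canvas, the triangle
# coordinates, and the name fill_polyline_transparent_demo are made up for illustration, and
# it assumes fill_polyline_transparent as defined above.
def fill_polyline_transparent_demo():
    import numpy as np
    import cv2
    canvas = np.zeros((200, 200, 3), np.uint8)
    # drawContours/polylines expect a list of int32 point arrays of shape (N, 2) or (N, 1, 2)
    triangle = [np.array([[20, 20], [180, 40], [100, 160]], dtype=np.int32)]
    fill_polyline_transparent(canvas, triangle, color=(0, 0, 255), opacity=0.4, thickness=2)
    cv2.imwrite('demo_overlay.png', canvas)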
def preprocess(image):
    image.bgr_to_gray()
    image.invert()
    image.blur(3)
    image.threshold()
    image.blur(2)
    structuring_element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    cv2.erode(image.image, structuring_element, dst=image.image)
    cv2.dilate(image.image, structuring_element, dst=image.image)
    # 1. Extract edges
    edges = cv2.adaptiveThreshold(image.image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 3, -2)
    # 2. Dilate edges
    kernel = np.ones((2, 2))
    edges = cv2.dilate(edges, kernel)
    # 3./4. make a blurred (smoothed) copy of the image
    smooth = cv2.blur(image.image, (2, 2))
    # 5. smooth.copyTo(src, edges), i.e. cv2.copyTo(src, mask, dst) -> dst:
    # copy the smoothed pixels back into the image wherever edges are set
    cv2.copyTo(smooth, edges, image.image)
def getIris(frame):
    iris = []
    array = np.asarray(frame)
    copy_img = array.copy()
    res_img = array.copy()
    mask = np.zeros(array.shape, np.uint8)
    gray_img = cv2.cvtColor(array, cv2.COLOR_BGR2GRAY)
    canny_img = cv2.Canny(gray_img, 100, 250)
    smooth_img = cv2.GaussianBlur(canny_img, (7, 7), 1)
    circles = getCircles(smooth_img)
    iris.append(res_img)
    for circle in circles:
        rad = int(circle[0][2])
        global radius
        radius = rad
        # `centroid` is a module-level point defined elsewhere; draw a filled white circle there
        cv2.circle(mask, centroid, rad, (255, 255, 255), thickness=cv2.FILLED)
        inv = cv2.bitwise_not(mask, mask)
        cv2.subtract(array, copy_img, res_img, mask=inv)
        # bounding box of the detected circle
        x = int(centroid[0] - rad)
        y = int(centroid[1] - rad)
        w = int(rad * 2)
        h = w
        cv2.rectangle(res_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # crop the bounding box out of the result image
        crop_img = np.zeros((h, w, 3), np.uint8)
        cv2.copyTo(res_img[y:y + h, x:x + w], None, crop_img)
        return crop_img
    return res_img
def preprocess(image):
    image.blur((5, 5), 0)
    image.threshold(150)
    structuring_element = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    cv2.erode(image.image, structuring_element, dst=image.image)
    cv2.dilate(image.image, structuring_element, dst=image.image)
    # 1. Extract edges
    edges = cv2.adaptiveThreshold(image.image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 3, -2)
    # 2. Dilate edges
    kernel = np.ones((3, 3))
    cv2.dilate(edges, kernel, dst=edges)
    # 3. src.copyTo(smooth)
    smooth = image.image.copy()
    # 4. blur smooth img
    cv2.blur(smooth, (2, 2), dst=smooth)
    # 5. smooth.copyTo(src, edges), i.e. cv2.copyTo(src, mask, dst) -> dst
    cv2.copyTo(smooth, edges, image.image)
def polyLineAnno(imgID):
    '''Make annotations with polylines. Returns a list of polylines [(x1,y1),(x2,y2),...,(xn,yn)].
    Operations: Left click to mark a point; consecutive points are linked together.
    The first polyline should mark the outline of the chest bones.
    The other polylines should mark each rib, starting from the outline to the spine.
    Ribs should be marked clockwise.
    Press n for the next polyline. Press f to stop recording and return.
    '''
    imgPath = params.data_dir + "/" + str(imgID) + ".png"
    img = cv.imread(imgPath)
    img, hscale, wscale = scaling(img)
    cv.putText(img, "ID: " + str(imgID), (0, 30), cv.FONT_HERSHEY_TRIPLEX, 1, (0, 255, 255))
    cv.imshow("Display", img)
    polycnt = 1
    poly = []
    lineColor = (0, 0, 255)
    mask = np.full((img.shape[0], img.shape[1], 1), 255, np.uint8)
    old = cv.copyTo(img, mask)

    def mouseCallback(event, x, y, flags, param):
        nonlocal poly
        nonlocal img
        if event == cv.EVENT_LBUTTONUP:
            poly.append((x, y))
            if len(poly) > 1:
                cv.line(img, poly[-2], poly[-1], lineColor, 2)
            else:
                cv.putText(img, str(polycnt), (x, y), cv.FONT_HERSHEY_SIMPLEX, 1, lineColor)
        if event == cv.EVENT_RBUTTONUP:
            if len(poly) > 0:
                poly = poly[:-1]
                img = cv.copyTo(old, mask)
                drawPoly(img, poly, lineColor, polycnt)

    result = []
    cv.setMouseCallback("Display", mouseCallback)
    while True:
        cv.imshow("Display", img)
        key = cv.waitKey(50)
        if key != -1:
            if (key & 0xFF) in [ord('n'), ord('f')]:
                if len(poly) > 1:
                    result.append((np.array(poly, dtype=float) / [wscale, hscale]).tolist())
                    polycnt += 1
                    lineColor = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                poly = []
                old = cv.copyTo(img, mask)
            if (key & 0xFF) == ord('f'):
                break
    return result
def __handleObjectMaskCriterion(this, image, mask, boxSlice):
    segmentMask = PseudoLabeller.__random_colour_masks(mask)[boxSlice]
    segmentMask = cv.cvtColor(segmentMask, cv.COLOR_RGB2GRAY)
    nonZero = cv.countNonZero(segmentMask)
    area = float(image[boxSlice].shape[0] * image[boxSlice].shape[1])
    if (nonZero / area) > 0.1:
        background = this.__getBackground()[boxSlice]
        cv.copyTo(image[boxSlice], segmentMask, background)
        image[boxSlice] = background
    return
def docut(self):
    self.test = copy.deepcopy(self.img)
    test = copy.deepcopy(self.img)
    mask = np.zeros(self.img.shape, np.uint8)
    cv2.fillPoly(mask, np.array([self.coordinates], dtype=np.int32), (255, 255, 255))
    cv2.rectangle(self.img, (0, 0), (self.img.shape[1], self.img.shape[0]), (0, 255, 0), -1)
    cv2.copyTo(test, mask, self.img)
    cv2.imwrite(r'Testers\tut\tut_v\cut.jpg', self.img)
def rhgan(ganimg, srcimg):
    h, w, _ = ganimg.shape
    maxh = srcimg.shape[0] - h - 1
    maxw = srcimg.shape[1] - w - 1
    if maxh < 0 or maxw < 0:
        return srcimg
    y = random.randint(0, maxh)
    x = random.randint(0, maxw)
    # roiimg is a view into srcimg, so copyTo pastes ganimg into srcimg in place
    roiimg = srcimg[y:y + h, x:x + w, :]
    mask = np.ones(ganimg.shape, ganimg.dtype) * 255
    cv2.copyTo(ganimg, mask, roiimg)
    return srcimg
def mask_copyTo():
    src = cv2.imread('airplane.bmp', cv2.IMREAD_COLOR)
    mask = cv2.imread('mask_plane.bmp', cv2.IMREAD_GRAYSCALE)
    dst = cv2.imread('field.bmp', cv2.IMREAD_COLOR)

    if src is None or mask is None or dst is None:
        print('Image load failed!')
        return

    cv2.copyTo(src, mask, dst)

    cv2.imshow('sum', dst)
    cv2.waitKey()
    cv2.destroyAllWindows()
def rgbFunc(Obj):
    tmp = Obj.loadImage()
    mask = cv2.inRange(tmp, np.array(Obj.__channel_range[0]), np.array(Obj.__channel_range[1]))
    tmp = cv2.copyTo(tmp, mask)
    Obj.setImage(tmp)
def add_mask(target_img, mask_img):
    if target_img.shape != mask_img.shape:
        raise ValueError(f'Mismatched shape: target_img {target_img.shape} and mask_img {mask_img.shape}')
    masked_img = target_img.copy()
    mark_for_white = mask_cutoff(mask_img)
    white_img = np.ones_like(mark_for_white, dtype=np.uint8) * 255
    cv2.copyTo(white_img, mark_for_white, masked_img)
    mask_for_dark = cv2.bitwise_not(mask_img)
    mask_for_dark = mask_cutoff(mask_for_dark)
    dark_img = np.ones_like(mask_for_dark, dtype=np.uint8)
    cv2.copyTo(dark_img, mask_for_dark, masked_img)
    return masked_img
def ImagProgress(filename, flag):
    Image = filename
    # Image = cv.imread(filename)
    Image_Gau = cv.GaussianBlur(Image, (9, 9), 0)
    Image_HSV = cv.cvtColor(Image_Gau, cv.COLOR_BGR2HSV)
    if flag == 1:
        # blue limits
        lowarray = np.array([95, 43, 46])
        higharray = np.array([105, 255, 255])
    elif flag == 2:
        # yellow limits
        lowarray = np.array([26, 43, 46])
        higharray = np.array([34, 255, 255])
    else:
        # red limits
        lowarray = np.array([156, 43, 46])
        higharray = np.array([180, 255, 255])
    dst = cv.inRange(Image_HSV, lowarray, higharray)
    diale = cv.copyTo(dst, dst)
    element = cv.getStructuringElement(cv.MORPH_RECT, (7, 7))
    openImage = cv.morphologyEx(diale, cv.MORPH_OPEN, element)
    MedirImag = cv.medianBlur(dst, 9)
    x, y, w, h = cv.boundingRect(MedirImag)
    cv.rectangle(MedirImag, (x, y), (x + w, y + h), (255, 255, 255), 2)
    # cv.namedWindow('yuzhi', cv.WINDOW_NORMAL)
    # cv.namedWindow('zhongzhi', cv.WINDOW_NORMAL)
    cv.namedWindow('zhong', cv.WINDOW_NORMAL)
    # cv.imshow('yuzhi', dst)
    # cv.imshow('zhongzhi', MedirImag)
    cv.imshow('zhong', MedirImag)
    data = [x, y, w, h]
    # cv.waitKey(0)
    # return flag
    return data
def mask_copyTo():
    src = cv2.imread('ref/airplane.bmp', cv2.IMREAD_COLOR)
    mask = cv2.imread('ref/mask_plane.bmp', cv2.IMREAD_GRAYSCALE)
    dst = cv2.imread('ref/field.bmp', cv2.IMREAD_COLOR)

    if src is None or mask is None or dst is None:
        print('Image load failed!')
        return

    cv2.copyTo(src, mask, dst)
    # dst[mask > 0] = src[mask > 0]

    cv2.imshow('src', src)
    cv2.imshow('dst', dst)
    cv2.imshow('mask', mask)
    cv2.waitKey()
    cv2.destroyAllWindows()
def mask_copyTo(): src = cv2.imread("airplane.bmp", cv2.IMREAD_COLOR) mask = cv2.imread("mask_plane.bmp", cv2.IMREAD_GRAYSCALE) dst = cv2.imread("field.bmp", cv2.IMREAD_COLOR) if src is None or mask is None or dst is None: print("Image load failed!") return cv2.copyTo(src, mask, dst) # dst[mask > 0] = src[mask > 0] cv2.imshow("src", src) cv2.imshow("dst", dst) cv2.imshow("mask", mask) cv2.waitKey() cv2.destroyAllWindows()
def grayFunc(Obj):
    tmp = Obj.loadImage()
    tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
    mask = cv2.inRange(tmp, np.array(Obj.__channel_range[0]), np.array(Obj.__channel_range[1]))
    tmp = cv2.copyTo(tmp, mask)
    Obj.setImage(tmp)
def __deliverResult(this, img, boxes, masks, frameOrder):
    if not len(boxes):
        print("Nothing detected in frame: " + str(frameOrder))
        print("Caught " + str(this.totalObjectCaughtSoFar) + " objects so far.")
        return this.__getBackground()
    goodBoxes, goodMasks = this.__handleIntersection(boxes, masks)
    baseOverlay = np.zeros((processingSize[1], processingSize[0], 3), np.uint8)
    cv.copyTo(this.__getBackground(), None, baseOverlay)
    annotationTxt = open(this.datasetPath + str(frameOrder) + ".txt", "a+")
    trainTxt = open(this.datasetPath + "_train" + ".txt", "a+")
    trainTxt.write("%s\n" % (this.datasetPath + str(frameOrder) + ".jpg"))
    trainTxt.close()
    print("Committed " + str(len(goodBoxes)) + " object(s) in frame: " + str(frameOrder))
    this.totalObjectCaughtSoFar += len(goodBoxes)
    print("Caught " + str(this.totalObjectCaughtSoFar) + " objects so far.")
    for i in range(len(goodBoxes)):
        box = goodBoxes[i]
        boxSlice = np.s_[int(box[0][1]):int(box[1][1]), int(box[0][0]):int(box[1][0])]
        if this.segmentation:
            this.__handleObjectMaskCriterion(img, goodMasks[i], boxSlice)
        cv.copyTo(img[boxSlice], None, baseOverlay[boxSlice])
        yoloX = float(box[0][0] + ((box[1][0] - box[0][0]) / 2)) / img.shape[1]
        yoloY = float(box[0][1] + ((box[1][1] - box[0][1]) / 2)) / img.shape[0]
        yoloWidth = float(box[1][0] - box[0][0]) / img.shape[1]
        yoloHeight = float(box[1][1] - box[0][1]) / img.shape[0]
        annotationTxt.write("%d %f %f %f %f\n" % (PseudoLabeller.datasetClassIndice, yoloX, yoloY, yoloWidth, yoloHeight))
    annotationTxt.close()
    cv.imwrite(this.datasetPath + str(frameOrder) + ".jpg", baseOverlay)
    for box in goodBoxes:
        cv.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=1)
    return baseOverlay
def test_copytomask(self):
    img = self.get_sample('python/images/baboon.png', cv.IMREAD_COLOR)
    eps = 0.
    # Create mask using inRange
    valeurBGRinf = np.array([0, 0, 100])
    valeurBGRSup = np.array([70, 70, 255])
    maskRed = cv.inRange(img, valeurBGRinf, valeurBGRSup)
    # New binding
    dstcv = np.full(np.array((2, 2, 1)) * img.shape, 255, dtype=img.dtype)
    cv.copyTo(img, maskRed, dstcv[:img.shape[0], :img.shape[1], :])
    # using numpy
    dstnp = np.full(np.array((2, 2, 1)) * img.shape, 255, dtype=img.dtype)
    mask2 = maskRed.astype(bool)
    _, mask_b = np.broadcast_arrays(img, mask2[..., None])
    np.copyto(dstnp[:img.shape[0], :img.shape[1], :], img, where=mask_b)
    self.assertEqual(cv.norm(dstnp, dstcv), eps)
def GetForegroundImgInfo(FilePath, showFlag=False):
    '''
    First remove the background from the original image using deep learning (this may damage the imprint).
    Then use the foreground contour of that background-removed image to extract the final foreground
    from the original image again (goal: a background-removed image with the imprint intact).
    :param FilePath:
    :param showFlag:
    :return:
    '''
    #import time
    #start = time.time()
    #print(FilePath)
    img_color = cv.imread(FilePath)
    #cv.imshow('ss', img_color)
    #cv.waitKey(0)
    #img_color = hangulFilePathImageRead(FilePath)
    img_color = cv.resize(img_color, (Configuration.ImageSizeForPreprocess, Configuration.ImageSizeForPreprocess))
    contours, lastIdx = GetForegroundImgContourInfo(FilePath)
    if contours is None:
        print("contour is not found!!")
    # Create a black background
    ImgMasking = np.zeros((img_color.shape[0], img_color.shape[1], 3), np.uint8)
    minX, minY, maxX, maxY, cx, cy = GetMinMaxPosInContour(contours[lastIdx], ImgMasking)
    '''
    Visually check the center point
    cv.circle(ImgMasking, (cx, cy), 10, (0, 255, 255), -1)
    cv.imshow("center point", ImgMasking)
    cv.waitKey(0)
    '''
    # Draw the contour (outline) found in the background-removed image onto the black background created above
    cv.drawContours(ImgMasking, contours, lastIdx, (255, 255, 255), 0)
    if showFlag:
        cv.imshow("ImgMasking1", ImgMasking)
        cv.waitKey(0)
    # Fill the interior with white
    mask = np.zeros((img_color.shape[0] + 2, img_color.shape[1] + 2), np.uint8)
    mask[:] = 0
    cv.floodFill(ImgMasking, mask, (cx, cy), (255, 255, 255))
    #print(cx,cy)
    # Apply the mask created above to the original image to remove its background.
    dst = cv.copyTo(img_color, ImgMasking)
    #print("time :", time.time() - start)
    if showFlag:
        cv.imshow("ImgMasking2", ImgMasking)
        cv.imshow("Last", dst)
        cv.waitKey(0)
    return dst, ImgMasking, contours, lastIdx, cx, cy, minX, minY, maxX, maxY
def labFunc(Obj):
    if Obj.getLoadReady() == True:
        tmp = Obj.loadImage()
        tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2LAB)
        mask = cv2.inRange(tmp, np.array(Obj.__channel_range[0]), np.array(Obj.__channel_range[1]))
        tmp = cv2.cvtColor(tmp, cv2.COLOR_LAB2BGR)
        tmp = cv2.copyTo(tmp, mask)
        Obj.setImage(tmp)
def getHStackCropedImg(rgb_img, mask_img, resize=(80, 140), bg_white=True, random_rotate=False):
    contours, hierarchy = cv2.findContours(mask_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # print(f'len: {len(contours)}')
    rgb_img_masked_img = cv2.copyTo(rgb_img, mask_img)
    if bg_white:
        ## black to white bg
        inv_mask_img = ~mask_img
        rgb_img_masked_img[np.where(inv_mask_img == 255)] = 255
        ## end of black bg white
    resized_rgb_img_array = []
    for contour in contours:
        assert len(contours) == 5  # check nail is 5
        ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
        ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
        ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
        ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
        cropped_image = rgb_img_masked_img[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]
        if random_rotate:
            padding = random.randint(20, 70)
            cropped_image = cv2.copyMakeBorder(cropped_image, padding, padding, padding, padding,
                                               cv2.BORDER_CONSTANT, value=(255, 255, 255))
            cropped_image = transform(cropped_image)
        else:
            padding = 20
            cropped_image = cv2.copyMakeBorder(cropped_image, padding, padding, padding, padding,
                                               cv2.BORDER_CONSTANT, value=(255, 255, 255))
        resized_cropped_img = cv2.resize(cropped_image, resize, interpolation=cv2.INTER_CUBIC)
        # cv2.imshow("resized_cropped_img", resized_cropped_img)
        # cv2.waitKey()
        resized_rgb_img_array.append(resized_cropped_img)
    return resized_rgb_img_array
def alpha_blend(cls, left, left_mask, right, right_mask):
    both_masks = cv2.bitwise_or(left_mask, right_mask)
    #cv::imshow("maskOR", bothMasks);
    no_mask = 255 - both_masks
    raw_alpha = cls._create_image(template=no_mask, dtype=float)
    raw_alpha[:] = 1.0
    border_left = 255 - cls.border(left_mask)
    border_right = 255 - cls.border(right_mask)
    distance_left = cv2.distanceTransform(border_left, cv2.DIST_L2, 3)
    _, max_left, _, _ = cv2.minMaxLoc(distance_left, mask=((left_mask > 0) & (distance_left > 1)).astype(np.uint8))
    distance_left = distance_left * 1.0 / max_left
    distance_right = cv2.distanceTransform(border_right, cv2.DIST_L2, 3)
    _, max_right, _, _ = cv2.minMaxLoc(distance_right, mask=((right_mask > 0) & (distance_right > 1)).astype(np.uint8))
    distance_right = distance_right * 1.0 / max_right
    cv2.copyTo(raw_alpha, left_mask, distance_left)
def _push_char():
    if not char_parts:
        return
    mask = np.zeros_like(binary_img)
    cv2.drawContours(mask, char_parts, -1, (255,), thickness=cv2.FILLED)
    char_img = cv2.copyTo(binary_img, mask)
    l, t, r, b = char_non_zero_bbox
    if r - l < max_char_width * 0.5 or b - t < max_char_height * 0.8:
        l, t, r, b = char_bbox
    char_img = char_img[t:b, l:r]
    char_img_list.append((char_bbox, char_img))
def redbandchange(img):
    shape = img.shape
    temp = cv2.copyTo(img, None)
    h = shape[0]
    w = shape[1]
    # add 40 to the red channel (BGR index 2), clamping at 255
    for i in range(h):
        for j in range(w):
            t = img[i][j][2] + 40
            if t > 255:
                t = 255
            temp[i][j][2] = t
    return temp
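# A vectorized sketch of the same operation (not from the original source; the name
# redbandchange_vectorized is made up here). cv2.add saturates uint8 arithmetic at 255,
# so the explicit per-pixel clamp above is not needed.
def redbandchange_vectorized(img):
    temp = cv2.copyTo(img, None)
    temp[:, :, 2] = cv2.add(img[:, :, 2], np.full_like(img[:, :, 2], 40))
    return temp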
def perform_edge_detection(img):
    """
    Generates an image including only prominent edges in the image given as input.
    """
    lower, upper = get_canny_thresholds(img)
    blur_img = cv.GaussianBlur(img, (7, 7), cv.BORDER_DEFAULT)
    edges_img = cv.Canny(blur_img, lower, upper)
    edges_img = cv.dilate(edges_img, KERNEL, iterations=1)
    return cv.copyTo(img, edges_img)
def load_image(self, frame, imgFromRealSense=False):
    """ Loads the image given a string """
    self.img = frame  # read the image in
    if imgFromRealSense:
        self.remove_dark_bands()
    self.imglanes = cv2.copyTo(self.img, None, dst=None)
    self.imggray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
    self.imgHSV = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
    self.imgloaded = True
def mask_copyTo():
    src = cv2.imread('airplane.bmp', cv2.IMREAD_COLOR)
    mask = cv2.imread('mask_plane.bmp', cv2.IMREAD_GRAYSCALE)
    dst = cv2.imread('field.bmp', cv2.IMREAD_COLOR)

    if src is None or mask is None or dst is None:
        print('Image load failed!')
        return

    cv2.copyTo(src, mask, dst)
    # cv2.copyTo(src, mask, dst=None) -> dst
    # src : source (input) image
    # mask : mask image
    # dst : output image. If a dst with the same size and type as src is passed in,
    #       the operation is performed on it without allocating a new dst;
    #       otherwise a new dst is allocated, the operation is performed, and it is returned.
    # dst[mask > 0] = src[mask > 0]

    cv2.imshow('src', src)
    cv2.imshow('dst', dst)
    cv2.imshow('mask', mask)
    cv2.waitKey()
    cv2.destroyAllWindows()
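# A minimal sketch (not from the snippets above) illustrating the two call forms of cv2.copyTo
# described in the comments above; the arrays and the name copyTo_demo are made up for illustration.
def copyTo_demo():
    import numpy as np
    import cv2
    src = np.full((4, 4, 3), 200, np.uint8)
    dst = np.zeros((4, 4, 3), np.uint8)
    mask = np.zeros((4, 4), np.uint8)
    mask[1:3, 1:3] = 255
    # In-place form: dst already has the same size and type as src, so only the pixels
    # where mask > 0 are overwritten and the rest of dst is left untouched.
    cv2.copyTo(src, mask, dst)
    # Allocating form: no dst is given, so a new zero-initialized array is created
    # and returned with src copied in where mask > 0.
    out = cv2.copyTo(src, mask)
    assert np.array_equal(dst, out)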
def apply_sunglasses_effect(self, canvas: np.ndarray, objects: List[Dict],
                            left_eye_index: int, right_eye_index: int) -> np.ndarray:
    """Apply sunglasses effect.

    Args:
        canvas (np.ndarray): The image to apply the effect
        objects (list[dict]): The object list with keypoints
            - "keypoints" ([K,3]): keypoints in [x, y, score]
        left_eye_index (int): Keypoint index of the left eye
        right_eye_index (int): Keypoint index of the right eye

    Returns:
        np.ndarray: Processed image
    """
    hm, wm = self.resource_img.shape[:2]
    # anchor points in the sunglasses image
    pts_src = np.array([[0.3 * wm, 0.3 * hm], [0.3 * wm, 0.7 * hm],
                        [0.7 * wm, 0.3 * hm], [0.7 * wm, 0.7 * hm]],
                       dtype=np.float32)

    for obj in objects:
        kpts = obj['keypoints']
        if kpts[left_eye_index, 2] < self.kpt_thr or kpts[right_eye_index, 2] < self.kpt_thr:
            continue

        kpt_leye = kpts[left_eye_index, :2]
        kpt_reye = kpts[right_eye_index, :2]
        # orthogonal vector to the left-to-right eyes
        vo = 0.5 * (kpt_reye - kpt_leye)[::-1] * [-1, 1]

        # anchor points in the image by eye positions
        pts_tar = np.vstack(
            [kpt_reye + vo, kpt_reye - vo, kpt_leye + vo, kpt_leye - vo])

        h_mat, _ = cv2.findHomography(pts_src, pts_tar)
        patch = cv2.warpPerspective(self.resource_img, h_mat,
                                    dsize=(canvas.shape[1], canvas.shape[0]),
                                    borderValue=(255, 255, 255))
        # mask the white background area in the patch with a threshold 200
        mask = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY)
        mask = (mask < 200).astype(np.uint8)
        canvas = cv2.copyTo(patch, mask, canvas)

    return canvas
def apply_sunglasses_effect(img, pose_results, sunglasses_img, left_eye_index,
                            right_eye_index, kpt_thr=0.5):
    """Apply sunglasses effect.

    Args:
        img (np.ndarray): Image data.
        pose_results (list[dict]): The pose estimation results containing:
            - "keypoints" ([K,3]): keypoint detection result in [x, y, score]
        sunglasses_img (np.ndarray): Sunglasses image with white background.
        left_eye_index (int): Keypoint index of left eye
        right_eye_index (int): Keypoint index of right eye
        kpt_thr (float): The score threshold of required keypoints.
    """
    hm, wm = sunglasses_img.shape[:2]
    # anchor points in the sunglasses mask
    pts_src = np.array([[0.3 * wm, 0.3 * hm], [0.3 * wm, 0.7 * hm],
                        [0.7 * wm, 0.3 * hm], [0.7 * wm, 0.7 * hm]],
                       dtype=np.float32)

    for pose in pose_results:
        kpts = pose['keypoints']
        if kpts[left_eye_index, 2] < kpt_thr or kpts[right_eye_index, 2] < kpt_thr:
            continue

        kpt_leye = kpts[left_eye_index, :2]
        kpt_reye = kpts[right_eye_index, :2]
        # orthogonal vector to the left-to-right eyes
        vo = 0.5 * (kpt_reye - kpt_leye)[::-1] * [-1, 1]

        # anchor points in the image by eye positions
        pts_tar = np.vstack(
            [kpt_reye + vo, kpt_reye - vo, kpt_leye + vo, kpt_leye - vo])

        h_mat, _ = cv2.findHomography(pts_src, pts_tar)
        patch = cv2.warpPerspective(sunglasses_img, h_mat,
                                    dsize=(img.shape[1], img.shape[0]),
                                    borderValue=(255, 255, 255))
        # mask the white background area in the patch with a threshold 200
        mask = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY)
        mask = (mask < 200).astype(np.uint8)
        img = cv2.copyTo(patch, mask, img)

    return img
def test_copytomask(self):
    img = self.get_sample('python/images/baboon.png', cv.IMREAD_COLOR)
    eps = 0.
    # Create mask using inRange
    valeurBGRinf = np.array([0, 0, 100])
    valeurBGRSup = np.array([70, 70, 255])
    maskRed = cv.inRange(img, valeurBGRinf, valeurBGRSup)
    # New binding
    dstcv = cv.copyTo(img, maskRed)
    # using numpy
    mask2 = maskRed.astype(bool)
    _, mask_b = np.broadcast_arrays(img, mask2[..., None])
    dstnp = np.ma.masked_array(img, np.logical_not(mask_b))
    dstnp = np.ma.filled(dstnp, [0])
    self.assertEqual(cv.norm(dstnp, dstcv), eps)