def win_warp_color_space_ana_orc(self, img, bg_color=None, lang='chi_sim'):
    mat = np.asarray(img)
    if bg_color is None:
        bg_color = mat[0, 0, :]
    matgray = cv2.cvtColor(mat, cv2.COLOR_RGB2GRAY)
    _, _, min_loc, max_loc = cv2.minMaxLoc(matgray)
    dark_point = mat[min_loc[1], min_loc[0]]
    light_point = mat[max_loc[1], max_loc[0]]
    p1 = dark_point.astype(np.float64)   # np.float was removed in NumPy 1.20+
    p2 = light_point.astype(np.float64)
    p3 = bg_color.astype(np.float64)
    _ = p3  # background color is only used by the commented-out call below
    mat_bin = np.zeros(mat.shape, dtype=np.uint8)
    half_len = np.linalg.norm(p2 - p1) / 2
    for row in range(mat.shape[0]):
        for col in range(mat.shape[1]):
            p = mat[row, col, :].astype(np.float64)
            _, d1 = find_perpendicular_param_pp(p2, p1, p)
            # k2, d2 = find_perpendicular_param_pp(p1, p3, p)
            d3 = np.linalg.norm(p2 - p)
            th = 5
            # pixels close to the dark-light color axis and within half its
            # length of the light end become foreground (black)
            if d1 < th and d3 < half_len:
                mat_bin[row, col] = np.array([0, 0, 0], dtype=np.uint8)
            else:
                mat_bin[row, col] = np.array([255, 255, 255], dtype=np.uint8)
    return self.ocr(mat_bin, lang=lang)
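# The snippet above calls find_perpendicular_param_pp, which is not defined
# here. A minimal sketch of what it plausibly computes, judging from how d1
# is used (the perpendicular distance of color p to the line through a and b);
# this is an assumption, not the original implementation:
def find_perpendicular_param_pp(a, b, p):
    ab = b - a
    t = float(np.dot(p - a, ab) / np.dot(ab, ab))  # projection parameter of p along a->b
    foot = a + t * ab                              # foot of the perpendicular from p
    d = float(np.linalg.norm(p - foot))            # distance from p to the line
    return t, d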
def getOneHandKeypoints(self, handimg):
    """Hand keypoint detection (single hand).

    :param handimg: hand image (BGR ndarray), not a file path
    :return: points, list of keypoint coordinates for one hand (None where below threshold)
    """
    img_height, img_width, _ = handimg.shape
    aspect_ratio = img_width / img_height
    inWidth = int(((aspect_ratio * self.inHeight) * 8) // 8)
    inpBlob = cv2.dnn.blobFromImage(
        handimg, 1.0 / 255, (inWidth, self.inHeight), (0, 0, 0),
        swapRB=False, crop=False)
    self.hand_net.setInput(inpBlob)
    output = self.hand_net.forward()

    # vis heatmaps
    # self.vis_hand_heatmaps(handimg, output)

    points = []
    for idx in range(self.hand_num_points):
        probMap = output[0, idx, :, :]  # confidence map
        probMap = cv2.resize(probMap, (img_width, img_height))

        # Find the global maximum of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

        if prob > self.threshold:
            points.append((int(point[0]), int(point[1])))
        else:
            points.append(None)
    return points
def most_probable_location(self, pil, image, precision):
    img_rgb = np.array(pil)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    height, width = template.shape
    # Normalized correlation coefficient (TM_CCOEFF_NORMED): higher is better.
    # (The original comment claimed TM_SQDIFF, which is not what is used.)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return None
    if self.click_scope:
        with lock:
            if not self.click_blocked:
                self.click(pos=max_loc, action="left", sluggishness=0,
                           offset=5, height=height, width=width)
                self.report(image, max_loc)
                if self.run_until_found_one:
                    self.block_clicks()
            else:
                self.report(image, max_loc)
    else:
        self.report(image, max_loc)
    return max_loc
def _find_match_pos(self, screenshot, template,
                    threshold=THRESHOLD) -> Optional[Tuple[float, float]]:
    # annotation fixed: the original claimed Tuple[int, int], but the function
    # returns float center coordinates or None (needs `from typing import Optional, Tuple`)
    name = template
    source: np.ndarray
    if isinstance(screenshot, np.ndarray):
        source = screenshot
    else:
        source = cv.imread(screenshot)
    templatepath = "images/{}.png".format(template)
    if templatepath in self._imagecache:
        template = self._imagecache[templatepath]
    else:
        template = cv.imread(templatepath)
        height, width = source.shape[:2]
        fx = width / BASE_WIDTH
        fy = height / BASE_HEIGHT
        # scale the template to the current resolution before caching it
        template = cv.resize(template, None, fx=fx, fy=fy,
                             interpolation=cv.INTER_AREA)
        self._imagecache[templatepath] = template
    theight, twidth = template.shape[:2]
    ret = cv.matchTemplate(source, template, cv.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(ret)
    if max_val > threshold:
        # return the center of the matched region
        return (max_loc[0] + twidth / 2, max_loc[1] + theight / 2)
    else:
        return None
def compare(self, img_list, acc=0.85, special=False):
    imgs = []
    self.screenshot = self.adbkit.screenshots()
    for item in img_list:
        imgs.append(cv2.imread(item))
    if special:
        # mask out the top banner area before matching
        cv2.rectangle(self.screenshot, (0, 0), (1280, 420),
                      color=(0, 0, 0), thickness=-1)
    for img in imgs:
        find_height, find_width = img.shape[:2]
        result = cv2.matchTemplate(self.screenshot, img, cv2.TM_CCOEFF_NORMED)
        reslist = cv2.minMaxLoc(result)  # (min_val, max_val, min_loc, max_loc)
        if self.debug:
            cv2.rectangle(
                self.screenshot, reslist[3],
                (reslist[3][0] + find_width, reslist[3][1] + find_height),
                color=(0, 250, 0), thickness=2)
        if reslist[1] > acc:
            if self.debug:
                print("[Detect]acc rate:", round(reslist[1], 2))
            pos = [reslist[3][0], reslist[3][1]]
            pos = [x * self.adbkit.capmuti for x in pos]
            return pos, find_height * self.adbkit.capmuti, find_width * self.adbkit.capmuti
    if special:
        return False, 0, 0
    else:
        return False
def standby(template, acc=0.85, special=False):
    # take an emulator screenshot
    target_img = adbkit.screenshots()
    if special:
        # mask out the top banner area before matching
        cv2.rectangle(target_img, (0, 0), (1280, 420), color=(0, 0, 0), thickness=-1)
        cv2.imwrite("screencap-rect.png", target_img)
    # load the template image
    find_img = cv2.imread(str(template))
    find_height, find_width = find_img.shape[:2]
    # template matching
    result = cv2.matchTemplate(target_img, find_img, cv2.TM_CCOEFF_NORMED)
    # reslist = (min_val, max_val, min_loc, max_loc)
    reslist = cv2.minMaxLoc(result)
    if debug:
        cv2.rectangle(target_img, reslist[3],
                      (reslist[3][0] + find_width, reslist[3][1] + find_height),
                      color=(0, 255, 0), thickness=2)
        # cv2.imwrite("screencap.png", target_img)
        cv2.imshow("screenshots", target_img)
        cv2.waitKey(1)
    if reslist[1] > acc:
        if debug:
            print("[Detect]acc rate:", round(reslist[1], 2))
        return reslist[3], find_height, find_width
    else:
        if debug:
            print("[Detect]acc rate:", round(reslist[1], 2))
        return False
def find_all_template(cls, source, target, threshold=0.8, mac_count=None):
    # if the target has an alpha channel, use it as a matching mask
    mask = None
    if len(target.shape) == 3 and target.shape[2] == 4:
        mask = target[:, :, 3]
        target = cv2.cvtColor(target, cv2.COLOR_BGRA2BGR)
    res = cv2.matchTemplate(source, target, cv2.TM_CCOEFF_NORMED, mask=mask)
    result = []
    height, width = target.shape[:2]
    while True:
        _, max_val, _, tl = cv2.minMaxLoc(res)
        if max_val < threshold:
            break
        br = (tl[0] + width, tl[1] + height)
        mp = (int(tl[0] + width / 2), int(tl[1] + height / 2))
        result.append({
            'pt': mp,
            'rect': (tl, br),
            'conf': max_val
        })
        # stop once mac_count matches have been collected
        # (the original checked before decrementing, returning one match too many)
        if mac_count is not None:
            mac_count -= 1
            if mac_count <= 0:
                break
        # suppress this maximum so the next-best match can surface
        cv2.floodFill(res, None, tl, (-1000,), max_val - threshold + 0.1, 1,
                      flags=cv2.FLOODFILL_FIXED_RANGE)
    return result
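# Hypothetical usage of find_all_template; the Matcher class name and image
# paths are placeholders, assuming the method is exposed as a classmethod:
source = cv2.imread("screen.png")                        # BGR screenshot
target = cv2.imread("button.png", cv2.IMREAD_UNCHANGED)  # keep alpha so the mask branch is used
for m in Matcher.find_all_template(source, target, threshold=0.85, mac_count=5):
    (tl, br), center, conf = m['rect'], m['pt'], m['conf']
    cv2.rectangle(source, tl, br, (0, 255, 0), 2)        # outline each accepted match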
def image_search(target_img, pattern, precision=0.8):
    # load both images as grayscale
    target = cv2.imread(target_img, 0)
    template = cv2.imread(pattern, 0)
    # (the original had these checks commented out, testing the path strings
    # instead of the loaded images; enabled against the loaded arrays)
    if target is None:
        raise FileNotFoundError('Image name {} cannot be found'.format(target_img))
    if template is None:
        raise FileNotFoundError('Image name {} cannot be found'.format(pattern))
    height, width = template.shape
    x_offset = width / 2
    y_offset = height / 2
    try:
        result = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
        print(' Image matching rate: {}'.format(max_val))
        if max_val < precision:
            return False
        x, y = (max_loc[0] + x_offset, max_loc[1] + y_offset)
        return x, y
    except cv2.error:
        print("[ImgNotFound] OpenCV couldn't find the image file in the given directory")
        return False
def tempmatch(img, tmp):
    # return the correlation value between img and the template
    if img.shape[0] >= tmp.shape[0] and img.shape[1] >= tmp.shape[1]:
        method = cv.TM_CCOEFF  # no need for eval('cv.TM_CCOEFF')
        res = cv.matchTemplate(img, tmp, method)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
        return max_val
    else:
        return 0
def standby(images=None, tmp: str = None, threshold: float = 0.85) -> bool:
    # NOTE: the original default `images=get_sh((0, 0))` was evaluated once at
    # definition time (a classic Python pitfall), so every call reused the same
    # screenshot; grab one per call instead.
    img = images if images is not None else get_sh((0, 0))
    template = cv2.imread(tmp)
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    res = cv2.minMaxLoc(res)
    # res = (min_val, max_val, min_loc, max_loc); res[1] is the best match
    # score (the original comment wrongly called res[0]/res[1] coordinates)
    if res[1] >= threshold:
        return True
    return False
def gatcha(self):
    gatcha = []
    for index, template in enumerate(self.template):
        result = cv2.matchTemplate(self.screenshot, template, cv2.TM_CCOEFF_NORMED)
        result = cv2.minMaxLoc(result)
        if result[1] > 0.9:  # result[1] is the best match score
            gatcha.append(self.ark[index])
    return gatcha
def calcAndDrawHist(image, color):
    hist = cv2.calcHist([image], [0], None, [256], [0.0, 255.0])
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist)
    histImg = np.zeros([256, 256, 3], np.uint8)
    hpt = int(0.9 * 256)
    for h in range(256):
        intensity = int(hist[h] * hpt / maxVal)
        cv2.line(histImg, (h, 256), (h, 256 - intensity), color)
    return histImg
def imageMatch(hwnd, image):
    # Match a window screenshot against the template and return the match rate
    img = getWindowImg(hwnd)
    img_rgb = np.array(img)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    return cv2.minMaxLoc(res)[1]  # max_val, i.e. the best match score
def match(small_pic_path, large_pic):
    # edge-based matching: Canny both images, then correlate the edge maps
    small_pic = cv2.imread(small_pic_path)
    small_pic = cv2.cvtColor(small_pic, cv2.COLOR_BGR2GRAY)
    small_pic = cv2.Canny(small_pic, 50, 200)
    large_pic = cv2.cvtColor(large_pic, cv2.COLOR_BGR2GRAY)
    large_pic = cv2.Canny(large_pic, 50, 200)
    result = cv2.matchTemplate(large_pic, small_pic, cv2.TM_CCOEFF)
    # renamed from `max`, which shadowed the builtin
    _, max_val, _, max_location = cv2.minMaxLoc(result)
    return (max_val, max_location)
def imgdiffer(temurl):
    target = cv2.imread("D:\\twenty.png")
    template = cv2.imread(temurl)
    theight, twidth = template.shape[:2]
    # TM_SQDIFF_NORMED: lower is better, so use min_val / min_loc
    result = cv2.matchTemplate(target, template, cv2.TM_SQDIFF_NORMED)
    # cv2.normalize(result, result, 0, 1, cv2.NORM_MINMAX, -1)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    print(min_val)
    # center of the best match, plus the score
    return min_loc[0] + twidth / 2, min_loc[1] + theight / 2, min_val
def getHist256ImgFromHist(hist, color=[255, 255, 255]):
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist)
    histImg = np.zeros([256, 256, 3], np.uint8)
    hpt = int(0.9 * 256)
    for h in range(256):
        # cv2.line needs integer coordinates, so cast the scaled bin height
        intensity = int(hist[h] * hpt / maxVal)
        cv2.line(histImg, (h, 256), (h, 256 - intensity), color)
    return histImg
def imagesearch(image, precision=0.8):
    im = pyautogui.screenshot()
    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [300, 500]  # fallback position when no match is found
    return max_loc  # return the matched image coordinates
def match_template_wrapped(image):
    # parameter renamed from `input`, which shadowed the builtin;
    # relies on module-level globals: template_image, method, show_result
    res = cv2.matchTemplate(image, template_image, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if show_result:
        w, h = template_image.shape[::-1]
        # for squared-difference methods the best match is the minimum
        top_left = min_loc if method in [
            cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED
        ] else max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        cv2.rectangle(image, top_left, bottom_right, 255, 2)
        cv2.imshow('Template identification', image)
    return max_val, max_loc
def match(waitmatch, example, value=10000000):
    img = cv2.imread(waitmatch, 0)
    template = cv2.imread(example, 0)
    # TM_SQDIFF: lower is better, so compare min_val against the cutoff
    res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if min_val < int(value):
        top_left = min_loc
        w, h = template.shape[::-1]
        return [top_left[0] + w // 2, top_left[1] + h // 2]  # match center
    else:
        return None
def calcAndDrawHist(img, color=[255, 255, 255]):
    # color histogram
    hist = cv2.calcHist([img], [0], None, [256], [0.0, 255.0])
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist)
    histImg = np.zeros([256, 256, 3], np.uint8)
    hpt = int(0.9 * 256)
    for h in range(256):
        intensity = int(hist[h] * hpt / maxVal)
        cv2.line(histImg, (h, 256), (h, 256 - intensity), color)
    return histImg
def searchBox(whole_image, list_templates):
    method = cv2.TM_CCORR_NORMED
    for i in range(len(list_templates)):
        result = cv2.matchTemplate(whole_image, list_templates[i], method)
        # the fourth value is the location of the maximum (best CCORR match);
        # renamed from the misleading `mnLoc`
        _, _, _, maxLoc = cv2.minMaxLoc(result)
        MPx, MPy = maxLoc
        trows, tcols = list_templates[i].shape[:2]
        cv2.rectangle(whole_image, (MPx, MPy), (MPx + tcols, MPy + trows), 1.0, 10)
    return whole_image
def getBoneKeypoints(self, img_cv2):
    """COCO body keypoint detection.

    :param img_cv2: input image (BGR ndarray)
    :return: list of keypoint coordinates (None where below threshold)
    """
    img_height, img_width, _ = img_cv2.shape
    # build the input blob from the image
    inpBlob = cv2.dnn.blobFromImage(img_cv2, 1.0 / 255,
                                    (self.inWidth, self.inHeight),
                                    (0, 0, 0), swapRB=False, crop=False)
    # forward pass through the network
    self.pose_net.setInput(inpBlob)
    self.pose_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    self.pose_net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    output = self.pose_net.forward()
    H = output.shape[2]
    W = output.shape[3]

    # vis heatmaps
    # self.vis_bone_heatmaps(img_cv2, output)

    points = []
    for idx in range(self.bone_num_points):
        probMap = output[0, idx, :, :]  # confidence map
        # find the global maximum of the confidence map
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
        # scale the point back onto the original image
        # (instead of resizing the heatmap itself)
        x = (img_width * point[0]) / W
        y = (img_height * point[1]) / H
        if prob > self.threshold:
            points.append((int(x), int(y)))
        else:
            points.append(None)
    return points
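# Hypothetical follow-up: draw the returned keypoints. The `detector` and
# `img` names are placeholders, assuming an instance of the class above:
for p in detector.getBoneKeypoints(img):
    if p is not None:
        cv2.circle(img, p, 4, (0, 0, 255), -1)  # mark each detected joint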
def get_coordinate(path, img):
    fz = 0.1  # TM_SQDIFF_NORMED threshold: lower means a closer match
    template = cv2.imread(path)
    h, w, _ = template.shape
    img = np.array(img)
    # img is presumably a PIL image (RGB); the swap makes it BGR to match the template
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if min_val < fz:
        top_left = min_loc
        center = (top_left[0] + int(w / 2), top_left[1] + int(h / 2))
        return center
    else:
        return False
def find_upgrade_icon(self, tmpl):
    list_rect = self.config["建设菜单"]["政策中心"]["弹窗"]["滚动区域"]["矩形"]
    match_th = self.config["建设菜单"]["政策中心"]["弹窗"]["匹配门限"]
    img = self.win.screenshot(list_rect)
    canvas = self.green_enhance(img)
    # TM_SQDIFF_NORMED: lower is better, so compare min_val against the threshold
    result = cv2.matchTemplate(canvas, tmpl, cv2.TM_SQDIFF_NORMED)
    min_val, _, min_loc, _ = cv2.minMaxLoc(result)
    if min_val > match_th:
        return None
    else:
        # found an upgrade spot: convert to absolute screen coordinates
        r = self.win.win_cfg_to_rect(list_rect)
        p = pyrect.Point(r.left + min_loc[0] + tmpl.shape[1],
                         r.top + min_loc[1] + tmpl.shape[0])
        return p
def get_contour_ssim(image_fake, image_real):
    # requires: from skimage.metrics import structural_similarity
    gray_real = cv2.cvtColor(image_real, cv2.COLOR_BGR2GRAY)
    gray_fake = cv2.cvtColor(image_fake, cv2.COLOR_BGR2GRAY)
    (score, diff_image) = structural_similarity(gray_real, gray_fake, full=True)
    diff_image = (diff_image * 255).astype("uint8")
    print("SSIM: {}".format(score))
    # https://www.pyimagesearch.com/2014/09/29/finding-brightest-spot-image-using-python-opencv/
    # blur first so minMaxLoc picks a robust region rather than a noisy pixel;
    # the minimum of the SSIM map is the most dissimilar spot
    diff_image_blur = cv2.GaussianBlur(diff_image, (19, 19), 0)
    (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(diff_image_blur)
    x, y = minLoc
    return x, y
def find_goods_dest(self, gray_list1, gray_list2):
    cmp_th = self.config["建设菜单"]["火车"]["绿光灰度比值限"]
    # element-wise ratio of the two grayscale matrices
    cmp_mat = np.array(gray_list2) / np.array(gray_list1)
    min_val, max_val, _, max_loc = cv2.minMaxLoc(cmp_mat)
    rect = self.target.get_abs_rect(max_loc[1], max_loc[0])
    mat_std = cmp_mat.std()
    mat_var = cmp_mat.var()
    # cap the threshold at two standard deviations above 1
    cmp_th = cmp_th if cmp_th < 1 + mat_std * 2 else 1 + mat_std * 2
    if max_val > cmp_th:
        logging.debug(f"green-glow detection succeeded: max {max_val}, min {min_val}, threshold {cmp_th}")
        logging.debug(f"std={mat_std},var={mat_var}")
        return pyrect.Point(rect.centerx, rect.centery)
    else:
        logging.error(f"green-glow detection failed: max {max_val}, min {min_val}, threshold {cmp_th}")
        logging.debug(f"std={mat_std},var={mat_var}")
        return None
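# cv2.minMaxLoc accepts any single-channel 2-D array, not just images, which
# is what find_goods_dest exploits on the ratio matrix. A self-contained
# demonstration of the pattern (the values here are made up):
import numpy as np
import cv2

ratios = np.array([[1.0, 1.2],
                   [0.9, 2.5]])
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(ratios)
print(max_val, max_loc)  # 2.5 (1, 1) -- locations are (x, y), i.e. (col, row)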
def detect_box(self, img):
    # load the template
    template = cv2.imread(
        "/home/martin/catkin_ws/src/ivr_assignment/template-box.png", 0)
    # mark all the orange areas
    thresh = cv2.inRange(img, (0, 50, 100), (12, 75, 150))
    if sum(sum(thresh)) == 0:  # if the box is fully obscured
        return None
    # match the thresholded data against the template
    # (the original passed the magic number 1, i.e. cv2.TM_SQDIFF_NORMED,
    # so the best match is the minimum)
    matching = cv2.matchTemplate(thresh, template, cv2.TM_SQDIFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matching)
    # template dimensions, used to compute the centre
    width, height = template.shape[::-1]
    # return the centre of the target
    return np.array([min_loc[0] + width / 2, min_loc[1] + height / 2])
def find(src_img, template_path):
    template = cv.imread(template_path)
    a, w, h = template.shape[::-1]  # (channels, width, height)
    res = cv.matchTemplate(src_img, template, cv.TM_CCOEFF_NORMED)
    threshold = 0.9
    loc = np.where(res >= threshold)
    found = len(loc[0]) > 0
    if not found:
        return 0, 0, found
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
    top_left = max_loc
    # return the center of the best match
    return top_left[0] + (w // 2), top_left[1] + (h // 2), found
def template_demo():
    tpl = cv.imread("C:/Users/32936/Desktop/2/eye.png")
    target = cv.imread("C:/Users/32936/Desktop/2/lena.png")
    cv.imshow("tpl", tpl)
    cv.imshow("target", target)
    # squared difference, cross-correlation, correlation coefficient
    methods = [cv.TM_SQDIFF_NORMED, cv.TM_CCORR_NORMED, cv.TM_CCOEFF_NORMED]
    th, tw = tpl.shape[:2]
    for md in methods:
        print(md)
        result = cv.matchTemplate(target, tpl, md)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        if md == cv.TM_SQDIFF_NORMED:  # for squared difference, smaller is better
            tl = min_loc
        else:  # for correlation, larger is better
            tl = max_loc
        br = (tl[0] + tw, tl[1] + th)  # tl is the top-left corner, br the bottom-right
        cv.rectangle(target, tl, br, (0, 0, 255), 2)
        # cv.imshow("match" + str(md), target)
        cv.imshow("result" + str(md), result)  # np.str was removed from NumPy; use str
def findPic(self, url, threshold=0.9, size=(0, 0, 0, 0), img=None, template=None):
    # `img is None` replaces the roundabout np.all(img == None) checks
    if img is None:
        img = self.grab(size) if size != (0, 0, 0, 0) else self.grab()
    if template is None:
        template = cv2.imread(url, 0)
    else:
        url = "template"
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # (a stray cv2.waitKey(0) blocked here in the original; removed)
    if max_val >= threshold:
        return (max_loc[0] + w // 2, max_loc[1] + h // 2)  # match center
    else:
        return (-1, -1)