def bloom_d(img, debug=False):
    """Debug helper: extract the red water-line and report the ratio of
    red pixels after Otsu binarization.

    :param img: BGR crop of the red water-line region.
    :param debug: when True, show intermediate extraction windows.
    """
    print('红水线尺寸:', img.shape)
    img_bgr = img.copy()  # working copy used for drawing / extraction
    mask_red, redhsv = separate_color(img_bgr, color='red')
    if debug:
        cv2.namedWindow('redhsv', cv2.WINDOW_NORMAL)
        cv2.imshow('redhsv', redhsv)
    # Extract the seal ("yingzhang") region and invert it so it can be
    # excluded from the red line (currently unused, see commented line).
    mask_yz, yingzhang = separate_color(img_bgr, color='yingzhang')
    mask_yz = cv2.bitwise_not(mask_yz)
    if debug:
        cv2.imshow('yingzhang', yingzhang)
    # redline = cv2.bitwise_and(redhsv, redhsv, mask=mask_yz)
    # ==================================================
    redline = redhsv.copy()
    gray = cv2.cvtColor(redline, cv2.COLOR_BGR2GRAY)  # gray image for judging dark bloom lines
    # ==================================================
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Expected range for a normal red water-line: 1*W*10 <= cnt <= 2*W*10.
    h, w = binary.shape
    # FIX: vectorized count replaces the O(h*w) Python double loop —
    # identical result, dramatically faster on full-size crops.
    cnt = int(np.sum(binary > 10))
    print('range should be in ', w * 10, 2 * w * 10)
    print('红水线比例:', cnt, '/', h * w)
def __init__(self, checkdir: check.CheckDir, bbox=None):
    """Crop the tuanhua (guilloche) ROI from every imaging channel and
    classify the pattern type.

    :param checkdir: image bundle exposing .upg / .upir / .upirtr / .upuv
        (UV reflection detects dark alteration ink blobs; IR transmission
        detects scraping; visible light supplies the text mask).
    :param bbox: tuanhua coordinates (xmin, ymin, xmax, ymax); if None the
        prior position anchored to the top-right corner is used.
    """
    upg_img = checkdir.upg
    upir_img = checkdir.upir
    upirtr_img = checkdir.upirtr
    upuv_img = checkdir.upuv
    check_h, check_w, check_c = upuv_img.shape
    # Predefined (prior) position, offset from the top-right corner.
    self.xmin = check_w - TH_W - TH_XOFF_RIGHT
    self.ymin = TH_YOFF_TOP
    self.xmax = self.xmin + TH_W
    self.ymax = self.ymin + TH_H
    if bbox is not None:
        assert len(bbox) == 4
        self.xmin, self.ymin, self.xmax, self.ymax = bbox
    # Sanity check: box must lie strictly inside the check image.
    assert check_w >= self.xmax > self.xmin >= 0 and check_h > self.ymax > self.ymin >= 0
    self.upg_box = upg_img[self.ymin:self.ymax, self.xmin:self.xmax]
    self.upir_box = upir_img[self.ymin:self.ymax, self.xmin:self.xmax]
    self.upirtr_box = upirtr_img[self.ymin:self.ymax, self.xmin:self.xmax]
    self.upuv_box = upuv_img[self.ymin:self.ymax, self.xmin:self.xmax]
    # Under UV the printed text area must be ignored: build a black-text
    # mask from the visible-light crop and dilate to cover stroke edges.
    self.text_mask, _ = separate_color(self.upg_box, color="black")
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 3))
    self.text_mask = cv2.dilate(self.text_mask, kernel, iterations=1)
    # Classify the tuanhua (sets self.pixel_sum as a side effect).
    self.th_type = self._check_tuanhua_type(self.upuv_box)
    print('curr tuanhua type:', self.th_type)
    self.tuanhua_bin = None
    if self.th_type == ThType.TH_BRIGHT:
        self.tuanhua_bin, _ = separate_color(self.upuv_box, color='th_bright')
    else:
        self.tuanhua_bin, _ = separate_color(self.upuv_box, color='th_normal')
        # Fallback: if the 'th_normal' extraction is too sparse, widen
        # the HSV range. 18000 is an empirical pixel-count threshold.
        if self.pixel_sum < 18000:
            self.tuanhua_bin, _ = separate_color(self.upuv_box, color='th_all')
def bloom_debug():
    """Batch driver: run red water-line bloom detection over a directory
    and write annotated result images for visual inspection."""
    # 变造红水线
    # check_dir = r'E:\DataSet\redline\UpG_redline'
    # debug_dir = r'E:\DataSet\redline\debug_bloom_th5'
    # 正常红水线
    src_dir = r'E:\DataSet\redline_ok\redline_normal'
    out_dir = r'E:\DataSet\redline_ok\debug_1'
    for imgname in os.listdir(src_dir):
        print('process:', imgname)
        img = cv_imread(os.path.join(src_dir, imgname))
        _, red_img = separate_color(img, color='red')
        spans = detect_bloom(red_img)
        height = img.shape[0]
        for x1, x2 in spans:
            # Wide spans (likely real bloom) in red, narrow ones in blue.
            color = (0, 0, 255) if x2 - x1 > 10 else (255, 0, 0)
            cv2.rectangle(img, (x1, 2), (x2, height - 2), color, 1)
        cv2.imwrite(os.path.join(out_dir, imgname), img)
def __init__(self, upg_img, upir_img, upirtr_img, upuv_img, bbox=None):
    """Crop the tuanhua ROI from the four imaging channels.

    :param upg_img: visible-light image, used to build the text mask
    :param upir_img: infrared reflection image
    :param upirtr_img: infrared transmission image, used for scratch detection
    :param upuv_img: UV reflection image, used for dark-ink-blob detection
    :param bbox: tuanhua coordinates (xmin, ymin, xmax, ymax); if None,
        the prior position anchored to the top-right corner is used
    """
    check_h, check_w, check_c = upuv_img.shape
    # Prior position, offset from the top-right corner of the check.
    xmin = check_w - TH_W - TH_XOFF_RIGHT
    ymin = TH_YOFF_TOP
    xmax = xmin + TH_W
    ymax = ymin + TH_H
    # FIX: compare with None using identity, not equality (PEP 8;
    # `bbox != None` invokes __ne__ and is non-idiomatic).
    if bbox is not None:
        assert len(bbox) == 4
        xmin, ymin, xmax, ymax = bbox
    # The box must lie strictly inside the check image.
    assert check_w >= xmax > xmin >= 0 and check_h > ymax > ymin >= 0
    self.upg_box = upg_img[ymin:ymax, xmin:xmax]
    self.upir_box = upir_img[ymin:ymax, xmin:xmax]
    self.upirtr_box = upirtr_img[ymin:ymax, xmin:xmax]
    self.upuv_box = upuv_img[ymin:ymax, xmin:xmax]
    # Text mask from the visible-light crop, dilated to cover stroke edges.
    self.text_mask, _ = separate_color(self.upg_box, color="black")
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    self.text_mask = cv2.dilate(self.text_mask, kernel, iterations=1)
def detect_whiteink(img, debug=False):
    """Detect white ink blobs, returning parallel lists (xs, ys, ws, hs)
    of bounding boxes; also draws the boxes on `img` in place.

    Strategy: mean-blur the white mask so that isolated specks fall below
    a threshold while solid blobs (even ones containing dark characters)
    stay above it, then binarize, dilate and take contours.
    """
    mask, res = separate_color(img, color='white')
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # 目标: 把零散的小白点腐蚀掉, 白墨团区域全搞成白色(非0区域)
    # 问题:白墨团中可能带有汉字, 也可能把墨团腐蚀掉了,区域变小
    # 高斯滤波,会使小白点区域变大
    # mask = cv2.GaussianBlur(mask, (5,5), 1)
    # 中值滤波一次, 带文字的白墨团也会被腐蚀
    # mask = cv2.medianBlur(mask, 5)
    # Mean filter then binarize: a speck's neighborhood mean is low, a
    # blob's is high even with embedded dark text; threshold accordingly.
    mask = cv2.blur(mask, (7, 7))
    thre = (3 * 3 * 255) // (7 * 7)
    _, mask = cv2.threshold(mask, thre, 255, cv2.THRESH_BINARY)
    # erosion = cv2.erode(mask, element1, iterations=1)
    # erosion = cv2.GaussianBlur(erosion, (5, 5), 4)
    # Dilate once so contours stand out.
    dilation = cv2.dilate(mask, element2, iterations=1)
    _, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    xs, ys, ws, hs = [], [], [], []
    for cnt in contours:
        # Drop tiny contours by area.
        if cv2.contourArea(cnt) < 16:
            continue
        x, y, w, h = cv2.boundingRect(cnt)
        xs.append(x)
        ys.append(y)
        ws.append(w)
        hs.append(h)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 1)
    if debug:
        cv2.imshow('mask_white', np.vstack((mask, dilation)))
        cv2.imshow('white_conrours', img)
    return xs, ys, ws, hs
def __init__(self, bank_upuv):
    """Prepare the bank-icon crop: keep the original BGR image, extract
    the yellow/orange icon colors, and binarize the extraction (Otsu)."""
    self.type = None
    self.central_img = None
    self.icon_bgr = bank_upuv.copy()
    # Direct binarization would also pick up security fibers:
    # self.icon_gray = cv2.cvtColor(self.icon_bgr, cv2.COLOR_BGR2GRAY)
    # _, self.icon_bin = cv2.threshold(self.icon_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Extract yellow/orange first, then binarize only that extraction.
    hsv_mask, hsv_img = separate_color(bank_upuv, color='bank_yellow')
    self.det_hsv_mask = hsv_mask
    self.det_hsv = hsv_img
    gray = cv2.cvtColor(hsv_img, cv2.COLOR_BGR2GRAY)  # NOTE(review): histogram equalization was considered here
    self.icon_gray = gray
    _, self.icon_bin = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
def _check_tuanhua_type(self, tuanhua_img):
    """Classify the tuanhua by pixel count and mean V (brightness).

    :param tuanhua_img: UV-light tuanhua crop (BGR)
    :return: ThType member

    Empirical reference values (pix_sum, v_mean):
        faded:   9130, 180
        normal:  20000+, 205
        yellowish: 22000, 219
        bright:  24000, 220
    """
    tuanhua_hsv = cv2.cvtColor(tuanhua_img, cv2.COLOR_BGR2HSV)
    # Extract the tuanhua pattern with a generous threshold first.
    th_mask, th_bgr = separate_color(self.upuv_box, color='th_judge')
    # Classify along two axes: pixel count + brightness.
    self.pixel_sum = np.sum(th_mask > 10)
    th_v = tuanhua_hsv[:, :, 2]  # V channel, shape (H, W)
    # FIX: vectorized masked sum replaces the O(H*W) Python double loop.
    # NOTE: the mask condition here is > 0 while pixel_sum uses > 10 —
    # preserved as in the original (th_mask is binary in practice).
    v_sum = int(np.sum(th_v[th_mask > 0], dtype=np.int64))
    # FIX: guard against an empty extraction (pixel_sum == 0) instead of
    # dividing by zero; an empty mask classifies as TH_LIGHT below.
    self.v_mean = v_sum // self.pixel_sum if self.pixel_sum > 0 else 0
    print('curr tuanhua pixel sum:', self.pixel_sum, end=' ')
    print('curr tuanhua V mean:', self.v_mean)
    # cv2.imshow('hsv msk', th_mask)
    # cv2.imshow('TH', tuanhua_img)
    # cv2.waitKey(0)
    if self.pixel_sum < 11000 and self.v_mean < 205:
        return ThType.TH_LIGHT
    if self.v_mean >= 220 and self.pixel_sum >= 24000:
        return ThType.TH_BRIGHT
    return ThType.TH_NORMAL
def detect_blackink(img, debug=False):
    """Experimental black-ink-blob pipeline: erode thick strokes, filter
    out specks, erode/median twice to kill small clumps, then dilate and
    show every intermediate stage (sample cases: UpG_161314, UpG_161319)."""
    mask, res = separate_color(img, color='black')
    cimg = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    big_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    small_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # Step 1: small erosion thins heavy strokes — leaves noise dots and
    # small clumps from bold glyphs.
    thinned = cv2.erode(mask, small_ellipse, iterations=1)
    cv2.imshow('first ero', thinned)
    # Step 2: median blur removes the small noise; clumps remain.
    denoised = cv2.medianBlur(thinned, 7)
    # Step 3: erode the clumps (3x3 too weak for big blobs), median again;
    # repeated twice — tiny real blobs may not survive this.
    stage = cv2.erode(denoised, big_ellipse, iterations=1)
    stage = cv2.medianBlur(stage, 5)
    stage = cv2.erode(stage, big_ellipse, iterations=1)
    stage = cv2.medianBlur(stage, 5)
    # Step 4: dilate back so contours become prominent.
    grown = cv2.dilate(stage, big_ellipse, iterations=2)
    cv2.imshow('mask', mask)
    cv2.imshow('blur', denoised)
    cv2.imshow('fushi', stage)
    cv2.imshow('pengzhang', grown)
def detect_bloom2(img, debug=False):
    """Detect red water-line bloom (ink bleeding) regions.

    Pipeline: extract red minus seal -> Otsu binarize -> erode + mean-blur
    to get candidate regions (RECTS_1) -> merge nearby boxes with a
    union-find -> area-filter into candidates RES_1. If the line color is
    deep (red_much), a second, stronger erosion yields definite bloom
    boxes RECTS_2, which then validate RES_1 into RES_2/RES_3 by covered
    area and density.

    :param img: BGR crop of the red water-line region
    :param debug: show intermediates and print diagnostics
    :return: list of [xmin, ymin, xmax, ymax] bloom boxes (RECTS_2 + RES_3),
        or the merged candidate list when the line color is light
    """
    img_bgr = img.copy()  # drawing canvas
    red_much = False
    mask_red, redhsv = separate_color(img_bgr, color='red')
    mask_yz, yingzhang = separate_color(img_bgr, color='yingzhang')
    mask_yz = cv2.bitwise_not(mask_yz)
    # Red extraction with the (dark red) seal removed.
    redline = cv2.bitwise_and(redhsv, redhsv, mask=mask_yz)
    # redline = redhsv
    # ==================================================
    gray = cv2.cvtColor(redline, cv2.COLOR_BGR2GRAY)  # gray image for dark bloom lines
    # ==================================================
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Count red pixels to estimate the overall color depth of the line.
    cnt = 0
    h, w = binary.shape
    for i in range(h):
        for j in range(w):
            if binary[i][j] > 10:
                cnt += 1
    print('range should be in ', w * 10, 2 * w * 10)
    print('红水线比例:', cnt, '/', h * w)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 3))
    ero1 = cv2.erode(binary, tempkernel, iterations=1)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    ero2 = cv2.erode(ero1, tempkernel, iterations=1)
    # Light-colored lines skip the second erosion.
    if cnt < 2 * w * 10:
        ero2 = ero1
        red_much = False
    else:
        red_much = True
    # ero2: outside bloom regions mostly noise remains, but deep-colored
    # normal lines can still trigger false positives.
    # ====================================11111=====================================
    blur_avg = cv2.blur(ero2, (11, 11))
    thre = (4 * 4 * 255) // (11 * 11)
    _, blur_bin = cv2.threshold(blur_avg, thre - 10, 255, cv2.THRESH_BINARY)
    # _, blur_bin = cv2.threshold(blur_avg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Outer contours only.
    _, contours, hierarchy = cv2.findContours(blur_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    CONTOURS_1 = contours  # kept in case it is needed later
    RECTS_1 = []
    AREA_1 = []
    for i in range(len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        area = cv2.contourArea(cnt)
        RECTS_1.append([x, y, x + w, y + h])
        AREA_1.append(area)
    if debug:
        print('find contours from avg blur:', len(RECTS_1))
    # Merge the N1 boxes via union-find when they overlap or nearly touch.
    N1 = len(RECTS_1)
    uf = UnionFind(N1)
    for i in range(0, N1 - 1):
        for j in range(i + 1, N1):
            xmin1, ymin1, xmax1, ymax1 = RECTS_1[i]
            xmin2, ymin2, xmax2, ymax2 = RECTS_1[j]
            l_x = max(xmin1, xmin2)
            r_x = min(xmax1, xmax2)
            t_y = max(ymin1, ymin2)
            b_y = min(ymax1, ymax2)
            # Empirical gap thresholds (15 px in x, 10 px in y) for merging.
            if (r_x - l_x >= 0) and (b_y - t_y >= 0):
                uf.union(i, j)
            elif b_y - t_y > 0 and abs(r_x - l_x) <= 15:
                uf.union(i, j)
            elif r_x - l_x > 0 and abs(b_y - t_y) <= 10:
                uf.union(i, j)
            elif (r_x - l_x < 0) and (b_y - t_y < 0):
                if abs(r_x - l_x) <= 15 and abs(b_y - t_y) <= 10:
                    uf.union(i, j)
    # Group indices by union-find root.
    rect_merge = []
    area_merge = []
    D = {}
    for idx in range(0, N1):
        p = uf.find(idx)
        if p in D.keys():
            D[p].append(idx)
        else:
            D[p] = []
            D[p].append(p)
    if debug:
        print('union find:', D)
    for k, rects in D.items():
        if (len(rects) == 1):
            rect_merge.append(RECTS_1[rects[0]])
            xmin, ymin, xmax, ymax = RECTS_1[rects[0]]
            area_merge.append((xmax - xmin) * (ymax - ymin))
        else:
            xmin = min([RECTS_1[r][0] for r in rects])
            ymin = min([RECTS_1[r][1] for r in rects])
            xmax = max([RECTS_1[r][2] for r in rects])
            ymax = max([RECTS_1[r][3] for r in rects])
            rect_merge.append([xmin, ymin, xmax, ymax])
            area_merge.append((xmax - xmin) * (ymax - ymin))
    if debug:
        print('avgblur合并后的框:', rect_merge)
        print('avgblue合并后面积:', area_merge)
    # rect_merge still contains many false positives on thick normal
    # lines, so it is only a candidate set. Area-filter into RES_1.
    # ================================================================
    RES_1 = []  # candidate bloom boxes; many false positives on normal images
    # ================================================================
    for r in rect_merge:
        xmin, ymin, xmax, ymax = r
        if xmax - xmin < 15 or ymax - ymin < 15:
            # Small boxes drawn in blue, discarded.
            cv2.rectangle(img_bgr, (xmin, ymin), (xmax, ymax), (255, 200, 0), 1)
        else:
            # Large boxes drawn in red and kept as "maybe" candidates.
            cv2.rectangle(img_bgr, (xmin, ymin), (xmax, ymax), (0, 0, 255), 1)
            RES_1.append(r)
    if debug:
        print('根据面积(15,15)过滤后的候选框RES_1:', RES_1)
        print('========================================')
    # Light-colored line: return the first-stage result directly.
    if red_much is False:
        if debug:
            cv2.imshow('red little', img_bgr)
            cv2.waitKey(0)
        return rect_merge
    # Deep-colored line: continue with a second, stronger erosion pass —
    # any large contour that survives it is definitely bloom.
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
    ero3 = cv2.erode(ero2, tempkernel, iterations=1)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    res_2 = cv2.dilate(ero3, tempkernel, iterations=1)
    # ======================================22222===================================
    _, contours, hierarchy = cv2.findContours(res_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    RECTS_2 = []
    RECTS_2_TINY = []  # rectangles filtered out for being small
    for i in range(len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        area = cv2.contourArea(cnt)
        if w < 15 or h < 15 or area < 200:  # empirical size thresholds
            if w < 3 or h < 3 or area < 9:  # truly tiny: ignore entirely
                pass
            else:
                RECTS_2_TINY.append([x, y, x + w, y + h])
            continue
        RECTS_2.append([x, y, x + w, y + h])
        cv2.rectangle(img_bgr, (x, y), (x + w, y + h), (0, 255, 0), 1)
    if debug:
        print('再次腐蚀后,一定是晕染的区域RECTS_2:', RECTS_2)
        print('太小过滤掉的框RECTS_2_TINY::', len(RECTS_2_TINY), RECTS_2_TINY)

    def in_rect(cx, cy, rect):
        # True when the point (cx, cy) lies inside rect.
        assert len(rect) == 4
        xmin, ymin, xmax, ymax = rect
        return xmin <= cx <= xmax and ymin <= cy <= ymax

    # RECTS_2 boxes are generally subsets of RES_1 boxes: if the covered
    # sub-area is comparable to the candidate, confirm it as bloom.
    RES_1_FLAG = [0 for _ in range(0, len(RES_1))]
    RES_1_PART = [[] for _ in range(0, len(RES_1))]
    RES_1_PART_AREA = [0 for _ in range(0, len(RES_1))]
    for r2 in RECTS_2:
        x1, y1, x2, y2 = r2
        rect2_area = (x2 - x1) * (y2 - y1)
        for i in range(0, len(RES_1)):
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2
            if in_rect(cx, cy, RES_1[i]):
                RES_1_FLAG[i] += 1
                RES_1_PART[i].append(r2)
                RES_1_PART_AREA[i] += rect2_area
    # ==============================================================
    RES_2 = []  # boxes from RES_1 kept by rule (merging may overgrow them)
    # ==============================================================
    for i in range(0, len(RES_1)):
        x1, y1, x2, y2 = RES_1[i]
        r1_area = (x2 - x1) * (y2 - y1)
        # Another empirical threshold (15% covered area).
        if RES_1_FLAG[i] != 0 and (RES_1_PART_AREA[i] / r1_area > 0.15):
            RES_2.append(RES_1[i])
            RES_1_FLAG[i] = -1  # confirmed; skip in later passes
            x1, y1, x2, y2 = RES_1[i]
            # cv2.rectangle(img_bgr, (x1, y1), (x2, y2), (0, 255, 0), 2)
    if debug:
        print('根据腐蚀图依然有大轮廓,RES_1中可以认为是晕染的区域RES_2:', RES_2)
    # Accumulate the tiny boxes into the remaining (unconfirmed) candidates.
    for i in range(0, len(RES_1)):
        if RES_1_FLAG[i] == -1:
            continue
        for r2_tiny in RECTS_2_TINY:
            x1, y1, x2, y2 = r2_tiny
            r2_tiny_area = (x2 - x1) * (y2 - y1)
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2
            if in_rect(cx, cy, RES_1[i]):
                RES_1_FLAG[i] += 1
                RES_1_PART_AREA[i] += r2_tiny_area
    # Density pass: count/area ratios over the candidate box.
    RES_3 = []
    for i in range(0, len(RES_1)):
        # FIX: the original compared the whole list (`RES_1_FLAG == -1`),
        # which is always False; the intent (per the flag's comment) is to
        # skip candidates already confirmed into RES_2.
        if RES_1_FLAG[i] == -1:
            continue
        x1, y1, x2, y2 = RES_1[i]
        r1_area = (x2 - x1) * (y2 - y1)
        if debug:
            print('长宽比:', (x2 - x1) / (y2 - y1))
        # Very elongated candidates are normal line segments, not bloom.
        if (x2 - x1) / (y2 - y1) > 3.0:
            continue
        if debug:
            print('第', i, '个, 面积所占比例:', RES_1_PART_AREA[i] / r1_area)
        if RES_1_PART_AREA[i] / r1_area > 0.05 and RES_1_FLAG[i] > 3:
            cv2.rectangle(img_bgr, (x1, y1), (x2, y2), (0, 0, 0), 2)
            RES_3.append(RES_1[i])
    if debug:
        print('根据密度可以认为是晕染的区域RES_3:', RES_3)
    if debug:
        cv2.imshow('redline', redline)
        cv2.imshow('separate', np.vstack((gray, binary)))
        cv2.imshow('process', np.vstack((ero1, ero2, blur_avg, blur_bin, ero3, res_2)))
        cv2.imshow('res', img_bgr)
        cv2.waitKey(0)
    print("返回晕染区域结果:", RECTS_2 + RES_3)
    return RECTS_2 + RES_3  # (xmin, ymin, xmax, ymax)
def detect_scratch(img, debug=False):
    """Detect scratch ("gratch") regions: extract the scratch color mask,
    denoise, open, take outer contours and draw their bounding boxes.
    Returns nothing; draws on `img` and optionally shows debug windows.
    """
    mask, res = separate_color(img, color='gratch')
    # 1.1 Median blur is very effective against salt-and-pepper noise and
    # avoids the detail-smearing of linear filters; larger kernel = more
    # noise removed.
    blur = cv2.medianBlur(mask, 3)
    # 1.2 Gaussian blur performed worse than median on forged images — dropped.
    # img = cv2.GaussianBlur(mask,(5,5),2)
    # 2.1 Opening filters specks again and keeps the main scratch area.
    rectKernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    opening = cv2.morphologyEx(blur, cv2.MORPH_OPEN, rectKernel, iterations=1)
    # 2.2 Alternative: skip opening and use the blurred mask directly.
    # opening = blur
    cimg = img  # drawing target (NOTE: aliases the input, no copy)
    # 3. RETR_EXTERNAL: outermost contours only (RETR_TREE for all).
    _, contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # print(hierarchy.shape)  # (1, count, 4) [Next, Previous, First Child, Parent]
    # print(contours[0].shape)  # (3, 1, 2) — 3 points, not (3, 2)
    print('find gratch conrours:', len(contours))
    # (commented-out polygon-approximation / max-rect experiments removed
    # from execution; left here as the author's exploration trail)
    # for idx in range(0, len(contours)):
    #     ...approxPolyDP / circle / boundingRect experiments...
    rects = []
    havemerge = []
    for i in range(len(contours)):
        cnt = contours[i]
        # Contour area computed to filter small ones (currently unused here).
        area = cv2.contourArea(cnt)
        rects.append(cv2.boundingRect(cnt))
    # NOTE(review): this loop is an unfinished merge pass — `havemerge`
    # is never filled, so the body never does anything.
    for i in range(len(rects)):
        if i in havemerge:
            continue
    xs, ys, ws, hs = [], [], [], []
    # 4. Intended: merge connected regions by contour distance — not
    # implemented; this loop just collects and draws all bounding boxes.
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        x, y, w, h = cv2.boundingRect(cnt)
        xs.append(x)
        ys.append(y)
        ws.append(w)
        hs.append(h)
        cv2.rectangle(cimg, (x, y), (x + w, y + h), (0, 0, 255), 1)
    if debug:
        cv2.imshow('mask', mask)
        cv2.imshow('blur', blur)
        cv2.imshow('opening', opening)
        cv2.imshow('draw_mask', cimg)
def detect_bloom(img_bgr, debug=False):
    """Detect bloom on the red water-line via vertical projection.

    Extract red -> Otsu binarize -> erode specks -> stretch height x3 ->
    morphological preprocessing -> count white pixels per column -> columns
    whose 10-px windowed mean exceeds half the stretched height are bloom
    boundaries; adjacent boundaries are merged into (x1, x2) pairs.

    :param img_bgr: BGR crop of the red water-line region
    :param debug: plot the projection and show annotated results
    :return: list of (x_start, x_end) column spans
    """
    mask, img = separate_color(img_bgr, color='red')
    # 1. Red extraction (background all black) converted to gray / binary.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    th, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    h, w = img.shape[:2]
    # 2.1 Erode before stretching to remove small dots.
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    ero = cv2.erode(gray, tempkernel, iterations=1)
    if debug:
        cv2.imshow('gray_pre', np.vstack((gray, ero)))
    # 2.2 Horizontal dilation per line was tried and dropped (the small
    # height made everything dilate together).
    # 3. Stretch the height x3 so morphology has room to work.
    gray = cv2.resize(ero, (w, h * 3))
    dilation = preprocess_redline(gray)
    # 4. Vertical projection: white-pixel count per column.
    v_projection = np.sum(dilation // 255, axis=0)
    if debug:
        plt.plot(v_projection, 'r-')
        plt.show()
    # 5. Threshold: half of the stretched height.
    # FIX: removed the dead store `thed = h * 3 // 3 * 2` that was
    # immediately overwritten (leftover of threshold experimentation).
    thed = h * 3 // 2
    print('thred:', thed)
    # 6.1 Find bloom boundaries: a column qualifies when the mean of the
    # next 10 projection values reaches the threshold (single-column
    # checks were too noisy since the histogram is not monotonic).
    col_bound_list = []
    for i, x in enumerate(v_projection[:-10]):
        mean_1 = np.mean(v_projection[i:i + 10], dtype=int)
        if mean_1 >= thed:
            col_bound_list.append(i + 5)  # center of the 10-px window
    # 6.2 Merge boundaries closer than 5 px into (start, end) pairs.
    pairs = []
    idx = 0
    while idx < len(col_bound_list):
        left = idx
        while idx + 1 < len(col_bound_list) and col_bound_list[idx + 1] - col_bound_list[idx] < 5:
            idx += 1
        pairs.append((col_bound_list[left], col_bound_list[idx]))
        idx += 1
    print(pairs)
    if debug:
        for x1, x2 in pairs:
            if x2 - x1 > 10:
                cv2.rectangle(img_bgr, (x1, 2), (x2, h - 2), (0, 0, 255), 1)  # wide: red
            else:
                cv2.rectangle(img_bgr, (x1, 2), (x2, h - 2), (255, 0, 0), 1)  # narrow: blue
        cv2.imshow('result', img_bgr)
    return pairs
def to_detect_scratch(checkdir:CheckDir, bbox):
    """Detect scraping inside `bbox` using Canny edges on the IR
    transmission image, excluding the text area, then confirming each
    suspect box by the fraction of very bright pixels.

    :param checkdir: image bundle exposing .upg and .upirtr
    :param bbox: (xmin, ymin, xmax, ymax) region to inspect
    :return: list of [xmin, ymin, xmax, ymax] scratch boxes (box-local coords)
    """
    # Text regions have large gradients, so they must be masked out.
    xmin, ymin, xmax, ymax = bbox
    upg_bbox = checkdir.upg[ymin + 5:ymax, xmin:xmax]
    upirtr_bbox = checkdir.upirtr[ymin + 5:ymax, xmin:xmax]
    text_mask, _ = separate_color(upg_bbox, color="black")
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    text_mask = cv2.dilate(text_mask, kernel, iterations=2)
    # IR transmission image: low Canny minValue finds more edge points,
    # high maxValue is stricter. Tried (80, 250) and (150, 300).
    upirtr_gray = cv2.cvtColor(upirtr_bbox, cv2.COLOR_BGR2GRAY)
    v1 = cv2.Canny(upirtr_gray, 160, 310)
    v1_not_text = cv2.bitwise_and(v1, cv2.bitwise_not(text_mask))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    v1_not_text = cv2.dilate(v1_not_text, kernel, iterations=1)
    v1_not_text = cv2.erode(v1_not_text, kernel, iterations=1)
    v1_show = cv2.cvtColor(v1_not_text, cv2.COLOR_GRAY2BGR)
    # Brightness map: keep only near-white pixels (> 240).
    th_bright = 240
    upirtr_gray[upirtr_gray <= th_bright] = 0
    big245_show = cv2.cvtColor(upirtr_gray, cv2.COLOR_GRAY2BGR)
    _, contours, hierarchy = cv2.findContours(v1_not_text, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    suspect_rects = []
    for i in range(len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        # Filter the vertical lines of the tuanhua pattern.
        if w <= 5:
            continue
        if w*h > 64:
            suspect_rects.append([x, y, w, h])
            cv2.rectangle(v1_show, (x, y), (x + w, y + h), (0, 0, 250))
    scratch_bbox = []
    # FIX: the loop variable used to be named `bbox`, silently shadowing
    # the function parameter; renamed to `rect`.
    for rect in suspect_rects:
        x, y, w, h = rect
        crop = upirtr_gray[y:y+h, x:x+w]
        s = np.sum(crop >= th_bright)
        # Empirical ratio: confirm when bright pixels exceed 1/15 of the box.
        if 15*s > w*h:
            cv2.rectangle(big245_show, (x, y), (x + w, y + h), (0, 0, 250))
            scratch_bbox.append([x, y, x+w, y+h])
    if len(scratch_bbox) > 0:
        cv2.imshow('tuanhua', np.hstack([upg_bbox, upirtr_bbox, v1_show, big245_show]))
        cv2.imshow('canny', np.hstack([text_mask, v1, v1_not_text, upirtr_gray]))
        cv2.waitKey(0)
    return scratch_bbox
def __init__(self, check_dir):
    """Build the tuanhua matching template from a reference check directory.

    Loads the four channel images, crops the tuanhua ROI (TuanHuaROI with
    prior coordinates), picks a small template patch for later alignment,
    and precomputes the green-extraction mask, ROI mask and layout boxes.

    :param check_dir: directory containing Upuv/UpG/Upir/Upirtr .bmp files
    """
    self.debug = False
    # Tuanhua prior geometry (pixels).
    self.th_w = 374
    self.th_h = 145
    self.off_x_by_right = 25
    self.off_y_by_top = 10
    TEMP_DIR = check_dir
    upuv = TEMP_DIR + '/' + 'Upuv.bmp'
    upg = TEMP_DIR + '/' + 'UpG.bmp'
    upir = TEMP_DIR + '/' + 'Upir.bmp'
    upirtr = TEMP_DIR + '/' + 'Upirtr.bmp'
    upuv_img = cv_imread(upuv)
    upg_img = cv_imread(upg)
    upir_img = cv_imread(upir)
    upirtr_img = cv_imread(upirtr)
    self.th_template = TuanHuaROI(upg_img,upir_img, upirtr_img, upuv_img)
    self.im = self.th_template.upuv_box.copy()
    # Template region: left part of the tuanhua, vertical center ~74.
    self.tr_1 = TmpRegionInfo(5, 58 - 20, 30 + 30, 90 + 20, self.im)
    # Right-side alternative:
    # self.tr_1 = TmpRegionInfo(325, 74-50, 325+50, 74+50, self.im)
    # Top alternative: y in [6, 30] (beyond 30 may include text), center x 187.
    # self.tr_1 = TmpRegionInfo(187 - 50, 6, 187 + 50, 30, self.im)
    cv2.imshow('tmp_1', self.tr_1.tmp_img)
    # 1) Color extraction (green tuanhua under UV).
    self.hsv_mask, self.hsv = separate_color(self.im, color='tuanhua_green')
    # 2) Binarization: hsv -> gray -> denoise -> bin; the denoise step
    # removes very small specks before the final matching mask is built.
    # _, self.hsv_mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    hsv_gray = cv2.cvtColor(self.hsv, cv2.COLOR_BGR2GRAY)
    hsv_gray_denoise = cv2.fastNlMeansDenoising(hsv_gray, h=5.0)
    th, hsv_gray = cv2.threshold(hsv_gray_denoise, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Fixed threshold performed poorly:
    # _, hsv_gray = cv2.threshold(hsv_gray_denoise, 150, 255, cv2.THRESH_BINARY)
    # Adaptive threshold also performed poorly:
    # hsv_gray = cv2.adaptiveThreshold(hsv_gray_denoise, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, C= 0)
    # print("去噪后OSTU二值化的阈值:", th)
    self.hsv_mask = hsv_gray.copy()
    gray = cv2.cvtColor(self.im, cv2.COLOR_BGR2GRAY)
    # _, gray = cv2.threshold(gray, thre, 255, cv2.THRESH_BINARY)
    _, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Outer contours only (RETR_EXTERNAL; RETR_CCOMP was considered).
    _, contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    H, W = gray.shape
    self.roi_mask = np.zeros((H, W), dtype=np.uint8)  # filled ROI mask
    # cv2.drawContours(im, contours, -1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
    count = 0
    # Fill only contours covering more than half the crop — presumably the
    # tuanhua outline itself; TODO confirm with a template image.
    for cnt in contours:
        if cv2.contourArea(cnt) > H * W // 2:
            count += 1
            cv2.drawContours(self.roi_mask, [cnt], 0, 255, -1, lineType=cv2.LINE_AA)
    # assert count == 1, 'Non-standard group flower image template'
    # Origin at the top-right corner, x to the left, y down: text region.
    self.ignore_x_range = (110, 310)
    self.ignore_y_range = (80, 160)
    # In tuanhua coordinates: number box and receiving-bank box.
    # self.num_box = [90, 70, 290, 120]  # xmin 90 / 100 tried
    self.num_box = [80, 70, 290, 120]
    self.recvbank_box = (0, 90)  # (xmin, ymin); ymin 90 / 100 tried
    self.blank_thresh = 1
    self.white_thresh = 3
def measure(self, th_dect:TuanHuaROI, bnd_box):
    """Score a detected tuanhua against the template.

    Aligns the detected UV crop to the template via template matching,
    XORs the two green-extraction masks (text regions removed), then
    scores blank (alteration) and white (scraping) areas on the IR images.

    :param th_dect: detected TuanHuaROI (upg/upuv/upir/upirtr crops + text mask)
    :param bnd_box: [xmin, ymin, xmax, ymax]
    :return: confidence in (0, 1]; lower means more suspicious
    """
    xmin, ymin, xmax, ymax = bnd_box
    upg_box = th_dect.upg_box
    upuv_box = th_dect.upuv_box
    upir_box = th_dect.upir_box
    upirtr_box = th_dect.upirtr_box
    # upg_black_mask, upg_black = separate_color(upg_box, color='black')
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # upg_black_mask = cv2.dilate(upg_black_mask, kernel, iterations=1)
    # Template-match the left-part patch to locate the tuanhua.
    result = cv2.matchTemplate(upuv_box, self.tr_1.tmp_img, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    t_left = max_loc  # (x, y); result is indexed result[y][x]
    det_xmin, det_ymin = t_left
    print('模板在检测图上的位置:', t_left)
    # Match-position verification (passed):
    # cv2.rectangle(upuv_box, (det_xmin, det_ymin), (det_xmin + 25, det_ymin + 32), (0,0,0))
    # cv2.imshow('pipei', upuv_box)
    # cv2.waitKey(0)
    H, W = self.im.shape[:2]
    # Paste the template mask onto a canvas enlarged by 20 px so the
    # detected mask can be shifted into alignment without clipping.
    canvas = np.zeros((H + 20, W + 20), dtype=np.uint8)
    canvas[10: H+10, 10:W+10] = self.hsv_mask
    cv2.imshow('canvas', canvas)
    # Choose the HSV threshold set by the mean V of the detected crop.
    det_hsv_mask, det_hsv = separate_color(upuv_box, color='tuanhua_green')
    tuanhua_hsv = cv2.cvtColor(upuv_box, cv2.COLOR_BGR2HSV)
    th_v = tuanhua_hsv[:, :, 2]
    V_mean = np.mean(th_v)
    if V_mean > 125:
        print('团花亮度: 太亮')
        det_hsv_mask, det_hsv = separate_color(upuv_box, color='tuanhua_green_v90')
    cv2.imshow('dect tuanhua box', upuv_box)
    cv2.imshow('detect tuanhua mask', det_hsv_mask)
    # Lightly denoise the detected image as well, then rebinarize.
    hsv_gray = cv2.cvtColor(det_hsv, cv2.COLOR_BGR2GRAY)
    hsv_gray_denoise = cv2.fastNlMeansDenoising(hsv_gray, h=10.0)
    _, hsv_gray = cv2.threshold(hsv_gray_denoise, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    det_hsv_mask = hsv_gray.copy()
    cv2.imshow('detect tuanhua mask 2', det_hsv_mask)
    cv2.waitKey(0)
    det_H, det_W = upuv_box.shape[:2]
    # NOTE(review): canvas uses template H/W, not det_H/det_W — assumes
    # both crops share the prior size; confirm if boxes can differ.
    det_canvas = np.zeros((H + 20, W + 20), dtype=np.uint8)
    # Offset between the detected image and the template patch position.
    dx = det_xmin - self.tr_1.xmin
    dy = det_ymin - self.tr_1.ymin
    print('图像与模板偏移量:', dx, dy)
    det_canvas[10-dy:10-dy+det_H, 10-dx:10-dx+det_W] = det_hsv_mask
    cv2.imshow('canvas det', det_canvas)
    cv2.waitKey(0)
    # XOR: pixels where template and detection disagree.
    xor_mask = cv2.bitwise_xor(canvas, det_canvas)
    # A = np.zeros((H + 20, W + 20), dtype=np.uint8)
    # A[10-dy:10-dy+det_H, 10-dx:10-dx+det_W] = upg_black_mask
    # A = cv2.bitwise_not(A)
    # xor_mask = cv2.bitwise_and(xor_mask, A)
    """
    xor_mask 图像上做检测
    """
    # Remove the text regions of BOTH template and detection from the XOR.
    text_mask_1 = cv2.bitwise_not(self.th_template.text_mask)
    text_mask_1 = paste_to_canvas(H+20, W+20, 10, 10, text_mask_1)
    text_mask_2 = cv2.bitwise_not(th_dect.text_mask)
    text_mask_2 = paste_to_canvas(H+20, W+20, 10-dy, 10-dx, text_mask_2)
    text_mask_all = cv2.bitwise_and(text_mask_1, text_mask_2)
    xor_mask_2 = cv2.bitwise_and(xor_mask, text_mask_all)
    # xor_mask_2 = cv2.fastNlMeansDenoising(xor_mask_2, h = 40.0)
    cv2.imshow('det xor template', np.vstack([xor_mask, xor_mask_2]))
    cv2.waitKey(0)
    # Erode then dilate (open-like) to drop XOR noise, keep real blobs.
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    ero = cv2.erode(xor_mask_2, tempkernel, iterations=2)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    ero = cv2.dilate(ero, tempkernel, iterations=2)
    ero_show = ero.copy()
    ero_show = cv2.cvtColor(ero_show, cv2.COLOR_GRAY2BGR)
    _, contours, hierarchy = cv2.findContours(ero, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        area = cv2.contourArea(cnt)
        if area > 10:
            # cv2.drawContours(ero_show, [cnt], 0, (0,0,244))
            cv2.rectangle(ero_show, (x,y), (x+w, y+h), (0,0,200))
    cv2.imshow('dila', ero_show)
    cv2.waitKey(0)
    if self.debug:
        print('detected tuanhua:')
        cv2.imshow('tuanhua uv', upuv_box)
        cv2.waitKey(0)
    # IR reflection: check for alteration (dark/blank) areas.
    blank_list = self.dect_blank_areas(upir_box)
    blank_list.sort(reverse=True)
    temp = sum(blank_list)
    # Empirical score ladder: larger blank area => lower confidence.
    if temp > 150:
        return 0.11
    if temp > 100:
        return 0.21
    if temp > 50:
        return 0.31
    # IR transmission: check for scraping (white) areas.
    white_list = self.dect_gratch_by_lowbound(upirtr_box)
    white_list.sort(reverse=True)
    temp = sum(white_list)
    if temp > 100:
        return 0.1
    if temp > 50:
        return 0.2
    if temp > 30:
        return 0.3
    # Many small findings are also suspicious.
    if len(blank_list) > 2 or len(white_list) > 5:
        return 0.85
    return 1.0
def detect_darkline(img, debug=False):
    """Detect abnormally dark segments of the red water-line.

    Extract red, white-out the background, derive an adaptive gray
    threshold from the line's mean gray level, mark dense dark windows on
    a blank canvas, take contours, merge horizontally-overlapping boxes
    with a union-find, and draw the merged boxes.

    :param img: BGR crop of the red water-line region
    :param debug: show intermediate windows
    :return: stacked visualization image (annotated input over canvas);
        the merged-rect list return is currently commented out
    """
    mask_red, redhsv = separate_color(img, color='red')
    h, w, _ = img.shape
    # Turn the black (non-red) background white so gray statistics only
    # reflect the line itself.
    # FIX: vectorized — replaces the O(h*w) Python loop testing
    # a == 0 and b == 0 and c == 0 per pixel; identical result.
    redhsv[(redhsv == 0).all(axis=2)] = [255, 255, 255]
    # cv2.imshow('ori', img)
    # cv2.imshow('tiqutu', redhsv)
    # cv2.imshow('baidi', redhsv)
    if debug:
        cv2.imshow('tiqu', np.vstack((img, redhsv)))
    res_gray = cv2.cvtColor(redhsv, cv2.COLOR_BGR2GRAY)
    print(res_gray.shape)
    # Gray values of line pixels only (background is exactly 255).
    # FIX: vectorized — same values and order as the original append loop.
    vals = res_gray[res_gray != 255]
    # plt.hist(vals,255)
    # plt.show()
    line_gray_mean = int(np.mean(vals))
    print('红水线灰度化均值:', line_gray_mean)
    # Threshold G tracks the line's mean gray: the darker the line, the
    # lower the threshold (trackbar-tuned constants 110 / 10).
    cha = -line_gray_mean + 110
    G = line_gray_mean - (10 if cha < 10 else cha)
    print('阈值为:', G)
    _, res = cv2.threshold(res_gray, G, 255, cv2.THRESH_BINARY)
    ser = cv2.bitwise_not(res)
    # Slide a (kw x kh) window; windows with >= TH white pixels are
    # considered part of a dark line (TH is an empirical module constant).
    blank = np.zeros((h, w), dtype=np.uint8)  # drawing canvas
    kh = 2
    kw = 20
    for i in range(0, h - kh):
        for j in range(0, w - kw):
            temp = ser[i:i + kh, j:j + kw]
            white = np.sum(temp == 255)
            if white >= TH:
                blank[i:i + kh, j:j + kw] = 255
    if debug:
        cv2.imshow('gray th' + str(G), np.vstack((res_gray, res, ser)))
    _, contours, hierarchy = cv2.findContours(blank, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print('find conrours:', len(contours))
    cu_lines = []
    # FIX: loop unpacking renamed (rx, ry, rw, rh) — the original reused
    # x, y, w, h, silently clobbering the image width/height locals.
    for cnt in contours:
        rx, ry, rw, rh = cv2.boundingRect(cnt)
        cu_lines.append([rx, ry, rx + rw, ry + rh])
    print('find lines:', len(cu_lines))
    # Merge boxes on (roughly) the same row with > 50% mutual x-overlap.
    n_lines = len(cu_lines)
    uf = UnionFind(n_lines)
    for i in range(0, n_lines - 1):
        for j in range(i + 1, n_lines):
            xmin1, ymin1, xmax1, ymax1 = cu_lines[i]
            xmin2, ymin2, xmax2, ymax2 = cu_lines[j]
            w1, h1 = xmax1 - xmin1, ymax1 - ymin1
            # FIX: was `ymax2 - ymin1` (copy-paste), which corrupted h2
            # and therefore cy2, breaking the same-row test below.
            w2, h2 = xmax2 - xmin2, ymax2 - ymin2
            cx1, cy1 = xmin1 + w1 // 2, ymin1 + h1 // 2
            cx2, cy2 = xmin2 + w2 // 2, ymin2 + h2 // 2
            x_low = max(xmin1, xmin2)
            x_high = min(xmax1, xmax2)
            if abs(cy1 - cy2) > 10:
                continue
            if x_high - x_low <= 0:
                continue
            iou = x_high - x_low
            percent_1 = iou / (xmax1 - xmin1)
            percent_2 = iou / (xmax2 - xmin2)
            if percent_1 > 0.5 and percent_2 > 0.5:
                uf.union(i, j)
    # Group indices by union-find root.
    # FIX: group the actual index — the original appended the root `p`
    # on first sight, dropping the triggering index whenever the root
    # appears first through another member.
    D = {}
    for idx in range(0, n_lines):
        D.setdefault(uf.find(idx), []).append(idx)
    print('并查集结果:', D)
    rect_merge = []
    for k, rects in D.items():
        if len(rects) == 1:
            rect_merge.append(cu_lines[rects[0]])
            xmin, ymin, xmax, ymax = cu_lines[rects[0]]
        else:
            # Mean x-extent, extreme y-extent of the grouped boxes.
            xmin = np.mean([cu_lines[r][0] for r in rects])
            ymin = min([cu_lines[r][1] for r in rects])
            xmax = np.mean([cu_lines[r][2] for r in rects])
            ymax = max([cu_lines[r][3] for r in rects])
            rect_merge.append([int(xmin), ymin, int(xmax), ymax])
    culine_merge_rect = []
    img_bgr = img.copy()  # drawing copy
    blank_bgr = cv2.cvtColor(blank, cv2.COLOR_GRAY2BGR)  # drawing needs BGR
    for r in rect_merge:
        xmin, ymin, xmax, ymax = r
        if ymax - ymin < 10:
            # Thin boxes drawn in blue on the canvas only.
            cv2.rectangle(blank_bgr, (xmin, ymin), (xmax, ymax), (255, 200, 0), 1)
        else:
            cv2.rectangle(img_bgr, (xmin, ymin), (xmax, ymax), (0, 0, 255), 1)
            cv2.rectangle(blank_bgr, (xmin, ymin), (xmax, ymax), (0, 0, 255), 1)
            culine_merge_rect.append(r)
    if debug:
        cv2.imshow('line merge', blank_bgr)
    # return culine_merge_rect
    return np.vstack((img_bgr, blank_bgr))
def detect_bloom2(img, debug=False):
    """Detect red-ink "bloom" (晕染) regions on a check image.

    Pipeline:
      0. Isolate the red ink (HSV), masking out the deep-red seal (印章).
      1. Erode + mean-blur the binarized red channel to get loose candidate
         regions, merge overlapping/nearby boxes with a union-find → RES_1.
      2. Erode harder + dilate so only genuine bloom blobs survive → RECTS_2.
      3. Keep RES_1 candidates that are sufficiently covered by step-2 blobs
         (RES_2) or dense in tiny blobs (RES_3).

    :param img: BGR image (numpy array) of the region to inspect.
    :param debug: if True, print intermediates and show cv2 windows
                  (blocks on cv2.waitKey(0)).
    :return: list of [xmin, ymin, xmax, ymax] boxes = RECTS_2 + RES_3.
    """
    # img_bgr = constract_brightness(img_bgr, 1.5 , 1)
    # cv2.imshow('duibidu', img_bgr)
    img_bgr = img.copy()  # drawing canvas
    mask_red, redhsv = separate_color(img_bgr, color='red')
    mask_yz, yingzhang = separate_color(img_bgr, color='yingzhang')
    mask_yz = cv2.bitwise_not(mask_yz)
    # 0. Keep red areas, drop the deep-red seal (empirical thresholds).
    #    Then build a gray image (for line bloom) and a binary image
    #    (for blob/ring bloom).
    redline = cv2.bitwise_and(redhsv, redhsv, mask=mask_yz)
    # ==================================================
    gray = cv2.cvtColor(redline, cv2.COLOR_BGR2GRAY)  # gray used to judge dark bloom lines
    # ==================================================
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # 1. Erosion + blur = regions that MIGHT be bloom.
    #    Covers false positives, light bloom (around scratches, ring bloom)
    #    and obvious blob bloom. Thresholds are the sensitive part, so this
    #    first pass is deliberately permissive.
    #    Mean blur glues adjacent small bloom fragments together so a single
    #    contour can be found; leftover thin lines end up with low gray
    #    values and are expected to vanish at the next binarization.
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    ero1 = cv2.erode(binary, tempkernel, iterations=1)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    ero2 = cv2.erode(ero1, tempkernel, iterations=1)
    # ero2 is a first-cut signal: outside bloom areas mostly noise remains,
    # but dark parts of NORMAL red water lines can also survive.
    # ====================================11111=====================================
    blur_avg = cv2.blur(ero2, (11, 11))
    thre = (4 * 4 * 255) // (11 * 11)  # fixed threshold; kept for reference only
    print('固定滤波后二值化阈值:', thre)
    # Dynamic threshold via max inter-class variance (Otsu-like), shifted down.
    thre = _var_max_interclass(blur_avg, calc_zero=False) - 120
    print('最大类间方差计算的二值化阈值:', thre)
    _, blur_bin = cv2.threshold(blur_avg, thre, 255, cv2.THRESH_BINARY)
    # # A global OTSU would binarize everything non-zero; we need a threshold
    # # between white and light-white, hence the custom one above.
    # _, blur_bin = cv2.threshold(blur_avg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Outer contours only (OpenCV 3 returns (image, contours, hierarchy)).
    _, contours, hierarchy = cv2.findContours(blur_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    CONTOURS_1 = contours  # kept around in case it is needed later
    RECTS_1 = []
    AREA_1 = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        RECTS_1.append([x, y, x + w, y + h])
        AREA_1.append(cv2.contourArea(cnt))
    if debug:
        print('find contours from avg blur:', len(RECTS_1))

    # Merge boxes that overlap or nearly touch, via union-find.
    N1 = len(RECTS_1)
    uf = UnionFind(N1)
    for i in range(0, N1 - 1):
        for j in range(i + 1, N1):
            xmin1, ymin1, xmax1, ymax1 = RECTS_1[i]
            xmin2, ymin2, xmax2, ymax2 = RECTS_1[j]
            l_x = max(xmin1, xmin2)
            r_x = min(xmax1, xmax2)
            t_y = max(ymin1, ymin2)
            b_y = min(ymax1, ymax2)
            # Empirical merge rules: overlap, or within 15px horizontally /
            # 10px vertically of each other.
            if (r_x - l_x >= 0) and (b_y - t_y >= 0):
                uf.union(i, j)
            elif b_y - t_y > 0 and abs(r_x - l_x) <= 15:
                uf.union(i, j)
            elif r_x - l_x > 0 and abs(b_y - t_y) <= 10:
                uf.union(i, j)
            elif (r_x - l_x < 0) and (b_y - t_y < 0):
                if abs(r_x - l_x) <= 15 and abs(b_y - t_y) <= 10:
                    uf.union(i, j)
    # Group indices by union-find root.
    # FIX: the original seeded each group with its root (`D[p].append(p)`)
    # instead of the current index, dropping the first member whenever it was
    # not its own root and duplicating the root index.
    rect_merge = []
    area_merge = []
    D = {}
    for idx in range(0, N1):
        D.setdefault(uf.find(idx), []).append(idx)
    if debug:
        print('union find:', D)
    for k, rects in D.items():
        if len(rects) == 1:
            xmin, ymin, xmax, ymax = RECTS_1[rects[0]]
            rect_merge.append(RECTS_1[rects[0]])
        else:
            xmin = min([RECTS_1[r][0] for r in rects])
            ymin = min([RECTS_1[r][1] for r in rects])
            xmax = max([RECTS_1[r][2] for r in rects])
            ymax = max([RECTS_1[r][3] for r in rects])
            rect_merge.append([xmin, ymin, xmax, ymax])
        area_merge.append((xmax - xmin) * (ymax - ymin))
    if debug:
        print('合并后的框:', rect_merge)
        print('合并后面积:', area_merge)

    # rect_merge still contains many false positives on thick normal red
    # water lines, so these are only CANDIDATES. Filter by size:
    # ================================================================
    RES_1 = []       # candidate bloom boxes (many false positives on clean images)
    RES_1_AREA = []
    # ================================================================
    for r in rect_merge:
        xmin, ymin, xmax, ymax = r
        if xmax - xmin < 15 or ymax - ymin < 15:
            # too small: mark in cyan, drop
            cv2.rectangle(img_bgr, (xmin, ymin), (xmax, ymax), (255, 200, 0), 1)
        else:
            cv2.rectangle(img_bgr, (xmin, ymin), (xmax, ymax), (0, 0, 255), 1)
            RES_1.append(r)
            RES_1_AREA.append((xmax - xmin) * (ymax - ymin))
    if debug:
        print('过滤后的候选框;', RES_1)
        print('========================================')

    # 2. Erode again (vertically) then dilate; whatever large contour
    #    survives this is almost certainly bloom.
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
    ero3 = cv2.erode(ero2, tempkernel, iterations=1)
    # res = cv2.medianBlur(ero2, 5)
    tempkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    res_2 = cv2.dilate(ero3, tempkernel, iterations=1)
    # Contours of res_2 catch typical blob bloom; dark normal red water
    # lines may still slip through.
    # ======================================22222===================================
    _, contours, hierarchy = cv2.findContours(res_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    RECTS_2 = []
    RECTS_2_TINY = []  # rects filtered out for being small (but not negligible)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        area = cv2.contourArea(cnt)
        if w < 15 or h < 15 or area < 200:  # empirical size thresholds
            if not (w < 3 or h < 3 or area < 9):  # truly tiny ones are ignored
                RECTS_2_TINY.append([x, y, x + w, y + h])
            continue
        RECTS_2.append([x, y, x + w, y + h])
        cv2.rectangle(img_bgr, (x, y), (x + w, y + h), (0, 255, 0), 1)
    if debug:
        print('是晕染的区域吧:', RECTS_2)
        print('太小过滤掉的框:', len(RECTS_2_TINY), RECTS_2_TINY)

    def in_rect(cx, cy, rect):
        # True if point (cx, cy) lies inside [xmin, ymin, xmax, ymax].
        assert len(rect) == 4
        xmin, ymin, xmax, ymax = rect
        return xmin <= cx <= xmax and ymin <= cy <= ymax

    # RECTS_2 is (roughly) a subset of RES_1; if the subset covers enough of
    # a candidate's area, that candidate is confirmed as bloom.
    RES_1_FLAG = [0 for _ in range(0, len(RES_1))]       # count of contained sub-rects; -1 = confirmed
    RES_1_PART = [[] for _ in range(0, len(RES_1))]       # contained sub-rects per candidate
    RES_1_PART_AREA = [0 for _ in range(0, len(RES_1))]   # summed sub-rect area per candidate
    for r2 in RECTS_2:
        x1, y1, x2, y2 = r2
        rect2_area = (x2 - x1) * (y2 - y1)
        cx = (x1 + x2) // 2
        cy = (y1 + y2) // 2
        for i in range(0, len(RES_1)):
            if in_rect(cx, cy, RES_1[i]):
                RES_1_FLAG[i] += 1
                RES_1_PART[i].append(r2)
                RES_1_PART_AREA[i] += rect2_area
    # ==============================================================
    RES_2 = []  # RES_1 entries confirmed by coverage (some may be over-merged)
    # ==============================================================
    for i in range(0, len(RES_1)):
        x1, y1, x2, y2 = RES_1[i]
        r1_area = (x2 - x1) * (y2 - y1)
        # coverage threshold (IoU-like); 0.3 chosen empirically
        if RES_1_FLAG[i] != 0 and (RES_1_PART_AREA[i] / r1_area > 0.3):
            RES_2.append(RES_1[i])
            RES_1_FLAG[i] = -1  # confirmed; skip in later passes
            # cv2.rectangle(img_bgr, (x1, y1), (x2, y2), (0, 255, 0), 2)
    if debug:
        print('根据腐蚀图依然有大轮廓,RES_1中可以认为是晕染的区域:', RES_2)

    # Fold the tiny rects into the remaining (unconfirmed) candidates.
    for i in range(0, len(RES_1)):
        if RES_1_FLAG[i] == -1:
            continue
        for r2_tiny in RECTS_2_TINY:
            x1, y1, x2, y2 = r2_tiny
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2
            if in_rect(cx, cy, RES_1[i]):
                RES_1_FLAG[i] += 1
                RES_1_PART_AREA[i] += (x2 - x1) * (y2 - y1)

    # Density pass: count/area ratios decide the remaining candidates.
    RES_3 = []
    for i in range(0, len(RES_1)):
        # FIX: original compared the whole list (`RES_1_FLAG == -1`), which is
        # always False, so already-confirmed boxes were re-examined here and
        # could be duplicated into the returned result.
        if RES_1_FLAG[i] == -1:
            continue
        x1, y1, x2, y2 = RES_1[i]
        r1_area = (x2 - x1) * (y2 - y1)
        if debug:
            print('长宽比:', (x2 - x1) / (y2 - y1))
        if (x2 - x1) / (y2 - y1) > 3.0:  # too elongated → normal red water line
            continue
        if debug:
            print('第', i, '个, 面积所占比例:', RES_1_PART_AREA[i] / r1_area)
            # print('第', i, '个, 个数所占比例:', RES_1_FLAG[i] / (x2-x1))
        if RES_1_PART_AREA[i] / r1_area > 0.05 and RES_1_FLAG[i] > 3:
            # black boxes: selected by density / area-ratio rule
            cv2.rectangle(img_bgr, (x1, y1), (x2, y2), (0, 0, 0), 2)
            RES_3.append(RES_1[i])
    if debug:
        print('根据密度可以认为是晕染的区域RES_3:', RES_3)

    if debug:
        cv2.imshow('redline', redline)
        cv2.imshow('separate', np.vstack((gray, binary)))  # gray, binary
        # (3,3) erosion, (3,1) erosion, blur, blur binarized, (1,3) erosion, dilation
        cv2.imshow('process', np.vstack((ero1, ero2, blur_avg, blur_bin, ero3, res_2)))
        cv2.imshow('res', img_bgr)
        # plt.hist(gray.ravel(), 255)
        # plt.show()
        cv2.waitKey(0)
    # return RES_2
    # return np.vstack((cv2.cvtColor(blur_bin,cv2.COLOR_GRAY2BGR),
    #                   cv2.cvtColor(res_2, cv2.COLOR_GRAY2BGR),
    #                   img_bgr))  # ero3
    return RECTS_2 + RES_3  # each entry is (xmin, ymin, xmax, ymax)