Example #1
def find_template(im_source,
                  im_search,
                  threshold: float = 0.85,
                  mode=cv2.TM_CCOEFF_NORMED):
    """
    模板匹配
    :param im_source: 待匹配图像
    :param im_search: 待匹配模板
    :param threshold: 匹配度
    :param mode: 识别模式
    :return: None or Rect
    """
    start = time.time()
    im_source, im_search = check_detection_input(im_source, im_search)
    # run template matching to obtain the result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    # locate the best match
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    h, w = im_search.shape[:2]
    # compute the confidence of the match
    img_crop = im_source[max_loc[1]:max_loc[1] + h, max_loc[0]:max_loc[0] + w]
    confidence = cal_rgb_confidence(img_crop, im_search)
    # return None if the confidence is below threshold
    if confidence < threshold:
        return None
    # compute the match position
    x, y = max_loc
    rect = Rect(x=x, y=y, width=w, height=h)
    print('[tpl]{Rect}, confidence={confidence}, time={time:.2f}'.format(
        confidence=confidence, Rect=rect, time=(time.time() - start) * 1000))
    return generate_result(rect, confidence)
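Example #1 relies on the project-internal helper _get_template_result_matrix. A minimal sketch of what that helper is assumed to do (the grayscale conversion and the _sketch suffix are illustrative assumptions, not taken from the repository):

import cv2
import numpy as np


def _get_template_result_matrix_sketch(im_source: np.ndarray,
                                       im_search: np.ndarray,
                                       mode=cv2.TM_CCOEFF_NORMED) -> np.ndarray:
    """Slide im_search over im_source and return the raw score matrix
    that cv2.minMaxLoc() is applied to in find_template()."""
    s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)  # assumption: match on grayscale
    i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
    return cv2.matchTemplate(i_gray, s_gray, mode)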
Example #2
 def find_template(self, im_source, im_search, threshold=None):
     """
     模板匹配, 返回匹配度最高的坐标
     :param im_source: 待匹配图像
     :param im_search: 待匹配模板
     :param threshold: 匹配度
     :return:  None or Rect
     """
     start = time.time()
     im_source, im_search = self.check_detection_input(im_source, im_search)
     # run template matching to obtain the result matrix res
     res = self._get_template_result_matrix(im_source, im_search)
     # locate the best match
     min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
     h, w = im_search.shape[:2]
     # compute the confidence of the match: crop the matched region from im_source
     img_crop = im_source[max_loc[1]:max_loc[1] + h, max_loc[0]:max_loc[0] + w]
     confidence = self.cal_rgb_confidence(img_crop, im_search)
     # confidence = self.test_cal_rgb_confidence(img_crop, im_search)
     # return None if the confidence is below threshold
     if confidence < (threshold or self.threshold):
         return None
     # compute the match position
     x, y = max_loc
     rect = Rect(x=x, y=y, width=w, height=h)
     print('[tpl]{Rect}, confidence={confidence}, time={time:.2f}'.format(
         confidence=confidence,
         Rect=rect,
         time=(time.time() - start) * 1000))
     return generate_result(rect, confidence)
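cal_rgb_confidence is also internal to the project. A hedged sketch of a per-channel confidence check it could plausibly perform: compare the cropped candidate region with the template channel by channel and keep the weakest score, so a colour mismatch in any single channel drags the confidence down (the function name is real, the body below is an assumption):

import cv2
import numpy as np


def cal_rgb_confidence_sketch(img_crop: np.ndarray, im_search: np.ndarray) -> float:
    """Assumed behaviour: minimum of the per-channel TM_CCOEFF_NORMED scores."""
    scores = []
    for crop_ch, search_ch in zip(cv2.split(img_crop), cv2.split(im_search)):
        res = cv2.matchTemplate(crop_ch, search_ch, cv2.TM_CCOEFF_NORMED)
        scores.append(cv2.minMaxLoc(res)[1])  # max_val for this channel
    return min(scores)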
Example #3
 def crop_image(self, rect):
     """区域范围截图"""
     img = self.imread()
     height, width = self.shape
     if isinstance(rect, (list, tuple)) and len(rect) == 4:
         rect = Rect(*rect)
     elif isinstance(rect, Rect):
         pass
     else:
         raise ValueError('unknown rect: type={}, rect={}'.format(type(rect), rect))
     if not Rect(0, 0, width, height).contains(rect):
         raise OverflowError('Rect must not extend beyond the screen: {}'.format(rect))
     # compute the actual valid region inside the image:
     x_min, y_min = int(rect.tl.x), int(rect.tl.y)
     x_max, y_max = int(rect.br.x), int(rect.br.y)
     return image(img[y_min:y_max, x_min:x_max])
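A short usage sketch for crop_image, assuming an image instance wrapping a screenshot; the path is reused from Example #8, the coordinates are illustrative, and the (x, y, w, h) tuple order is inferred from the Rect(*rect) call above:

screen = image('./core/cv/test_image/emulator-5554.png')
button = screen.crop_image(Rect(x=100, y=200, width=60, height=40))  # Rect form
same_button = screen.crop_image((100, 200, 60, 40))                  # plain 4-tuple form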
Example #4
def create_similar_rect(x, y, w, h):
    """Generate rectangles in the +/-1 neighbourhood of (x, y, w, h)."""
    x = [x, x + 1, x - 1]
    y = [y, y + 1, y - 1]
    w = [w, w + 1, w - 1]
    h = [h, h + 1, h - 1]
    t = []
    for i in itertools.product(*[range(3) for k in range(4)]):
        # skip combinations where width and height are jittered in opposite directions
        if (i[2] == 2 and i[3] == 1) or (i[2] == 1 and i[3] == 2):
            pass
        else:
            t.append(Rect(x=x[i[0]], y=y[i[1]], width=w[i[2]], height=h[i[3]]))
    return t
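create_similar_rect enumerates the +/-1 neighbourhood of a rectangle: three choices for each of x, y, w and h give 3**4 = 81 combinations, minus the 18 where width and height move in opposite directions, leaving 63 candidates. A quick check:

rects = create_similar_rect(10, 20, 30, 40)
print(len(rects))  # 63
print(rects[0])    # the unmodified rectangle (x=10, y=20, w=30, h=40) is generated first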
Example #5
    def _two_good_points(self, pts_sch1, pts_sch2, pts_src1, pts_src2,
                         im_search, im_source):
        """返回两对匹配特征点情形下的识别结果."""
        # first compute the centre point (in im_source coordinates):
        middle_point = [
            int((pts_src1[0] + pts_src2[0]) / 2),
            int((pts_src1[1] + pts_src2[1]) / 2)
        ]
        pypts = []
        # if the two feature points share an x or y coordinate (in either src or sch),
        # the target rectangle cannot be derived; fall back to the same result as the good == 1 case
        if pts_sch1[0] == pts_sch2[0] or pts_sch1[1] == pts_sch2[1] or pts_src1[0] == pts_src2[0] or pts_src1[1] == \
                pts_src2[1]:
            confidence = self.ONE_POINT_CONFI
            return dict(result=middle_point,
                        rectangle=pypts,
                        confidence=confidence)
        # compute the x/y scale factors x_scale and y_scale, then expand the target region
        # out from the middle point (note: integer arithmetic must be promoted to float!)
        h, w = im_search.shape[:2]
        h_s, w_s = im_source.shape[:2]
        x_scale = abs(1.0 * (pts_src2[0] - pts_src1[0]) /
                      (pts_sch2[0] - pts_sch1[0]))
        y_scale = abs(1.0 * (pts_src2[1] - pts_src1[1]) /
                      (pts_sch2[1] - pts_sch1[1]))
        # once the scales are known, correct middle_point: the target centre is the mapped
        # template centre, not the midpoint of the two feature points.
        sch_middle_point = int((pts_sch1[0] + pts_sch2[0]) / 2), int(
            (pts_sch1[1] + pts_sch2[1]) / 2)
        middle_point[0] = middle_point[0] - int(
            (sch_middle_point[0] - w / 2) * x_scale)
        middle_point[1] = middle_point[1] - int(
            (sch_middle_point[1] - h / 2) * y_scale)
        middle_point[0] = max(middle_point[0], 0)  # clamp to 0 at the left edge (image origin is (0, 0))
        middle_point[0] = min(middle_point[0], w_s - 1)  # clamp to w_s - 1 at the right edge
        middle_point[1] = max(middle_point[1], 0)  # clamp to 0 at the top edge
        middle_point[1] = min(middle_point[1], h_s - 1)  # clamp to h_s - 1 at the bottom edge

        # corner order of the computed rectangle: top-left -> bottom-left -> bottom-right -> top-right
        # (image rotation is not handled yet)
        # clamp: 0 at the left edge, w_s - 1 at the right edge, 0 at the top edge, h_s - 1 at the bottom edge
        x_min, x_max = int(max(middle_point[0] - (w * x_scale) / 2, 0)), int(
            min(middle_point[0] + (w * x_scale) / 2, w_s - 1))
        y_min, y_max = int(max(middle_point[1] - (h * y_scale) / 2, 0)), int(
            min(middle_point[1] + (h * y_scale) / 2, h_s - 1))
        # target rectangle corners in top-left, bottom-left, bottom-right, top-right order:
        # (x_min, y_min) (x_min, y_max) (x_max, y_max) (x_max, y_min)
        pts = np.float32([[x_min, y_min], [x_min, y_max], [x_max, y_max],
                          [x_max, y_min]]).reshape(-1, 1, 2)
        for npt in pts.astype(int).tolist():
            pypts.append(tuple(npt[0]))
        return Rect(x=x_min,
                    y=y_min,
                    width=(x_max - x_min),
                    height=(y_max - y_min))
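A worked example of the scale and centre correction in _two_good_points, with made-up numbers: the template im_search is 100x50 (w x h), the two matched keypoints are sch (10, 10) / (90, 40) and src (120, 220) / (280, 280):

# x_scale = |280 - 120| / |90 - 10| = 2.0
# y_scale = |280 - 220| / |40 - 10| = 2.0
# sch_middle_point = (50, 25), raw middle_point = (200, 250)
# corrected middle_point:
#   x: 200 - int((50 - 100 / 2) * 2.0) = 200
#   y: 250 - int((25 -  50 / 2) * 2.0) = 250
# expanded region (before clamping to the source size):
#   x_min, x_max = 200 - 100, 200 + 100 = 100, 300
#   y_min, y_max = 250 -  50, 250 +  50 = 200, 300
#   => Rect(x=100, y=200, width=200, height=100)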
Example #6
def find_templates(im_source,
                   im_search,
                   threshold: float = 0.85,
                   max_count=10):
    """
    模板匹配
    :param im_source: 待匹配图像
    :param im_search: 待匹配模板
    :param threshold: 匹配度
    :param mode: 识别模式
    :param max_count: 最多匹配数量
    :return: None or Rect
    """
    start = time.time()
    im_source, im_search = check_detection_input(im_source, im_search)
    # run template matching to obtain the result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    result = []
    h, w = im_search.shape[:2]

    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        img_crop = im_source[max_loc[1]:max_loc[1] + h,
                             max_loc[0]:max_loc[0] + w]
        confidence = cal_rgb_confidence(img_crop, im_search)
        if confidence < threshold or len(result) >= max_count:
            break
        x, y = max_loc
        rect = Rect(x=x, y=y, width=w, height=h)
        result.append(generate_result(rect, confidence))
        # mask out the current best match so the next iteration finds a different one
        cv2.rectangle(res, (int(max_loc[0] - w / 2), int(max_loc[1] - h / 2)),
                      (int(max_loc[0] + w / 2), int(max_loc[1] + h / 2)),
                      (0, 0, 0), -1)
    if result:
        print('[tpls] find counts:{counts}, time={time:.2f}ms{result}'.format(
            counts=len(result),
            time=(time.time() - start) * 1000,
            result=''.join([
                '\n\t{}, confidence={}'.format(x['rect'], x['confidence'])
                for x in result
            ])))
    return result if result else None
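A hedged usage sketch for find_templates; img and im_search are the images loaded as in Example #8, and the entries of the returned list are assumed to be the dicts built by generate_result, exposing at least 'rect' and 'confidence' keys (as the print statement above suggests):

matches = find_templates(im_source=img, im_search=im_search, threshold=0.9, max_count=5)
if matches:
    for m in matches:
        print(m['rect'], m['confidence'])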
Example #7
    def _many_good_pts(self, im_source, im_search, kp_sch, kp_src,
                       good) -> Rect:
        sch_pts, img_pts = np.float32([
            kp_sch[m.queryIdx].pt for m in good
        ]).reshape(-1, 1, 2), np.float32([kp_src[m.trainIdx].pt
                                          for m in good]).reshape(-1, 1, 2)
        M, mask = self._find_homography(sch_pts, img_pts)
        # transform the four template corners with M to get the corner coordinates
        # of the target region inside the large (source) image:
        h, w = im_search.shape[:2]
        h_s, w_s = im_source.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        # convert the numpy array to a python list: [(a, b), (a1, b1), ...]
        def cal_rect_pts(dst):
            return [
                tuple(npt[0])
                for npt in np.rint(dst).astype(float).tolist()
            ]

        pypts = cal_rect_pts(dst)
        # the four entries of pypts are, in order: top-left, bottom-left, bottom-right, top-right
        # note: the corners may fall outside the source image, but (given the linearity of the
        # refined homography matrix M) the centre point will not
        lt, br = pypts[0], pypts[2]
        # the computed target rectangle may be flipped, so normalise it to make sure
        # the mapped "top-left" corner really is the top-left point in the image:
        x_min, x_max = min(lt[0], br[0]), max(lt[0], br[0])
        y_min, y_max = min(lt[1], br[1]), max(lt[1], br[1])
        # the selected target rectangle may exceed the image bounds; clamp it to the boundary when it does:
        # clamp: 0 at the left edge, w_s - 1 at the right edge, 0 at the top edge, h_s - 1 at the bottom edge
        # if x_min < 0, use 0; if x_max < 0, use 0.
        x_min, x_max = int(max(x_min, 0)), int(max(x_max, 0))
        # if x_min > w_s - 1, use w_s - 1; if x_max > w_s - 1, use w_s - 1.
        x_min, x_max = int(min(x_min, w_s - 1)), int(min(x_max, w_s - 1))
        # if y_min < 0, use 0; if y_max < 0, use 0.
        y_min, y_max = int(max(y_min, 0)), int(max(y_max, 0))
        # if y_min > h_s - 1, use h_s - 1; if y_max > h_s - 1, use h_s - 1.
        y_min, y_max = int(min(y_min, h_s - 1)), int(min(y_max, h_s - 1))
        return Rect(x=x_min,
                    y=y_min,
                    width=(x_max - x_min),
                    height=(y_max - y_min))
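_many_good_pts delegates the estimation of M to self._find_homography. A minimal sketch of what that helper presumably wraps, namely cv2.findHomography with RANSAC (the reprojection threshold, the _sketch name and the error handling are assumptions):

import cv2
import numpy as np


def _find_homography_sketch(sch_pts: np.ndarray, src_pts: np.ndarray):
    """Estimate the perspective transform mapping template points to source points."""
    M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)
    if M is None:
        raise ValueError('homography estimation failed')  # hypothetical error handling
    return M, mask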
Example #8
from core.cv.match_template import find_template

anchor = Anchor(dev={'width': 1920, 'height': 1080},
                cur={'width': 3400, 'height': 1440, 'left': 260, 'right': 260},
                orientation=1)

rect = Rect.create_by_point_size(anchor.point(0, 0), anchor.size(1920, 1080))
img = image('./core/cv/test_image/emulator-5554.png')
#
im_search = image('./core/cv/test_image/star.png').resize(
    62 * 1.33333, 43 * 1.33333)
#
#
# a = cuda_find_template(im_source=img, im_search=im_search)

import cv2
from core.cv.cuda_match_template import cuda_template
tpl = cuda_template()
for i in range(10):
    tpl.find_template(im_source=img, im_search=im_search)
# img2 = cv2.cuda_GpuMat()
# img2.upload(cv2.imread('./core/cv/test_image/star.png'))
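For the CUDA path, images normally have to be uploaded to the GPU first; a hedged sketch expanding the commented-out lines above (whether cuda_template accepts GpuMat inputs directly is an assumption; only the upload/download calls are standard OpenCV API):

gpu_src = cv2.cuda_GpuMat()
gpu_src.upload(cv2.imread('./core/cv/test_image/emulator-5554.png'))
gpu_tpl = cv2.cuda_GpuMat()
gpu_tpl.upload(cv2.imread('./core/cv/test_image/star.png'))
# ... run the CUDA matcher here, then bring any result matrix back with .download()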