Example #1
    def showMatch(self, i1, i2, idx_pairs, status=None):
        #print " -- idx_pairs = " + str(idx_pairs)
        kp_pairs = []
        for p in idx_pairs:
            kp1 = i1.kp_list[p[0]]
            kp2 = i2.kp_list[p[1]]
            kp_pairs.append( (kp1, kp2) )
        img1 = i1.load_gray()
        img2 = i2.load_gray()
        if status is None:
            status = np.ones(len(kp_pairs), np.bool_)
        h, w = img1.shape[:2]
        scale = 790.0/float(w)
        si1 = cv2.resize(img1, (0,0), fx=scale, fy=scale)
        si2 = cv2.resize(img2, (0,0), fx=scale, fy=scale)
        explore_match('find_obj', si1, si2, kp_pairs,
                      hscale=scale, wscale=scale, status=status)
        # the status array is valid at this point and reflects the
        # user's in/outlier choices
        cv2.destroyAllWindows()

        # status is an array of booleans that parallels the pair array
        # and represents the user's choice to keep or discard the
        # respective pairs.
        return status
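A minimal usage sketch (hypothetical caller; the `matcher` object, image objects `i1`/`i2`, and `idx_pairs` list are assumed) showing how the returned mask would typically be applied:

    status = matcher.showMatch(i1, i2, idx_pairs)
    # keep only the pairs the user marked as inliers
    idx_pairs = [pair for pair, keep in zip(idx_pairs, status) if keep]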
Example #2
    def showMatch(self, i1, i2, idx_pairs, status=None):
        #print " -- idx_pairs = " + str(idx_pairs)
        kp_pairs = []
        for p in idx_pairs:
            kp1 = i1.kp_list[p[0]]
            kp2 = i2.kp_list[p[1]]
            kp_pairs.append((kp1, kp2))
        img1 = i1.load_gray()
        img2 = i2.load_gray()
        if status is None:
            status = np.ones(len(kp_pairs), np.bool_)
        h, w = img1.shape[:2]
        scale = 790.0 / float(w)
        si1 = cv2.resize(img1, (0, 0), fx=scale, fy=scale)
        si2 = cv2.resize(img2, (0, 0), fx=scale, fy=scale)
        explore_match('find_obj',
                      si1,
                      si2,
                      kp_pairs,
                      hscale=scale,
                      wscale=scale,
                      status=status)
        # the status array is valid at this point and reflects the
        # user's in/outlier choices
        cv2.destroyAllWindows()

        # status is an array of booleans that parallels the pair array
        # and represents the user's choice to keep or discard the
        # respective pairs.
        return status
Example #3
def matchSift():
    '''Match SIFT features.'''
    img1 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\853117000372320013_gc_skew_mf\00000684_000000000119C94C.bmp",
        0)  # queryImage
    img2 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\13033022900009_gc_skew_mf\00000006_0000000000322D19.bmp",
        0)  # trainImage
    # sift = cv2.SIFT()
    # sift = cv2.xfeatures2d.SIFT_create()
    sift = cv2.xfeatures2d.SURF_create()  # SURF, despite the variable name
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    print(matches)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
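For reference, the `filter_matches` helper imported from OpenCV's find_obj sample looks roughly like this (paraphrased from the OpenCV samples; treat it as a sketch, the default ratio may differ between versions):

import numpy as np

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Lowe's ratio test: keep a kNN pair only when the best match is
    # clearly better than the second-best one
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append(kp1[m.queryIdx])
            mkp2.append(kp2[m.trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = list(zip(mkp1, mkp2))
    return p1, p2, kp_pairs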
Example #4
def match_and_draw(win):
    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    explore_match(win, img1, img2, kp_pairs, None, H)
Example #5
    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        explore_match(win, img1, img2, kp_pairs, None, H)
Example #6
    def showMatchOrient(self, i1, i2, idx_pairs, status=None, orient='none'):
        #print " -- idx_pairs = " + str(idx_pairs)
        img1 = i1.load_gray()
        img2 = i2.load_gray()

        print('orient:', orient)
        if orient == 'aircraft':
            yaw1 = i1.aircraft_pose['ypr'][0]
            yaw2 = i2.aircraft_pose['ypr'][0]
        elif orient == 'camera':
            yaw1 = i1.camera_pose['ypr'][0]
            yaw2 = i2.camera_pose['ypr'][0]
        elif orient == 'sba':
            yaw1 = i1.camera_pose_sba['ypr'][0]
            yaw2 = i2.camera_pose_sba['ypr'][0]
        else:
            yaw1 = 0.0
            yaw2 = 0.0
        print(yaw1, yaw2)
        h, w = img1.shape[:2]
        scale = 790.0/float(w)
        si1, M1 = self.rotateAndScale(img1, yaw1, scale)
        si2, M2 = self.rotateAndScale(img2, yaw2, scale)

        kp_pairs = []
        for p in idx_pairs:
            kp1 = self.copyKeyPoint(i1.kp_list[p[0]])
            p1 = M1.dot( np.hstack((kp1.pt, 1.0)) )[:2]
            kp1.pt = (p1[0], p1[1])
            kp2 = self.copyKeyPoint(i2.kp_list[p[1]])
            p2 = M2.dot( np.hstack((kp2.pt, 1.0)) )[:2]
            kp2.pt = (p2[0], p2[1])
            print(p1, p2)
            kp_pairs.append( (kp1, kp2) )
        if status is None:
            status = np.ones(len(kp_pairs), np.bool_)

        #explore_match('find_obj', si1, si2, kp_pairs,
        #              hscale=scale, wscale=scale, status=status)
        explore_match('find_obj', si1, si2, kp_pairs,
                      hscale=1.0, wscale=1.0, status=status)
        # the status array is valid at this point and reflects the
        # user's in/outlier choices
        cv2.destroyAllWindows()

        # status is an array of booleans that parallels the pair array
        # and represents the user's choice to keep or discard the
        # respective pairs.
        return status
Example #7
def match(pic1, pic2):
    img1 = cv2.imread(pic1, 0)  # queryImage
    img2 = cv2.imread(pic2, 0)  # trainImage
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    return len(kp_pairs)
Example #8
def matchSift3():
    '''Match SIFT features.'''
    img1 = cv2.imread('/home/liuzhen/Python test/image/quadrilateral_find.jpg',
                      0)  # queryImage
    img2 = cv2.imread('/home/liuzhen/Python test/image/screen_white_find.bmp',
                      0)  # trainImage
    #sift = cv2.SIFT()
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 3.0 does provide drawMatchesKnn
    # Apply ratio test
    # Ratio test: for each descriptor A, take its nearest neighbor B and the
    # second-nearest C; accept the match only if dist(A,B)/dist(A,C) is below a
    # threshold (0.75). Matches are assumed one-to-one, and a true match would
    # ideally have distance 0.

    # good = []
    # for m, n in matches:
    #     if m.distance < 0.8 * n.distance:
    #         good.append([m])
    # img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[:], None, flags=2)
    # print (len(kp1),len(kp2),len(good))
    # plt.imshow(img3), plt.show()

    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    print('%d / %d  p1/p2' % (len(p1), len(p2)))
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' %
              len(p1))

    explore_match('affine find_obj', img1, img2, kp_pairs, None, H)

    cv2.waitKey()
    cv2.destroyAllWindows()
Example #9
def matchSift():
    '''Match SIFT features.'''
    img1 = cv2.imread('../../data/box.png', 0)  # queryImage
    img2 = cv2.imread('../../data/box_in_scene.png', 0)  # trainImage
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
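The commented-out constructors in the examples above track OpenCV's API churn around SIFT. A hedged compatibility helper (all three names are real OpenCV constructors; which one exists depends on the installed build):

import cv2

def create_sift():
    if hasattr(cv2, 'SIFT_create'):
        # OpenCV >= 4.4: SIFT is back in the main module (patent expired)
        return cv2.SIFT_create()
    if hasattr(cv2, 'xfeatures2d'):
        # OpenCV 3.x / early 4.x: needs the contrib xfeatures2d module
        return cv2.xfeatures2d.SIFT_create()
    # OpenCV 2.4.x
    return cv2.SIFT()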
Example #10
def doFeatureMatch(img1, img2, way, win):
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)

    method = way + '-flann'
    feature_name = opts.get('--feature', 'surf-flann')
    detector, matcher = init_feature(feature_name)
    if img1 is None:
        print('Failed to load img1')
        sys.exit(1)

    if img2 is None:
        print('Failed to load img2')
        sys.exit(1)
    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)
    print('using', feature_name)
    pool = ThreadPool(processes=cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' %
                  len(p1))
        explore_match(win, img1, img2, kp_pairs, None, H)

    cv.waitKey()
    cv.destroyAllWindows()
Example #11
def matchSift():
    '''Match SIFT features.'''
    img1 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\962215042606600003_gc_skew_mf\00000259_0000000000A7CF24.bmp",
        0)  # queryImage
    img2 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\962215042606600005_gc_skew_mf\00000245_0000000000A4BA9A.bmp",
        0)  # trainImage
    # sift = cv2.SIFT()
    # sift = cv2.xfeatures2d.SIFT_create()
    sift = cv2.xfeatures2d.SURF_create()  # SURF, despite the variable name
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)

    num = 0
    result = 0
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            # result += m.distance*m.distance
            result += (m.distance - n.distance) * (m.distance - n.distance)
            num += 1
    if num > 0:
        result /= num
    # return result

    print(result)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #12
def matchSift():
    '''Match SIFT features.'''
    img1 = cv2.imread('/home/liuzhen/Python test/image/xcl3.jpg',
                      0)  # queryImage
    img2 = cv2.imread('/home/liuzhen/Python test/image/screen_white.bmp',
                      0)  # trainImage
    # sift = cv2.SIFT()
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #13
def draw_features(img1_path, img2_path):
  print(img1_path)
  print(img2_path)
  img1 = cv2.imread(img1_path, 0)
  img2 = cv2.imread(img2_path, 0)
  print('img1:', img1)
  print('img2:', img2)

  # Initiate ORB detector (cv2.ORB_create in OpenCV 3+; older 2.4 code used cv2.ORB())
  orb = cv2.ORB_create()

  # Find the keypoints and descriptors
  kp1, des1 = orb.detectAndCompute(img1,None)
  kp2, des2 = orb.detectAndCompute(img2,None)

  # Create BFMatcher object
  bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # optionally crossCheck=True

  matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
  p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
  explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image

  cv2.waitKey()
Example #14
    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2,
                                           k=2)  #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        #p1, p2, kp_pairs = filter_matches_std(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        else:  # fall back to regular SIFT
            '''# Initiate SIFT detector
            sift = cv2.xfeatures2d.SIFT_create()

            # find the keypoints and descriptors with SIFT
            kp1, des1 = sift.detectAndCompute(img1, None)
            kp2, des2 = sift.detectAndCompute(img2, None)

            # FLANN parameters
            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)  # or pass empty dictionary

            flann = cv2.FlannBasedMatcher(index_params, search_params)

            matches = flann.knnMatch(des1, des2, k=2)

            # Need to draw only good matches, so create a mask
            matchesMask = [[0, 0] for i in range(len(matches))]

            # ratio test as per Lowe's paper
            for i, (m, n) in enumerate(matches):
                if m.distance < 0.7 * n.distance:
                    matchesMask[i] = [1, 0]

            draw_params = dict(matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0),
                               matchesMask=matchesMask,
                               flags=2)

            img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)

            cv2.imshow('matched',img3)'''
            return sift_detect(img1, img2)

        img, corners = explore_match(win, img1, img2, kp_pairs, None, H)
        return img, corners, kp_pairs
Example #15
def main(args):
    with h5py.File(args['ah'], "r") as a:
        kps = a['keypoints'][()]
        kp1 = [cv2.KeyPoint(kp[0], kp[1], kp[2], kp[3]) for kp in kps]
        desc1 = a['descriptors'][()]

    with h5py.File(args['bh'], "r") as b:
        kps = b['keypoints'][()]
        kp2 = [cv2.KeyPoint(kp[0], kp[1], kp[2], kp[3]) for kp in kps]
        desc2 = b['descriptors'][()]

    detector, matcher = init_feature("sift")

    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  #2
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' %
              len(p1))

    img1 = cv2.imread(args['a'], 0)
    img2 = cv2.imread(args['b'], 0)

    explore_match("LIFT Match", img1, img2, kp_pairs, None, H)

    cv2.waitKey(0)
    cv2.destroyAllWindows()

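A hedged sketch of how such an HDF5 feature file could be written in the first place; the dataset names and the (x, y, size, angle) field order are assumptions read off the loader above:

import h5py
import numpy as np

def save_features(path, kps, desc):
    # cv2.KeyPoint objects are not directly serializable, so store their fields
    arr = np.array([(kp.pt[0], kp.pt[1], kp.size, kp.angle) for kp in kps],
                   dtype=np.float32)
    with h5py.File(path, 'w') as f:
        f.create_dataset('keypoints', data=arr)
        f.create_dataset('descriptors', data=desc)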
Example #16
def hacerMosaico(directorio):
	directorio="/home/diego/salida/{}".format(directorio)
	archivos = os.listdir(directorio)
	
	img1 = cv2.imread('{}/{}'.format(directorio,archivos[0]),1)
	img2 = cv2.imread('{}/{}'.format(directorio,archivos[1]),1)
	img3 = cv2.imread('{}/{}'.format(directorio,archivos[2]),1)

	h1, w1 = img1.shape[:2]
	h2, w2 = img2.shape[:2]

	#create empty matrix
	vis = np.zeros((max(h1, h2), w1+w2,3), np.uint8)

	#combine 2 images
	vis[:h1, :w1,:3] = img1
	vis[:h2, w1:w1+w2,:3] = img2

	detector = cv2.xfeatures2d.SIFT_create()
	norm = cv2.NORM_L2
	matcher = cv2.BFMatcher(norm)

	#vis = np.concatenate((img1, img2), axis=1)

	#cv2.imshow('MOSAICO',vis)	

	kp1, desc1 = detector.detectAndCompute(vis, None)
	kp2, desc2 = detector.detectAndCompute(img3, None)

	raw_matches = matcher.knnMatch(desc2, trainDescriptors = desc1, k = 2) #2		
	p1, p2, kp_pairs = filter_matches(kp2, kp1, raw_matches)
	print(len(p1))
	if len(p1) >= 1:
		H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
		if status is not None:
			print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
			vis = explore_match('find_obj', vis, img3, kp_pairs, status, H)
	else:
		H, status = None, None

	cv2.waitKey(0)
Example #17
    def locate(self, img2, show=False):
        center = None

        kp2, desc2 = self.detector.detectAndCompute(img2, None)

        raw_matches = self.matcher.knnMatch(self.desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(self.kp1, kp2, raw_matches)

        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))

            if H is not None:

                h2, w2 = img2.shape[:2]

                corners = np.int32( cv2.perspectiveTransform(self.corners1.reshape(1, -1, 2), H).reshape(-1, 2) ) #+ (w1, 0) 
                center = np.int32(np.mean(corners, axis=0))
                

                if show:
                    cv2.polylines(img2, [corners], True, (255, 255, 255))



                    cv2.circle(img2, (center[0], center[1]), 5, (255, 255, 255))
                    cv2.imshow('hello1', img2)

                    if False:
                        vis = explore_match('hello', self.img1, img2, kp_pairs, status, H)
        else:
            if len(p2):
                center = np.int32(np.mean(p2, axis=0))
            else:
                center = None

            print('%d matches found, not enough for homography estimation' % len(p1))
            
        return center
Example #18
def matchSift(img1_gray, img2_gray):
    """
    Match SIFT features.
    """
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1_gray, None)
    kp2, des2 = sift.detectAndCompute(img2_gray, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13/sources/samples/python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches, ratio=0.5)
    matche_img = explore_match('find_obj', img1_gray, img2_gray,
                               kp_pairs)  # cv2 shows image
    cv2.imwrite("C:/Users/qq619/Desktop/05matches.png", matche_img)
    # Save the feature-match visualization

    cv2.waitKey()
    cv2.destroyAllWindows()
    return p1, p2, des1, des2, matches
Example #19
def matchSift():
    '''Match SIFT features.'''
    img1 = cv2.imread("E:/image_compare/yp/15/1502.png")  # queryImage
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.imread("E:/image_compare/yp/16/1602.png")  # trainImage
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher: two parameters, the distance metric (L2 by default, or L1) and crossCheck (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    matche_img = explore_match('find_obj', img1, img2,
                               kp_pairs)  # cv2 shows image
    cv2.imwrite("C:/Users/qq619/Desktop/02matches.png", matche_img)

    cv2.waitKey()
    cv2.destroyAllWindows()
Example #20
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)

draw_keypoints(img1,kp1)
draw_keypoints(img2,kp2)


print('matching...')
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)

if len(p1) >= 4:
	H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
	print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
else:
	H, status = None, None
	print('%d matches found, not enough for homography estimation' % len(p1))

print(len(p1), len(p2))
print(p1, p2)
vis = explore_match('find_obj', img2, img1, kp_pairs, status, H)



#cv2.imshow('img1',img1)
#cv2.imshow('img2',img2)


cv2.waitKey()
cv2.destroyAllWindows()
Example #21
def playVideo(nombre, descriptores):
	detector = cv2.xfeatures2d.SIFT_create()
	norm = cv2.NORM_L2
	matcher = cv2.BFMatcher(norm)

	cap = cv2.VideoCapture(nombre)

	fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
	i=0
	roi_index = 0
	levels=1
	while True:
		t0 = time.time()  # start frame timer (time.clock was removed in Python 3.8)
		ret, frame = cap.read() # Capture frame-by-frame
		if ret == False:
			break
		#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert frame to grayscale
		fgmask = fgbg.apply(frame)  # detect the moving foreground
		retval, thresh = cv2.threshold(fgmask, 200, 256, cv2.THRESH_BINARY)  # remove shadows
		enmask = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)  # replicate to 3 channels so it can be masked against an RGB frame
		enmask = cv2.bitwise_and(frame, enmask)  # mask the original frame
		_, contours0,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
		contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
		# Find the index of the largest contour
		areas = [cv2.contourArea(c) for c in contours]
		if len(areas) > 0:
			print('ROIs:', len(areas))
			max_index = np.argmax(areas)
			cnt=contours[max_index]
			x,y,w,h = cv2.boundingRect(cnt)
			area = w * h
			print('area: ', area)
			if (area > 100 ) & (h > w): #caviar-area: 500, visor-area: 1000
				cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)  # draw bounding rectangle
				cv2.circle(frame, (int(x+(w/2)), int(y+(h/2))), 2, (0, 255, 0))  # dot at the rectangle center
				roi_index = roi_index + 1
				roi = enmask[y:y+h,x:x+w]
				kp, desc = detector.detectAndCompute(roi, None)
				print(len(kp))
				#print('position: ({},{}) size: ({},{}) area: {}'.format(x, y, w, h, area))
				if len(kp) > 0: #caviar: 4 , visor:20
					for par in descriptores:
						raw_matches = matcher.knnMatch(par.descriptor, trainDescriptors = desc, k = 2) #2		
						p1, p2, kp_pairs = filter_matches(par.keypoints, kp, raw_matches)
						if len(p1) >= 4:
							H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
							if status is not None:
								print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
								'''
								tmp1 = 'match{}_1.png'.format(i)
								tmp2 = 'match{}_2.png'.format(i)
								cv2.imwrite('{}{}'.format(dirsalida,tmp1),par.image)
								cv2.imwrite('{}{}'.format(dirsalida,tmp2),roi)
								'''
							vis = explore_match('find_obj', roi, par.image, kp_pairs, status, H)
						else:
							H, status = None, None
							#print('%d matches found, not enough for homography estimation' % len(p1))
					par = Par(kp,desc,roi)	
					print('stored descriptors:', len(descriptores))
					descriptores.append(par)
		cv2.imshow('frame',frame)
		#cv2.imshow('fgmask',fgmask)
		#cv2.imshow('thresh',thresh)
		#cv2.imshow('enmask',enmask)
		if cv2.waitKey(20) & 0xFF == ord('q'):
			# When everything done, release the capture
			break
		t1 = time.time() - t0
		t0 = t1
		#print('frame:', i, ' rate: ', 1. / t1)
		i=i+1
Example #22
kp2, desc2 = detector.detectAndCompute(img2, None)

raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
    H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    print('%d / %d  inliers/matched' % (np.sum(status), len(status)))

    if H is not None:

        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]

        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) ) #+ (w1, 0) 
        cv2.polylines(img2, [corners], True, (255, 255, 255))
        
        center = np.int32(np.mean(corners, axis=0))
        print(center)
        import pdb; pdb.set_trace()
        cv2.circle(img2, (center[0], center[1]), 5, (255, 255, 255))
        cv2.imshow('hello1', img2)
else:
    H, status = None, None
    print('%d matches found, not enough for homography estimation' % len(p1))

vis = explore_match('hello', img1, img2, kp_pairs, status, H)

cv2.waitKey()
cv2.destroyAllWindows()
Example #23
    def run(self):
        # init big image for stitching
        ret, frame = self.cam.read()
        frame=cv2.resize(frame,(320,240))
        h,w,d=frame.shape
        big_image = np.zeros((h*12,w*3,3), np.uint8)
        starty=h*11
        startx=w
        total_transl_x=0
        total_transl_y=0

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.prev_gray=frame_gray
        self.prev_frame=frame

        detector, matcher = init_feature('sift-flann')
        pool=ThreadPool(processes = cv2.getNumberOfCPUs())

        while True:
            for i in range(skip_frames):
                ret, frame = self.cam.read()
            ret, frame = self.cam.read()
            frame=cv2.resize(frame,(320,240))

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            img0, img1 = self.prev_gray, frame_gray

            kp1, desc1 = affine_detect(detector, img0[10:h-50,10:w-10], pool=pool)
            kp2, desc2 = affine_detect(detector, img1[10:h-50,10:w-10], pool=pool)
            print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

            with Timer('matching'):
                raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
            p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
            if len(p1) >= 4:
                H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
                print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
                # do not draw outliers (there will be a lot of them)
                kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

                warp = cv2.warpPerspective(img0[10:h-50,10:w-10], H, (w, h*2))
                cv2.imshow("warped",warp)

            else:
                H, status = None, None
                print('%d matches found, not enough for homography estimation' % len(p1))

            vis = explore_match('affine find_obj', img0, img1, kp_pairs, None, H)




            # stitching-----------------------
            translation = np.zeros((3,1))  #3,1
            if len(p1)>4 and len(p2)>4:

                # temp1=[]
                # # temp2=[]

                # for i in range(len(kp1)):
                #     print kp1[i].pt+ (0,)
                #     temp1.append(kp1[i].pt+ (0,))
                # # for i in range(len(kp2)):
                # #     temp2.append(kp2[i].pt)
                # points1.astype(np.uint8)

                # points1 = np.array(temp1)
                # print points1
                # # points2 = np.array(temp2)

                # Hr=cv2.estimateRigidTransform(points1, points1,False)

                translation[:,0] = H[:,2] #Hr[:,2]

                # rotation = np.zeros((3,3))
                # rotation[:,0] = H[:,0]
                # rotation[:,1] = H[:,1]
                # rotation[:,2] = np.cross(H[0:3,0],H[0:3,1])

                # print "x translation:",translation[0]
                # print "y translation:",translation[1]

                draw_str(vis, (20, 40), 'x-axis translation: %.1f' % translation[0])
                draw_str(vis, (20, 60), 'y-axis translation: %.1f' % translation[1])

                if translation[0]<60 and translation[1]<60:  #check for bad H
                    total_transl_x+=int(translation[0])
                    total_transl_y+=int(translation[1])

                    draw_str(vis, (20, 80), 'tot x-axis translation: %.1f' % total_transl_x)
                    draw_str(vis, (20, 100), 'tot y-axis translation: %.1f' % total_transl_y)

                    #h,w,d=frame.shape

                    frame_over=self.prev_frame[10:h-50,10:w-10].copy()
                    overlay = cv2.warpPerspective(frame_over, H, (w, h))
                    frame_h,frame_w,d=frame_over.shape

                    cv2.imshow('overlay',overlay)
                    #vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)
                    big_image[starty-int(total_transl_y):starty-int(total_transl_y)+frame_h,startx-int(total_transl_x):startx-int(total_transl_x)+frame_w]=overlay[0:frame_h,0:frame_w].copy()

            #small_image=big_image.copy()
            big_h,big_w,d=big_image.shape
            small_image = cv2.resize(big_image, (big_w // 4, big_h // 4))
            cv2.imshow('stitching', small_image)
            #cv2.imwrite("result.jpg",big_image);



            self.frame_idx += 1
            self.prev_gray = frame_gray
            self.prev_frame=frame

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
Example #24
        print('Match it!!!!!!')
        print('image ', i)
        # print data_image_top_des
        print(type(des_target[0,0]))

        ## From here: if the VW appears at least 5 times, perform the homography on it.




        for j in range(how_many_top):
            ## threshold: 5
            if data_image_top_des[j].shape[0] >= 5:
                print('round ', j)
                bf = cv2.BFMatcher(cv2.NORM_HAMMING)
                matches = bf.knnMatch(np.uint8(target_image_top_des[j]), trainDescriptors = np.uint8(data_image_top_des[j]), k = 2)
                print(len(matches))
                p1, p2, kp_pairs = filter_matches(target_image_top_kpts[j], data_image_top_kpts[j], matches, 0.99)
                if len(kp_pairs) > 0:
                    try:
                        explore_match('find_obj', img_gray,img_tmp,kp_pairs)
                        cv2.waitKey()
                        cv2.destroyAllWindows()
                    except Exception:
                        print('error!!!')
                else:
                    print('not enough pairs.')
                    # print np.uint8(target_image_top_des[j])
                    print(np.uint8(data_image_top_des[j]))
                    print('len of kp_pairs: ', len(kp_pairs))
Example #25
    if H is not None:

        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]

        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32(
            cv2.perspectiveTransform(corners.reshape(1, -1, 2),
                                     H).reshape(-1, 2))  #+ (w1, 0)
        cv2.polylines(img2, [corners], True, (255, 255, 255))

        center = np.int32(np.mean(corners, axis=0))
        print(center)
        import pdb
        pdb.set_trace()
        cv2.circle(img2, (center[0], center[1]), 5, (255, 255, 255))
        cv2.imshow('hello1', img2)
else:
    H, status = None, None
    print('%d matches found, not enough for homography estimation' % len(p1))

vis = explore_match('hello', img1, img2, kp_pairs, status, H)

cv2.waitKey()
cv2.destroyAllWindows()
Example #26
    def showMatchOrient(self,
                        i1,
                        i2,
                        idx_pairs,
                        status=None,
                        orient='relative'):
        #print " -- idx_pairs = " + str(idx_pairs)
        img1 = i1.load_gray()
        img2 = i2.load_gray()

        # compute the affine transformation between points.  This is
        # used to determine relative orientation of the two images,
        # and possibly estimate outliers if no status array is
        # provided.

        src = []
        dst = []
        for pair in idx_pairs:
            src.append(i1.kp_list[pair[0]].pt)
            dst.append(i2.kp_list[pair[1]].pt)
        fullAffine = False
        # note: cv2.estimateRigidTransform was removed in OpenCV 4.x;
        # cv2.estimateAffinePartial2D is its replacement there
        affine = cv2.estimateRigidTransform(
            np.array([src]).astype(np.float32),
            np.array([dst]).astype(np.float32), fullAffine)
        print('affine:', affine)
        if affine is None:
            affine = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
            #return status, 0
        (rot, tx, ty, sx, sy) = self.decomposeAffine(affine)
        print(' ', rot, tx, ty, sx, sy)

        if status is None:
            status = np.ones(len(idx_pairs), np.bool_)
            # for each src point, compute dst_est[i] = src[i] * affine
            error = []
            for i, p in enumerate(src):
                p_est = affine.dot(np.hstack((p, 1.0)))[:2]
                print('p est:', p_est, 'act:', dst[i])
                #np1 = np.array(i1.coord_list[pair[0]])
                #np2 = np.array(i2.coord_list[pair[1]])
                d = np.linalg.norm(p_est - dst[i])
                print('dist:', d)
                error.append(d)
            print('errors:', error)
            error = np.array(error)
            avg = np.mean(error)
            std = np.std(error)
            print('avg:', avg, 'std:', std)

            # mark the potential outliers
            for i in range(len(idx_pairs)):
                if error[i] > avg + 3 * std:
                    status[i] = False

        print('orientation:', orient)
        if orient == 'relative':
            # estimate relative orientation between features
            yaw1 = 0
            yaw2 = rot
        elif orient == 'aircraft':
            yaw1 = i1.aircraft_pose['ypr'][0]
            yaw2 = i2.aircraft_pose['ypr'][0]
        elif orient == 'camera':
            yaw1 = i1.camera_pose['ypr'][0]
            yaw2 = i2.camera_pose['ypr'][0]
        elif orient == 'sba':
            yaw1 = i1.camera_pose_sba['ypr'][0]
            yaw2 = i2.camera_pose_sba['ypr'][0]
        else:
            yaw1 = 0.0
            yaw2 = 0.0
        print(yaw1, yaw2)
        h, w = img1.shape[:2]
        scale = 790.0 / float(w)
        si1, M1 = self.rotateAndScale(img1, yaw1, scale)
        si2, M2 = self.rotateAndScale(img2, yaw2, scale)

        kp_pairs = []
        for p in idx_pairs:
            kp1 = self.copyKeyPoint(i1.kp_list[p[0]])
            p1 = M1.dot(np.hstack((kp1.pt, 1.0)))[:2]
            kp1.pt = (p1[0], p1[1])
            kp2 = self.copyKeyPoint(i2.kp_list[p[1]])
            p2 = M2.dot(np.hstack((kp2.pt, 1.0)))[:2]
            kp2.pt = (p2[0], p2[1])
            # print p1, p2
            kp_pairs.append((kp1, kp2))

        key = explore_match('find_obj',
                            si1,
                            si2,
                            kp_pairs,
                            hscale=1.0,
                            wscale=1.0,
                            status=status)

        # status structure represents in/outlier choices of user.
        # explore_match() modifies the status array in place.

        cv2.destroyAllWindows()

        # status is an array of booleans that parallels the pair array
        # and represents the user's choice to keep or discard the
        # respective pairs.
        return status, key
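The `decomposeAffine` method is not shown in this snippet. A plausible sketch, assuming the standard decomposition of a 2x3 affine matrix into rotation, translation, and per-axis scale (the tuple order matches the call above):

import math

def decomposeAffine(affine):
    # translation is the last column of the 2x3 matrix
    tx, ty = affine[0][2], affine[1][2]
    a, b = affine[0][0], affine[0][1]
    c, d = affine[1][0], affine[1][1]
    sx = math.sqrt(a * a + b * b)          # scale along x
    sy = math.sqrt(c * c + d * d)          # scale along y
    rot = math.degrees(math.atan2(-b, a))  # rotation in degrees
    return (rot, tx, ty, sx, sy)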
Example #27
			pt_match_file = a
	assert query_image and train_image

	img1 = cv2.imread(query_image,0)          # queryImage
	img2 = cv2.imread(train_image,0) # trainImage

	if pt_match_file:
		# read the hand-labeled point correspondences from file
		fp = open(pt_match_file, 'r')
		query_pt_set, train_pt_set = json.load(fp)
		fp.close()

		src_pts = np.float32(query_pt_set).reshape(-1, 1, 2)
		dst_pts = np.float32(train_pt_set).reshape(-1, 1, 2)

		explore_match('Raw', img1, img2, utils.construct_kp_pairs(query_pt_set, train_pt_set), output_img = original_match_image)

		# build the homography from the 8 corresponding points
		M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
		print('%d / %d' % (len(list(filter(lambda x: x > 0, mask.ravel().tolist()))), mask.shape[0]))
		print(M)

		height, width = img1.shape
		result = cv2.warpPerspective(img1, M, (width * 2, height * 2))
		result = utils.cut_black_edge(result)
		if transformed_image:
			cv2.imwrite(transformed_image, result)
		else:
			cv2.imshow('result', result)

		# filter the keypoints using the mask
Example #28
        print('unknown feature:', feature_name)
        sys.exit(1)
    '''
    i = 0
    while True:
        #i=i+1
        flag, img2 = cap.read()
        #img2 = cv2.imread('input.png')
        img2 = blackbodysegment(img2, 2, 2300)
        img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
        #print img2
        
        kp2, desc2 = affine_detect(detector, img2, pool=pool)
        print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))
        vis = explore_match('win', img1, img2, kp_pairs, None, H)
        


    cv2.waitKey()
    cv2.destroyAllWindows()
Example #29
while(True):
	# Get Frame
	ret, img = cap.read()
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	faces = face_cascade.detectMultiScale(gray, 1.3, 5)
	for (x,y,w,h) in faces:
		cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # Draw Faces On img

		# Crop Face
		cropped = img[y:y+h, x:x+w]
		croppedGray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
		
		# Match Face
		kpIn, desIn = orb.detectAndCompute(croppedGray,None)
		matches = bf.knnMatch(desFor, trainDescriptors = desIn, k = 2)
		pFor, pIn, kp_pairs = filter_matches(kpFor, kpIn, matches)
		explore_match('Match', searchFor,croppedGray,kp_pairs) # CV2 shows image

	# Show Result
	cv2.imshow('Raw Image',img)

	# Check For Close
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #30
def PlotMatches(sourceImage, kp1, rotatedImage, kp2, matches):
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match("K Nearest Neighbours", sourceImage, rotatedImage, kp_pairs)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #31
from find_obj import filter_matches, explore_match

bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # optionally crossCheck=True

matches = bf.knnMatch(des1, trainDescriptors = des2, k = 2)
p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image

cv2.waitKey()
cv2.destroyAllWindows()
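The crossCheck=True variant left in the comment above cannot be combined with knnMatch(k=2); OpenCV only allows k=1 when cross-checking is enabled. A hedged sketch of how it would be used instead:

bf_cc = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# each descriptor keeps at most one mutual-best match, so use match()
matches_cc = sorted(bf_cc.match(des1, des2), key=lambda m: m.distance)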

Example #32
		if m.distance < 0.75 * n.distance:
			good_matches.append(m)

	# Output keypoint visualizations
	if query_keypoints_image:
		img1_with_keypoints = cv2.drawKeypoints(img1, kp1, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
		cv2.imwrite(query_keypoints_image, img1_with_keypoints)
	if train_keypoints_image:
		img2_with_keypoints = cv2.drawKeypoints(img2, kp2, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
		cv2.imwrite(train_keypoints_image, img2_with_keypoints)

	# Show the matches
	mkp1 = [kp1[m.queryIdx] for m in good_matches]
	mkp2 = [kp2[m.trainIdx] for m in good_matches]
	kp_pairs = list(zip(mkp1, mkp2))
	explore_match('BFMatcher', img1, img2, kp_pairs, output_img=original_match_image)  # cv2 shows image


	# Extract pixel coordinates from the KeyPoints
	src_pts = np.float32([ kp1[m.queryIdx].pt for m in good_matches ]).reshape(-1, 1, 2)
	dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good_matches ]).reshape(-1, 1, 2)
	assert src_pts.shape == dst_pts.shape

	# Find the homography with RANSAC
	# M: the 3 x 3 transform matrix
	# mask: a 30 x 1 array of 0/1 flags marking each pair as kept (1) or discarded (0)
	M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
	
	print('%d / %d' % (len(list(filter(lambda x: x > 0, mask.ravel().tolist()))), mask.shape[0]))
	print(M)
Example #33
while(True):
	# Capture frame-by-frame
	ret, frame = cap.read()
	searchIn = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	img1 = searchFor
	img2 = searchIn

	# Initiate ORB detector
	orb = cv2.ORB()

	# find the keypoints and descriptors with ORB
	kp1, des1 = orb.detectAndCompute(img1,None)
	kp2, des2 = orb.detectAndCompute(img2,None)

	# create BFMatcher object
	bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # optionally crossCheck=True

	matches = bf.knnMatch(des1, trainDescriptors = des2, k = 2)
	p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
	explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image

	# Check For Close
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #34
import cv2
from find_obj import filter_matches, explore_match
import matplotlib.pyplot as plt

img1 = cv2.imread('D:/11.jpg', 0)  # queryImage
img2 = cv2.imread('D:/12.jpg', 0)  # trainImage

# Initiate detector (SURF here, despite the variable name)
#sift = cv2.SIFT()
sift = cv2.SURF()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

# Apply ratio test
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])

# cv2.drawMatchesKnn expects list of lists as matches.
p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
vis = explore_match('IMAGE', img1, img2, kp_pairs)  # cv2 shows image
cv2.imwrite("D:\\cat2.jpg", vis)
cv2.waitKey()
cv2.destroyAllWindows()
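The ratio-test list `good` built above is never actually used. A sketch of the drawMatchesKnn route the comment alludes to (drawMatchesKnn exists in OpenCV 3+, whereas the cv2.SURF() constructor above is 2.4-era, so the two halves assume different builds):

img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
plt.imshow(img3), plt.show()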