def compare_images(img1_path, img2_path):
  print(img1_path)
  print(img2_path)
  img1 = cv2.imread(img1_path, 0)  # load both images as grayscale
  img2 = cv2.imread(img2_path, 0)
  # print(img1)

  orb = cv2.ORB_create()  # cv2.ORB() on OpenCV 2.4

  # Find the keypoints and descriptors
  kp1, des1 = orb.detectAndCompute(img1,None)
  kp2, des2 = orb.detectAndCompute(img2,None)

  # Create BFMatcher object
  bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # crossCheck must stay False for knnMatch with k=2

  try:
    matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)

    # Lowe's ratio test: keep a match only when it is clearly better
    # than the second-best candidate
    good = []
    for m, n in matches:
      if m.distance < 0.7 * n.distance:
        print(n.distance)
        good.append(m)

    return good
  except Exception:
    print('Images not similar sizes')
    print('------------------------')
    return []
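All of these examples rely on filter_matches and explore_match from find_obj.py in OpenCV's Python samples. For reference, here is a minimal sketch of filter_matches, closely following the version shipped with those samples: it applies Lowe's ratio test to the knnMatch output and returns the matched point arrays plus the keypoint pairs that explore_match draws.

import numpy as np

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Keep a knn match only when the best candidate is clearly better
    # than the second best (Lowe's ratio test)
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append(kp1[m.queryIdx])
            mkp2.append(kp2[m.trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = list(zip(mkp1, mkp2))
    return p1, p2, kp_pairs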
Example #2
def matchSift():
    """Match SIFT features (a SURF detector is actually used below)."""
    img1 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\853117000372320013_gc_skew_mf\00000684_000000000119C94C.bmp",
        0)  # queryImage
    img2 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\13033022900009_gc_skew_mf\00000006_0000000000322D19.bmp",
        0)  # trainImage
    # sift = cv2.SIFT()
    # sift = cv2.xfeatures2d.SIFT_create()
    sift = cv2.xfeatures2d.SURF_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    print(matches)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
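The commented-out constructors above reflect API churn across OpenCV releases: cv2.SIFT() existed in 2.4, cv2.xfeatures2d.SIFT_create() in 3.x contrib builds, and cv2.SIFT_create() from 4.4 onward (SURF remains contrib-only). A small helper, as a sketch, that picks whichever variant the installed build provides:

def create_sift():
    # OpenCV >= 4.4: SIFT is back in the main module
    if hasattr(cv2, 'SIFT_create'):
        return cv2.SIFT_create()
    # OpenCV 3.x with opencv-contrib-python installed
    if hasattr(cv2, 'xfeatures2d'):
        return cv2.xfeatures2d.SIFT_create()
    # OpenCV 2.4
    return cv2.SIFT()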
Example #3
def extract_keypoints(path, scale_factor=1.0):
    cap = cv.VideoCapture(path)

    detector, matcher = init_feature('surf')

    raw_result = []
    first_frame = True
    frame_idx = 0
    kp1 = None
    kp2 = None
    des1 = None
    des2 = None
    key_points1 = None
    key_points2 = None

    while cap.isOpened():
        ret, frame = cap.read()

        if not ret:
            break

        kp, des = detector.detectAndCompute(frame, None)

        if first_frame:
            first_frame = False
            kp1 = kp
            des1 = des
            key_points1 = [{'pt': p.pt, 'color': generate_a_random_hex_color()} for p in kp]
            raw_result.append({'frame_idx': frame_idx, 'key_points': key_points1})

        else:
            kp2 = kp
            des2 = des
            raw_matches = matcher.knnMatch(des1, des2, k=2)
            src_pts, dst_pts, kp_pairs = filter_matches(kp1, kp2, raw_matches)
            key_points2 = get_key_points2(key_points1, src_pts, dst_pts)
            raw_result.append({'frame_idx': frame_idx, 'key_points': key_points2})

            kp1 = kp2
            des1 = des2
            key_points1 = key_points2

        frame_idx += 1

        print(frame_idx)

    cap.release()

    result = []
    for f in raw_result:
        key_points = []
        raw_key_points = f['key_points']
        for kp in raw_key_points:
            key_points.append({'pt': (int(round(kp['pt'][0] * scale_factor)), int(round(kp['pt'][1] * scale_factor))),
                               'color': kp['color']})

        result.append({'frame_idx': f['frame_idx'], 'key_points': key_points})

    return result
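extract_keypoints leans on helpers that are not shown here (init_feature, generate_a_random_hex_color, get_key_points2). Reconstructed purely from how it is called, get_key_points2 must carry each matched point's colour forward from one frame to the next; a hypothetical sketch:

def get_key_points2(key_points1, src_pts, dst_pts):
    # Hypothetical reconstruction: a feature keeps its colour as it moves
    def key(pt):  # tolerate float32 round-off between kp.pt and src_pts
        return (round(float(pt[0]), 1), round(float(pt[1]), 1))
    colors = {key(k['pt']): k['color'] for k in key_points1}
    key_points2 = []
    for src, dst in zip(src_pts, dst_pts):
        color = colors.get(key(src))
        if color is None:  # unmatched source point: assign a fresh colour
            color = generate_a_random_hex_color()
        key_points2.append({'pt': (float(dst[0]), float(dst[1])), 'color': color})
    return key_points2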
Example #4
    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, None, H)
Example #5
def match_and_draw(win):
    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    explore_match(win, img1, img2, kp_pairs, None, H)
    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2,
                                           k=2)  # 2 nearest neighbours
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        #p1, p2, kp_pairs = filter_matches_std(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        else:  # fall back to plain SIFT matching
            '''# Initiate SIFT detector
            sift = cv2.xfeatures2d.SIFT_create()

            # find the keypoints and descriptors with SIFT
            kp1, des1 = sift.detectAndCompute(img1, None)
            kp2, des2 = sift.detectAndCompute(img2, None)

            # FLANN parameters
            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)  # or pass empty dictionary

            flann = cv2.FlannBasedMatcher(index_params, search_params)

            matches = flann.knnMatch(des1, des2, k=2)

            # Need to draw only good matches, so create a mask
            matchesMask = [[0, 0] for i in range(len(matches))]

            # ratio test as per Lowe's paper
            for i, (m, n) in enumerate(matches):
                if m.distance < 0.7 * n.distance:
                    matchesMask[i] = [1, 0]

            draw_params = dict(matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0),
                               matchesMask=matchesMask,
                               flags=2)

            img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)

            cv2.imshow('matched',img3)'''
            return sift_detect(img1, img2)

        img, corners = explore_match(win, img1, img2, kp_pairs, None, H)
        return img, corners, kp_pairs
Example #7
def match(pic1, pic2):
    img1 = cv2.imread(pic1, 0)  # queryImage
    img2 = cv2.imread(pic2, 0)  # trainImage
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    return len(kp_pairs)
Example #8
    def match_and_draw(win, kpa, desca3):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1,
                                           trainDescriptors=desca3,
                                           k=2)  # 2 nearest neighbours
        p1, p2, kp_pairs = filter_matches(kp1, kpa, raw_matches)
        if len(p1) > 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
            return win, kp_pairs, H, np.sum(status)
        else:
            return win, kp_pairs, None, -1
Example #9
def matchSift3():
    """Match SIFT features."""
    img1 = cv2.imread('/home/liuzhen/Python test/image/quadrilateral_find.jpg',
                      0)  # queryImage
    img2 = cv2.imread('/home/liuzhen/Python test/image/screen_white_find.bmp',
                      0)  # trainImage
    #sift = cv2.SIFT()
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 3.0 provides drawMatchesKnn.
    # Apply ratio test
    # Ratio test: take the nearest neighbour B and second-nearest C of a query
    # point A; accept the match only when B/C is below the threshold (0.75),
    # since matching is assumed one-to-one and an ideal true match would have
    # distance 0.

    # good = []
    # for m, n in matches:
    #     if m.distance < 0.8 * n.distance:
    #         good.append([m])
    # img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[:], None, flags=2)
    # print (len(kp1),len(kp2),len(good))
    # plt.imshow(img3), plt.show()

    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    print('%d / %d  p1/p2' % (len(p1), len(p2)))
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' %
              len(p1))

    explore_match('affine find_obj', img1, img2, kp_pairs, None, H)

    cv2.waitKey()
    cv2.destroyAllWindows()
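The commented-out block in matchSift3 sketches OpenCV 3's built-in drawMatchesKnn visualiser; made runnable it might look like this (a sketch assuming an OpenCV 3+ build and matplotlib):

import matplotlib.pyplot as plt

def draw_good_matches(img1, kp1, img2, kp2, matches, ratio=0.8):
    # Ratio test; drawMatchesKnn expects a list of lists of matches
    good = [[m] for m, n in matches if m.distance < ratio * n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    print(len(kp1), len(kp2), len(good))
    plt.imshow(img3)
    plt.show()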
def matchSift():
    """Match SIFT features."""
    img1 = cv2.imread('../../data/box.png', 0)  # queryImage
    img2 = cv2.imread('../../data/box_in_scene.png', 0)  # trainImage
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #11
def hacerMosaico(directorio):
	directorio="/home/diego/salida/{}".format(directorio)
	archivos = os.listdir(directorio)
	
	img1 = cv2.imread('{}/{}'.format(directorio,archivos[0]),1)
	img2 = cv2.imread('{}/{}'.format(directorio,archivos[1]),1)
	img3 = cv2.imread('{}/{}'.format(directorio,archivos[2]),1)

	h1, w1 = img1.shape[:2]
	h2, w2 = img2.shape[:2]

	#create empty matrix
	vis = np.zeros((max(h1, h2), w1+w2,3), np.uint8)

	#combine 2 images
	vis[:h1, :w1,:3] = img1
	vis[:h2, w1:w1+w2,:3] = img2

	detector = cv2.xfeatures2d.SIFT_create()
	norm = cv2.NORM_L2
	matcher = cv2.BFMatcher(norm)

	#vis = np.concatenate((img1, img2), axis=1)

	#cv2.imshow('MOSAICO',vis)	

	kp1, desc1 = detector.detectAndCompute(vis, None)
	kp2, desc2 = detector.detectAndCompute(img3, None)

	raw_matches = matcher.knnMatch(desc2, trainDescriptors=desc1, k=2)  # 2 nearest neighbours
	p1, p2, kp_pairs = filter_matches(kp2, kp1, raw_matches)
	print(len(p1))
	if len(p1) >= 4:  # at least four correspondences are needed for a homography
		H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
		if status is not None:
			print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
			vis = explore_match('find_obj', vis, img3, kp_pairs, status, H)
	else:
		H, status = None, None

	cv2.waitKey(0)
Example #12
def doFeatureMatch(img1, img2, way, win):
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)

    method = way + '-flann'
    feature_name = opts.get('--feature', 'surf-flann')
    detector, matcher = init_feature(feature_name)
    if img1 is None:
        print('Failed to load img1')
        sys.exit(1)

    if img2 is None:
        print('Failed to load img2')
        sys.exit(1)
    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)
    print('using', feature_name)
    pool = ThreadPool(processes=cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' %
                  len(p1))
        explore_match(win, img1, img2, kp_pairs, None, H)

    cv.waitKey()
    cv.destroyAllWindows()
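init_feature, used here and in Example #3, is another helper from OpenCV's find_obj.py sample: it parses a name such as 'sift', 'orb' or 'surf-flann' into a detector plus a matching brute-force or FLANN matcher. A condensed sketch of its logic, assuming OpenCV >= 4.4 for SIFT_create and a contrib build for SURF:

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6

def init_feature(name):
    chunks = name.split('-')  # optional '-flann' suffix selects FLANN matching
    if chunks[0] == 'sift':
        detector, norm = cv.SIFT_create(), cv.NORM_L2
    elif chunks[0] == 'surf':
        detector, norm = cv.xfeatures2d.SURF_create(800), cv.NORM_L2
    elif chunks[0] == 'orb':
        detector, norm = cv.ORB_create(400), cv.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:  # LSH index for binary descriptors
            flann_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6,
                                key_size=12, multi_probe_level=1)
        matcher = cv.FlannBasedMatcher(flann_params, {})
    else:
        matcher = cv.BFMatcher(norm)
    return detector, matcher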
Example #13
def matchSift():
    """Match SIFT features (a SURF detector is actually used below)."""
    img1 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\962215042606600003_gc_skew_mf\00000259_0000000000A7CF24.bmp",
        0)  # queryImage
    img2 = cv2.imread(
        r"E:\lenovo_exercitation\natong_work\naton"
        r"g_product\grabcut_folder\962215042606600005_gc_skew_mf\00000245_0000000000A4BA9A.bmp",
        0)  # trainImage
    # sift = cv2.SIFT()
    # sift = cv2.xfeatures2d.SIFT_create()
    sift = cv2.xfeatures2d.SURF_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)

    num = 0
    result = 0
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            # result += m.distance*m.distance
            result += (m.distance - n.distance) * (m.distance - n.distance)
            num += 1
    if num > 0:
        result /= num
    # return result

    print(result)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #14
def matchSift():
    """Match SIFT features."""
    img1 = cv2.imread('/home/liuzhen/Python test/image/xcl3.jpg',
                      0)  # queryImage
    img2 = cv2.imread('/home/liuzhen/Python test/image/screen_white.bmp',
                      0)  # trainImage
    # sift = cv2.SIFT()
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #15
    def locate(self, img2, show=False):
        center = None

        kp2, desc2 = self.detector.detectAndCompute(img2, None)

        raw_matches = self.matcher.knnMatch(self.desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
        p1, p2, kp_pairs = filter_matches(self.kp1, kp2, raw_matches)

        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))

            if H is not None:

                h2, w2 = img2.shape[:2]

                corners = np.int32(cv2.perspectiveTransform(self.corners1.reshape(1, -1, 2), H).reshape(-1, 2))  #+ (w1, 0)
                center = np.int32(np.mean(corners, axis=0))

                if show:
                    cv2.polylines(img2, [corners], True, (255, 255, 255))
                    cv2.circle(img2, (center[0], center[1]), 5, (255, 255, 255))
                    cv2.imshow('hello1', img2)

                    if False:  # flip to True to inspect the raw matches
                        vis = explore_match('hello', self.img1, img2, kp_pairs, status, H)
        else:
            if len(p2):
                center = np.int32(np.mean(p2, axis=0))
            else:
                center = None

            print('%d matches found, not enough for homography estimation' % len(p1))

        return center
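locate assumes the surrounding class already stored the template image, its keypoints/descriptors, and its corner coordinates. A hypothetical constructor fragment matching the attributes used above, where img1 is the template:

        # Hypothetical setup matching the attributes locate() reads
        h1, w1 = img1.shape[:2]
        self.img1 = img1
        self.kp1, self.desc1 = self.detector.detectAndCompute(img1, None)
        self.corners1 = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])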
Example #16
def matchSift(img1_gray, img2_gray):
    """
    匹配sift特征
    """
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1_gray, None)
    kp2, des2 = sift.detectAndCompute(img2_gray, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13/sources/samples/python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches, ratio=0.5)
    matche_img = explore_match('find_obj', img1_gray, img2_gray,
                               kp_pairs)  # cv2 shows image
    cv2.imwrite("C:/Users/qq619/Desktop/05matches.png", matche_img)
    # Save the feature-match visualisation

    cv2.waitKey()
    cv2.destroyAllWindows()
    return p1, p2, des1, des2, matches
def draw_features(img1_path, img2_path):
  print(img1_path)
  print(img2_path)
  img1 = cv2.imread(img1_path, 0)
  img2 = cv2.imread(img2_path, 0)
  print('img1:', img1)
  print('img2:', img2)

  # Initiate ORB detector
  orb = cv2.ORB_create()

  # Find the keypoints and descriptors
  kp1, des1 = orb.detectAndCompute(img1, None)
  kp2, des2 = orb.detectAndCompute(img2, None)

  # Create BFMatcher object (crossCheck must stay False for knnMatch with k=2)
  bf = cv2.BFMatcher(cv2.NORM_HAMMING)

  matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
  p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
  explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image

  cv2.waitKey()
Example #18
def matchSift():
    """Match SIFT features."""
    img1 = cv2.imread("E:/image_compare/yp/15/1502.png")  # queryImage
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.imread("E:/image_compare/yp/16/1602.png")  # trainImage
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; two parameters: the distance metric (L2 by
    # default, or L1) and whether to cross-check (False by default)
    bf = cv2.BFMatcher()
    # Return the k best matches
    matches = bf.knnMatch(des1, des2, k=2)
    # cv2.drawMatchesKnn expects list of lists as matches.
    # OpenCV 2.4.13 has no drawMatchesKnn; copy common.py and find_obj.py from
    # opencv2.4.13\sources\samples\python2 into the current directory and import them
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    matche_img = explore_match('find_obj', img1, img2,
                               kp_pairs)  # cv2 shows image
    cv2.imwrite("C:/Users/qq619/Desktop/02matches.png", matche_img)

    cv2.waitKey()
    cv2.destroyAllWindows()
Example #19
def main(args):
    with h5py.File(args['ah'], "r") as a:
        kps = a['keypoints'][()]
        kp1 = [cv2.KeyPoint(kp[0], kp[1], kp[2], kp[3]) for kp in kps]
        desc1 = a['descriptors'][()]

    with h5py.File(args['bh'], "r") as b:
        kps = b['keypoints'][()]
        kp2 = [cv2.KeyPoint(kp[0], kp[1], kp[2], kp[3]) for kp in kps]
        desc2 = b['descriptors'][()]

    detector, matcher = init_feature("sift")

    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' %
              len(p1))

    img1 = cv2.imread(args['a'], 0)
    img2 = cv2.imread(args['b'], 0)

    explore_match("LIFT Match", img1, img2, kp_pairs, None, H)

    cv2.waitKey(0)
    cv2.destroyAllWindows()

Example #20
import cv2
from find_obj import filter_matches, explore_match

# Assumes img1/img2 and their ORB keypoints/descriptors
# (kp1, des1) and (kp2, des2) have already been computed.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # crossCheck must stay False for knnMatch

matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
explore_match('find_obj', img1, img2, kp_pairs)  # cv2 shows image

cv2.waitKey()
cv2.destroyAllWindows()

    def run(self):
        # init big image for stitching
        ret, frame = self.cam.read()
        frame=cv2.resize(frame,(320,240))
        h,w,d=frame.shape
        big_image = np.zeros((h*12,w*3,3), np.uint8)
        starty=h*11
        startx=w
        total_transl_x=0
        total_transl_y=0

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.prev_gray=frame_gray
        self.prev_frame=frame

        detector, matcher = init_feature('sift-flann')
        pool=ThreadPool(processes = cv2.getNumberOfCPUs())

        while True:
            for i in range(skip_frames):
                ret, frame = self.cam.read()
            ret, frame = self.cam.read()
            frame=cv2.resize(frame,(320,240))

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            img0, img1 = self.prev_gray, frame_gray

            kp1, desc1 = affine_detect(detector, img0[10:h-50,10:w-10], pool=pool)
            kp2, desc2 = affine_detect(detector, img1[10:h-50,10:w-10], pool=pool)
            print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

            with Timer('matching'):
                raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  # 2 nearest neighbours
            p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
            if len(p1) >= 4:
                H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
                print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
                # do not draw outliers (there will be a lot of them)
                kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

                warp = cv2.warpPerspective(img0[10:h-50,10:w-10], H, (w, h*2))
                cv2.imshow("warped",warp)

            else:
                H, status = None, None
                print('%d matches found, not enough for homography estimation' % len(p1))

            vis = explore_match('affine find_obj', img0, img1, kp_pairs, None, H)




            # stitching-----------------------
            translation = np.zeros((3,1))  #3,1
            if len(p1)>4 and len(p2)>4:

                # temp1=[]
                # # temp2=[]

                # for i in range(len(kp1)):
                #     print kp1[i].pt+ (0,)
                #     temp1.append(kp1[i].pt+ (0,))
                # # for i in range(len(kp2)):
                # #     temp2.append(kp2[i].pt)
                # points1.astype(np.uint8)

                # points1 = np.array(temp1)
                # print points1
                # # points2 = np.array(temp2)

                # Hr=cv2.estimateRigidTransform(points1, points1,False)

                translation[:,0] = H[:,2] #Hr[:,2]

                # rotation = np.zeros((3,3))
                # rotation[:,0] = H[:,0]
                # rotation[:,1] = H[:,1]
                # rotation[:,2] = np.cross(H[0:3,0],H[0:3,1])

                # print "x translation:",translation[0]
                # print "y translation:",translation[1]

                draw_str(vis, (20, 40), 'x-axis translation: %.1f' % translation[0])
                draw_str(vis, (20, 60), 'y-axis translation: %.1f' % translation[1])

                if translation[0]<60 and translation[1]<60:  #check for bad H
                    total_transl_x+=int(translation[0])
                    total_transl_y+=int(translation[1])

                    draw_str(vis, (20, 80), 'tot x-axis translation: %.1f' % total_transl_x)
                    draw_str(vis, (20, 100), 'tot y-axis translation: %.1f' % total_transl_y)

                    #h,w,d=frame.shape

                    frame_over=self.prev_frame[10:h-50,10:w-10].copy()
                    overlay = cv2.warpPerspective(frame_over, H, (w, h))
                    frame_h,frame_w,d=frame_over.shape

                    cv2.imshow('overlay',overlay)
                    #vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)
                    big_image[starty-int(total_transl_y):starty-int(total_transl_y)+frame_h,startx-int(total_transl_x):startx-int(total_transl_x)+frame_w]=overlay[0:frame_h,0:frame_w].copy()

            #small_image=big_image.copy()
            big_h,big_w,d=big_image.shape
            small_image=cv2.resize(big_image,(big_w//4,big_h//4))  # integer division keeps the size a valid int
            cv2.imshow('stitching', small_image)
            #cv2.imwrite("result.jpg",big_image);



            self.frame_idx += 1
            self.prev_gray = frame_gray
            self.prev_frame=frame

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
        print('Match it!!!!!!')
        print('image ', i)
        # print data_image_top_des
        print(type(des_target[0,0]))

        ## from here. if the VW appears more than 5 time, then we perform the homography on it.




        for j in range(how_many_top):
            ## threshold: 5
            if data_image_top_des[j].shape[0] >= 5:
                print('round ', j)
                bf = cv2.BFMatcher(cv2.NORM_HAMMING)
                matches = bf.knnMatch(np.uint8(target_image_top_des[j]), trainDescriptors=np.uint8(data_image_top_des[j]), k=2)
                print(len(matches))
                p1, p2, kp_pairs = filter_matches(target_image_top_kpts[j], data_image_top_kpts[j], matches, 0.99)
                if len(kp_pairs) > 0:
                    try:
                        explore_match('find_obj', img_gray,img_tmp,kp_pairs)
                        cv2.waitKey()
                        cv2.destroyAllWindows()
                    except Exception:
                        print('error!!!')
                else:
                    print('not enough pairs.')
                    # print np.uint8(target_image_top_des[j])
                    print(np.uint8(data_image_top_des[j]))
                    print('len of kp_pairs: ', len(kp_pairs))
Example #23
    def onMessage(self, payload,
                  isBinary):  #Code that runs when message is triggered
        message = format(payload.decode('utf8'))  #Message decoded as utf8
        global imageCounter
        #To enhance efficiency starting and stopping camera views separated
        #However DO NOT FORGET to close a camera capture
        if (message == '1'):  #Start Camera Capture
            print("Starting Camera Capture")
            self.cap = cv2.VideoCapture(0)  #Capture Camera
            time.sleep(2)  #Time given for camera to start up

        if (message == '2'):  #Stop Camera capture
            print("Closing Camera Capture")
            self.cap.release()

        if (message == '3'):  #Multiple Camera displaying algorithms can be added below
            #Remember to refer to video capture as self.cap
            #Replace your own code here. Below is a simple example

            ret, frame = self.cap.read()
            fframe = cv2.flip(frame, 1)

            #The code below sends the captured frame to the Camera viewer. DO NOT DELETE
            #the image is passed to the encoder as frame, you can pass any other image matrix
            encode_param = [1, 90]
            res, image = cv2.imencode('.png', fframe, encode_param)
            sample = base64.b64encode(image)
            self.sendMessage(sample, isBinary)

        #This algorithm saves an image from the camera. Used for training
        #Hope it works
        if (message == '4'):

            ret, frame = self.cap.read()
            #fframe = cv2.flip(frame,1)

            encode_param = [1, 90]
            #res, savingImage = cv2.imencode('.png',fframe,encode_param)
            name = 'capture/' + str(imageCounter) + '.png'
            print("Saving Image", name)
            cv2.imwrite(name, frame)  # the flip above is commented out, so save the raw frame
            imageCounter += 1
            self.sendMessage("Saved", isBinary=False)

        #This will take the snap shot and find similarities with the images
        #if it doesnt it replies unknown
        if (message == '5'):  #add extra algorithms like this

            ret, frame = self.cap.read()
            fframe = cv2.flip(frame, 1)

            j = 0
            result = "unknown"

            for item in templates:
                tmp = cv2.imread('res/' + item)

                orb = cv2.ORB_create()  # cv2.ORB() on OpenCV 2.4
                kp1, des1 = orb.detectAndCompute(fframe, None)
                kp2, des2 = orb.detectAndCompute(tmp, None)

                bf = cv2.BFMatcher(cv2.NORM_HAMMING)  #, crossCheck=True)
                matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)

                p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)

                i = 0
                m1 = np.array([])
                m2 = np.array([])
                mp_pairs = np.array([])

                for p in p1:
                    if (p[0] > 266 and p[0] < 1014 and p[1] > 45
                            and p[1] < 675):
                        m1 = np.append(m1, p1[i] - np.array([266, 0]), axis=0)
                        m2 = np.append(m2, p2[i], axis=0)
                        mp_pairs = np.append(mp_pairs, kp_pairs[i], axis=0)
                    i += 1

                m1 = np.reshape(m1, (-1, 2))
                m2 = np.reshape(m2, (-1, 2))

                try:
                    homeo = cv2.findHomography(p1, p2, method=0)
                except Exception:
                    pass

                stackSize = m1.size // 2  # number of point pairs kept
                #print m1/m2
                ratio = np.sum(m1 / m2, axis=0)

                #print stackSize, ratio
                # xSim/ySim stay NaN if the ratio could not be computed
                xSim = ySim = float('nan')
                try:
                    xSim = abs(((stackSize - ratio[0]) / stackSize) * 100)
                    ySim = abs(((stackSize - ratio[1]) / stackSize) * 100)
                except Exception:
                    pass

                if (~np.isnan(xSim) or ~np.isnan(ySim)):
                    print((xSim + ySim) / 2)
                    if (((xSim + ySim) / 2) > 80):
                        result = templateNames[j]

                print(result)

                j += 1

            self.sendMessage(result, isBinary=False)
Example #24
while(True):
	# Get Frame
	ret, img = cap.read()
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	faces = face_cascade.detectMultiScale(gray, 1.3, 5)
	for (x,y,w,h) in faces:
		cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # Draw Faces On img

		# Crop Face
		cropped = img[y:y+h, x:x+w]
		croppedGray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
		
		# Match Face
		kpIn, desIn = orb.detectAndCompute(croppedGray,None)
		matches = bf.knnMatch(desFor, trainDescriptors = desIn, k = 2)
		pFor, pIn, kp_pairs = filter_matches(kpFor, kpIn, matches)
		explore_match('Match', searchFor,croppedGray,kp_pairs) # CV2 shows image

	# Show Result
	cv2.imshow('Raw Image',img)

	# Check For Close
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
def PlotMatches(sourceImage, kp1, rotatedImage, kp2, matches):
    p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
    explore_match("K Nearest Neighbours", sourceImage, rotatedImage, kp_pairs)
    cv2.waitKey()
    cv2.destroyAllWindows()
while(True):
	# Capture frame-by-frame
	ret, frame = cap.read()
	searchIn = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	img1 = searchFor
	img2 = searchIn

	# Initiate ORB detector
	orb = cv2.ORB_create()

	# find the keypoints and descriptors with ORB
	kp1, des1 = orb.detectAndCompute(img1,None)
	kp2, des2 = orb.detectAndCompute(img2,None)

	# create BFMatcher object
	bf = cv2.BFMatcher(cv2.NORM_HAMMING)#, crossCheck=True)

	matches = bf.knnMatch(des1, trainDescriptors = des2, k = 2)
	p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
	explore_match('find_obj', img1,img2,kp_pairs)#cv2 shows image

	# Check For Close
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #27
def playVideo(nombre, descriptores):
	detector = cv2.xfeatures2d.SIFT_create()
	norm = cv2.NORM_L2
	matcher = cv2.BFMatcher(norm)

	cap = cv2.VideoCapture(nombre)

	fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
	i=0
	roi_index = 0
	levels=1
	while True:
		t0 = time.perf_counter()  # timing (time.clock was removed in Python 3.8)
		ret, frame = cap.read() # Capture frame-by-frame
		if ret == False:
			break
		#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert frame to grayscale
		fgmask = fgbg.apply(frame)  # detect the moving foreground
		retval, thresh = cv2.threshold(fgmask, 200, 256, cv2.THRESH_BINARY)  # remove shadows
		enmask = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)  # replicate to three channels to combine with an RGB frame
		enmask = cv2.bitwise_and(frame,enmask)  # mask the original frame
		_, contours0,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
		contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
		# Find the index of the largest contour
		areas = [cv2.contourArea(c) for c in contours]
		if len(areas) > 0:
			print('ROIs:', len(areas))
			max_index = np.argmax(areas)
			cnt=contours[max_index]
			x,y,w,h = cv2.boundingRect(cnt)
			area = w * h
			print('area: ', area)
			if (area > 100) & (h > w):  # caviar area: 500, visor area: 1000
				cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)  # draw rectangle
				cv2.circle(frame, (int(x+(w/2)), int(y+(h/2))), 2, (0,255,0))  # draw a dot at the rectangle centre
				roi_index = roi_index + 1
				roi = enmask[y:y+h,x:x+w]
				kp, desc = detector.detectAndCompute(roi, None)
				print(len(kp))
				#print('position: ({},{}) size: ({},{}) area: {}'.format(x,y,w,h,area))
				if len(kp) > 0:  # caviar: 4, visor: 20
					for par in descriptores:
						raw_matches = matcher.knnMatch(par.descriptor, trainDescriptors=desc, k=2)  # 2 nearest neighbours
						p1, p2, kp_pairs = filter_matches(par.keypoints, kp, raw_matches)
						if len(p1) >= 4:
							H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
							if status is not None:
								print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
								'''
								tmp1 = 'match{}_1.png'.format(i)
								tmp2 = 'match{}_2.png'.format(i)
								cv2.imwrite('{}{}'.format(dirsalida,tmp1),par.image)
								cv2.imwrite('{}{}'.format(dirsalida,tmp2),roi)
								'''
							vis = explore_match('find_obj', roi, par.image, kp_pairs, status, H)
						else:
							H, status = None, None
							#print('%d matches found, not enough for homography estimation' % len(p1))
					par = Par(kp,desc,roi)
					print('stored descriptors:', len(descriptores))
					descriptores.append(par)
		cv2.imshow('frame',frame)
		#cv2.imshow('fgmask',fgmask)
		#cv2.imshow('thresh',thresh)
		#cv2.imshow('enmask',enmask)
		if cv2.waitKey(20) & 0xFF == ord('q'):
			# When everything done, release the capture
			break
		t1 = time.perf_counter() - t0
		t0 = t1
		#print('frame:', i, ' rate: ', 1. / t1)
		i=i+1
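playVideo stores each region's features in a Par object that this example never defines; a minimal container, reconstructed from how it is used (par.keypoints, par.descriptor, par.image):

class Par:
	# Minimal sketch inferred from usage in playVideo()
	def __init__(self, keypoints, descriptor, image):
		self.keypoints = keypoints
		self.descriptor = descriptor
		self.image = image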
Example #28
	def onMessage(self, payload, isBinary):#Code that runs when message is triggered
		message = format(payload.decode('utf8')) #Message decoded as utf8
		category = 'none'
		global imageCounter
		#To enhance efficiency starting and stopping camera views separated
		#However DO NOT FORGET to close a camera capture
		if (message == '1'):#Start Camera Capture
			print("Starting Camera Capture")
			self.cap = cv2.VideoCapture(0)#Capture Camera
			time.sleep(2)#Time given for camera to start up
			
		if (message == '2'):#Stop Camera capture
			print("Closing Camera Capture")
			self.cap.release()
			
		if (message == '3'):#Multiple Camera displaying algorithms can be added below
			#Remember to refer to video capture as self.cap
			#Replace your own code here. Below is a simple example

			ret,frame = self.cap.read()
			fframe = cv2.flip(frame,1)
			
			
			#The code below sends the captured frame to the Camera viewer. DO NOT DELETE
			#the image is passed to the encoder as frame, you can pass any other image matrix
			encode_param=[1,90]
			res, image = cv2.imencode('.png',fframe,encode_param)
			sample = base64.b64encode(image)
			self.sendMessage(sample, isBinary)
			
		#This algorithm saves an image from the camera. Used for training
		#Hope it works
		if (message == '4'):

			ret,frame = self.cap.read()
			fframe = cv2.flip(frame,1)

			encode_param=[1,90]
			#res, savingImage = cv2.imencode('.png',fframe,encode_param)
			name = 'capture/' + str(imageCounter) + '.png'
			print("Saving Image", name)
			cv2.imwrite(name, fframe)
			imageCounter += 1
			self.sendMessage("Saved",isBinary = False)


		#This will take the snap shot and find similarities with the images
		#if it doesnt it replies unknown
		if (message == '5'): #add extra algorithms like this

			ret,frame = self.cap.read()
			fframe = cv2.flip(frame,1)

			j = 0
			result = "unknown"
			category = "none"

			for item in templates:
				tmp = cv2.imread('res/'+item)

				orb = cv2.ORB_create()  # cv2.ORB() on OpenCV 2.4
				kp1, des1 = orb.detectAndCompute(fframe,None)
				kp2, des2 = orb.detectAndCompute(tmp,None)

				bf = cv2.BFMatcher(cv2.NORM_HAMMING)#, crossCheck=True)
				matches = bf.knnMatch(des1, trainDescriptors = des2, k=2)

				p1, p2, kp_pairs = filter_matches(kp1, kp2, matches)
	
				i = 0
				m1 = np.array([])
				m2 = np.array([])
				mp_pairs = np.array([])

				for p in p1:
					if(p[0]>266 and p[0]<1014 and p[1]>45 and p[1]<675):
						m1 = np.append(m1, p1[i] - np.array([266,0]), axis=0)
						m2 = np.append(m2, p2[i], axis=0)
						mp_pairs = np.append(mp_pairs, kp_pairs[i], axis=0)
					i+=1

				m1 = np.reshape(m1,(-1,2))
				m2 = np.reshape(m2,(-1,2))

				try:
					homeo = cv2.findHomography(p1,p2, method=0)
				except Exception:
					pass

				stackSize = m1.size // 2  # number of point pairs kept
				#print m1/m2
				ratio = np.sum( m1/m2, axis=0)

				#print stackSize, ratio
				# xSim/ySim stay NaN if the ratio could not be computed
				xSim = ySim = float('nan')
				try:
					xSim = abs(((stackSize - ratio[0]) / stackSize) * 100)
					ySim = abs(((stackSize - ratio[1]) / stackSize) * 100)
				except Exception:
					pass

				if(~np.isnan(xSim) or ~np.isnan(ySim)):
					print((xSim + ySim)/2)
					if (((xSim + ySim)/2) > 80):
						result = templateNames[j]
						category = categoryNames[j]

				print(result)

				j+=1

			self.sendMessage(result,isBinary = False)
			if category == 'none' :
				gpio.output(8,False)
				gpio.output(10,False)
				gpio.output(11,False)
			elif category == 'green' :
				gpio.output(8,True)
				gpio.output(10,False)
				gpio.output(11,False)
			elif category == 'red' :
				gpio.output(8,False)
				gpio.output(10,True)
				gpio.output(11,False)
			elif category == 'blue' :
				gpio.output(8,False)
				gpio.output(10,False)
				gpio.output(11,True)