def __init__(self, dataroot, room_number, feature_type, use_ransac):
        self.dataroot = dataroot
        self.room_number = room_number
        self.mask = None
        self.use_ransac = use_ransac

        # Open up image data file and load metadata
        image_data_file = open(
            dataroot + "/tumvi_room" + str(room_number) + "_cam0", "r")
        self.image_data = json.load(image_data_file)

        # Open up ground truth file and load timestamps and ground truth gsb
        gt_filename = "tumvi_room" + str(room_number) + "_gt"
        self.load_gt_file(gt_filename)

        # Get feature detector and matcher
        if feature_type == "sift":
            self.detector = cv2.xfeatures2d.SIFT_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_L2,
                                                crossCheck=True)
        elif feature_type == "surf":
            self.detector = cv2.xfeatures2d.SURF_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_L2,
                                                crossCheck=True)
        elif feature_type == "orb":
            self.detector = cv2.ORB_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_HAMMING,
                                                crossCheck=True)

        # Camera projection matrix for six indoor rooms
        fx = 190.97847715128717
        fy = 190.9733070521226
        cx = 254.93170605935475
        cy = 256.8974428996504
        self.K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        self.K_inv = np.array([[1 / fx, 0, -cx / fx], [0, 1 / fy, -cy / fy],
                               [0, 0, 1]])

        # g_bc from TUMVI, cam0
        self.T_bc = np.array([0.04557484, -0.0711618, -0.04468125])
        R_bc_dcm = np.array([[-0.99952504, 0.00750192, -0.02989013],
                             [0.02961534, -0.03439736, -0.99896935],
                             [-0.00852233, -0.99938008, 0.03415885]])
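        # NOTE: SciPy renamed Rotation.from_dcm to Rotation.from_matrix
        # (from_dcm was removed in SciPy 1.6); use from_matrix on newer SciPy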
        self.R_bc = Rotation.from_dcm(R_bc_dcm)

        # For storing pts and timestamps
        self.stored_points = {}
        self.timestamps = [0 for i in range(len(self.image_data))]

        # Place to dump text files full of points
        if self.use_ransac:
            self.pointlist_dir = dataroot + "/featurelists_gt_ransac/" + feature_type + \
                "/room" + str(self.room_number)
        else:
            self.pointlist_dir = dataroot + "/featurelists_gt/" + feature_type + \
                "/room" + str(self.room_number)
        if not os.path.exists(self.pointlist_dir):
            os.makedirs(self.pointlist_dir)
    def matchKeypoints(self, kpsA, kpsB, descriptorsA, descriptorsB, ratio,
                       thresh):
        # Build a brute-force matcher
        bruteForce = cv2.BFMatcher_create()
        # Use kNN (k=2) to match the SIFT features from images A and B
        rawMatches = bruteForce.knnMatch(descriptorsA, descriptorsB, 2)

        matches = []
        for m in rawMatches:
            # Keep the pair when the nearest distance is below ratio times
            # the second-nearest distance (Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                # Store each point's index into featuresA and featuresB
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # With more than 4 surviving matches, estimate the perspective transform
        if len(matches) > 4:
            # Gather the point coordinates of each match
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # Estimate the homography with RANSAC
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, thresh)

            # Return the matches, the homography, and the inlier status
            return (matches, H, status)

        # Not enough matches: return None
        return None
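
A minimal usage sketch for matchKeypoints (hedged: `stitcher` stands for an instance of the enclosing class, and the image paths are placeholders). Note that the method indexes kpsA/kpsB directly, so it expects plain (x, y) coordinates rather than cv2.KeyPoint objects:

import cv2
import numpy as np

imgA = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)    # placeholder path
imgB = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)   # placeholder path

sift = cv2.SIFT_create()
kpsA_raw, descriptorsA = sift.detectAndCompute(imgA, None)
kpsB_raw, descriptorsB = sift.detectAndCompute(imgB, None)

# Convert cv2.KeyPoint objects to plain coordinate arrays
kpsA = np.float32([kp.pt for kp in kpsA_raw])
kpsB = np.float32([kp.pt for kp in kpsB_raw])

result = stitcher.matchKeypoints(kpsA, kpsB, descriptorsA, descriptorsB,
                                 ratio=0.75, thresh=4.0)
if result is not None:
    matches, H, status = result
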
def detect_best_sift(templates, template_names, kp_t, des_t, img_c, name, top,
                     time_start, time_end):
    sift = cv2.SIFT_create()

    bf = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)
    time_start.append(time.perf_counter())
    kp_c, des_c = sift.detectAndCompute(img_c, None)
    time_end.append(time.perf_counter())

    all_matches = []
    avg = []
    for des in des_t:
        time_start.append(time.perf_counter())
        matches = bf.match(des, des_c)
        time_end.append(time.perf_counter())
        matches.sort(key=lambda x: x.distance)
        # Average distance over the best matches (top 8)
        top_matches = matches[:8]
        avg.append(mean(d.distance for d in top_matches))
        all_matches.append(matches)

    # Sorting everything
    avg, templates, template_names, all_matches, kp_t, des_t = zip(
        *sorted(zip(avg, templates, template_names, all_matches, kp_t, des_t),
                key=lambda x: x[0]))

    good_matches = all_matches[0][:top]
    show_result(templates[0], img_c, template_names[0], name, 'sift', kp_t[0],
                kp_c, all_matches[0], good_matches, avg[0], time_start,
                time_end)
Example #4
    def __init__(self, det='sift'):
        self.detector = None
        self.det = det
        self.size = None

        if det == 'sift_flann':
            self.detector = cv2.xfeatures2d.SIFT_create()
            FLANN_INDEX_KDTREE = 1
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)  # or pass empty dictionary
            self.matcher = cv2.FlannBasedMatcher(index_params, search_params)

        elif det == 'orb_gms':
            self.detector = cv2.ORB_create(10000)
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        elif det == 'sift_bf':
            self.detector = cv2.xfeatures2d.SIFT_create()
            self.matcher = cv2.BFMatcher()

        elif det == 'orb_org':
            orb = cv2.ORB_create(5000)
            orb.setFastThreshold(0)
            if cv2.__version__.startswith('3'):
                matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
            else:
                matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
            # Share one GmsMatcher instance for both roles
            gms = GmsMatcher(orb, matcher)
            self.detector = gms
            self.matcher = gms

        else:
            raise ValueError('Unknown detector {}'.format(det))
def matchImages(img1, img2, numPoints):
    # TODO when loading images, throw out
    # any points that aren't in the ideal place

    orb = cv.ORB_create()

    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    bf = cv.BFMatcher_create(cv.NORM_HAMMING, crossCheck=True)

    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)

    numPoints = min(numPoints, len(matches))

    # src_pts = np.float32([kp1[matches[i].queryIdx].pt for i in range(numPoints)]).reshape(-1, 1, 2)
    dst_pts = np.float32(
        [kp2[matches[i].trainIdx].pt for i in range(numPoints)]).reshape(-1, 1, 2)

    output = ([], matches[:numPoints])
    for i in range(numPoints):
        output[0].append(dst_pts[i][0])

    img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
    #cv.imwrite("tes.png", img3)

    return output
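
A possible caller for matchImages (the file names are placeholders, not from the source):

import cv2 as cv

img1 = cv.imread('left.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('right.png', cv.IMREAD_GRAYSCALE)

points, matches = matchImages(img1, img2, numPoints=20)
# points[i] is the (x, y) location in img2 of the i-th best match
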
Example #6
def detect_best_sift(templates, template_names, kp_t, des_t, img_c, name, top):
    sift = cv2.SIFT_create()

    bf = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)

    kp_c, des_c = sift.detectAndCompute(img_c, None)

    all_matches = []
    avg = []
    for des in des_t:
        matches = bf.match(des, des_c)
        matches.sort(key=lambda x: x.distance)
        # Average distance over the best matches (top 8)
        top_matches = matches[:8]
        avg.append(mean(d.distance for d in top_matches))
        all_matches.append(matches)

    #Sorting everything
    avg, templates, template_names, all_matches, kp_t, des_t = zip(
        *sorted(zip(avg, templates, template_names, all_matches, kp_t, des_t),
                key=lambda x: x[0]))

    img_t = cv2.drawKeypoints(templates[0], kp_t[0], None)
    img_c = cv2.drawKeypoints(img_c, kp_c, None)

    good_matches = all_matches[0][:top]
    show_result(img_t, img_c, template_names[0], "sift" + name, kp_t[0], kp_c,
                all_matches[0], good_matches, avg[0])
Example #7
def alignImages(im1, im2):
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    orb = cv2.ORB_create(MAX_FEATURES)

    kp1 = orb.detect(im1Gray)
    kp1, des1 = orb.compute(im1Gray, kp1)
    kp2 = orb.detect(im2Gray)
    kp2, des2 = orb.compute(im2Gray, kp2)

    matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
    matches = matcher.match(des1, des2)

    matches = sorted(matches, key=lambda x: x.distance)
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]  # keep the lowest-distance matches
    # img3 = cv2.drawMatches(im1Gray,kp1,im2Gray,kp2,matches[:10],None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    # plotImages(img3)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches])
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches])

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 0.5)

    h, w = im2Gray.shape
    im1Reg = cv2.warpPerspective(im1, M, (w, h))
    # plotImages(im2, im1Reg, im1, titles=['base', 'reg', 'flaw'])

    return im1Reg
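
alignImages depends on two module-level constants that the snippet does not define; plausible values (an assumption, not taken from the source) and a sketch of a call:

import cv2

MAX_FEATURES = 500          # assumed: number of ORB keypoints to detect
GOOD_MATCH_PERCENT = 0.15   # assumed: fraction of best matches to keep

base = cv2.imread('reference.jpg')   # placeholder paths
moved = cv2.imread('shifted.jpg')
registered = alignImages(moved, base)  # warps `moved` into `base`'s frame
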
def compute_matches(img1, img2, threshold=0.75):
    '''
    Computes keypoints, descriptors and matches between two GRAYSCALE images
    Input:
        img1 - image 1
        img2 - image 2
        threshold - float, threshold for the ratio test
    Output:
        kp1 - list of KeyPoints
        kp2 - ditto
        des1 - array of descriptors
        des2 - ditto
        matches - list of matches between kp1 and kp2, based on des1 and des2
    '''

    # perform sift to get keypoints
    sift = cv.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # instantiate matcher object
    matcher = cv.BFMatcher_create()
    # get 2 best matches
    matches = matcher.knnMatch(des1, des2, k=2)
    # apply ratio test -> reject keypoints for which the second best
    # match is not much worse than the best match, as explained by D.Lowe
    # in his paper
    good_matches = []
    for i, j in matches:
        if i.distance < threshold * j.distance:
            good_matches.append([i])

    return kp1, des1, kp2, des2, good_matches
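
Because compute_matches wraps each surviving match in a single-element list, cv.drawMatchesKnn is the natural way to visualize the result (file names are placeholders):

import cv2 as cv

img1 = cv.imread('a.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('b.png', cv.IMREAD_GRAYSCALE)

kp1, des1, kp2, des2, good = compute_matches(img1, img2, threshold=0.75)
vis = cv.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                        flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite('matches.png', vis)
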
Example #9
def good_matching():
    src1 = cv2.imread('D:/python/Image/box.png', cv2.IMREAD_GRAYSCALE)
    src2 = cv2.imread('D:/python/Image/box_in_scene.png', cv2.IMREAD_GRAYSCALE)

    if src1 is None or src2 is None:
        print('Image load failed!')
        return

    orb = cv2.ORB_create()

    keypoints1, desc1 = orb.detectAndCompute(src1, None)
    keypoints2, desc2 = orb.detectAndCompute(src2, None)

    matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
    matches = matcher.match(desc1, desc2)

    matches = sorted(matches, key=lambda x: x.distance)
    good_matches = matches[:50]

    dst = cv2.drawMatches(src1,
                          keypoints1,
                          src2,
                          keypoints2,
                          good_matches,
                          None,
                          flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    cv2.imshow('dst', dst)
    cv2.waitKey()
    cv2.destroyAllWindows()
def bruteForceMatcherTimes():

    bf = cv2.BFMatcher_create()
    sift = cv2.xfeatures2d.SIFT_create(nOctaveLayers=3)

    # just get two random images 
    img1 = cv2.imread('/Volumes/USB/H653A_11.3/3/maskedSamples/H653A_002_0.png')
    img2 = cv2.imread('/Volumes/USB/H653A_11.3/3/maskedSamples/H653A_003_0.png')

    # get sift info
    _, des_ref = sift.detectAndCompute(img1,None)
    _, des_tar = sift.detectAndCompute(img2,None)

    des_ref_accum = []
    des_tar_accum = []

    timeStore = []
    tRange = np.arange(1, 20, 2)
    for i in tRange:
        print("Processing " + str(i))

        # make the lists i long
        des_ref_accum = []
        des_tar_accum = []
        for n in range(i):
            des_ref_accum.append(des_ref)
            des_tar_accum.append(des_tar)
        start = time()
        # vstack the copies row-wise so the matcher sees i times as many
        # descriptors
        matches = bf.match(np.vstack(des_ref_accum), np.vstack(des_tar_accum))
        elapsed = time() - start
        timeStore.append(elapsed)

    plt.plot(tRange, timeStore); plt.show()

    print("")
Example #11
def match(kp1, des1, kp2, des2, th=0.75, method='bfmatcher'):
    good_matches = []
    if method.startswith('bf'):
        # Cross-check matching
        bfm = cv2.BFMatcher_create(cv2.NORM_L2, True)
        good_matches = bfm.knnMatch(des1, des2, 1)
        good_matches = [m for m in good_matches if m != []]

    else:
        flann = create_matcher()
        matches = flann.knnMatch(des1, des2, k=2)
        good_matches = [[m] for m, n in matches if m.distance < th * n.distance]

    MIN_MATCH_COUNT = 10
    ransac_good_matches = []
    if len(good_matches) > MIN_MATCH_COUNT:
        # Get the keypoint coordinates
        src_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good_matches ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m[0].trainIdx].pt for m in good_matches ]).reshape(-1,1,2)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        print('Number of good matches is less than MIN_MATCH_COUNT!')
        return ransac_good_matches, good_matches

    for i in range(len(mask)):
        if mask[i]:
            ransac_good_matches.append(good_matches[i])
    return ransac_good_matches, good_matches
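
A short driver for match (image paths are hypothetical; SIFT descriptors fit the NORM_L2 brute-force branch):

import cv2

sift = cv2.SIFT_create()
img1 = cv2.imread('scene1.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('scene2.png', cv2.IMREAD_GRAYSCALE)
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

inliers, good = match(kp1, des1, kp2, des2, th=0.75, method='bfmatcher')
print(len(inliers), 'RANSAC inliers out of', len(good), 'good matches')
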
Example #12
def sift_match(img_sift_1, img_sift_2):
    """
    matching using sift
    :param img_sift_1: one of the images
    :param img_sift_2: the other image
    :return: image containing 2 images with lines indicating matches
    """
    sift = cv2.xfeatures2d.SIFT_create()
    bfmatcher = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)

    img_sift_gray_1 = cv2.cvtColor(img_sift_1, cv2.COLOR_BGR2GRAY)
    # img_sift_gray = np.float32(img_sift_gray)
    kpsSift_1 = sift.detect(img_sift_gray_1, None)
    # grayWithSift = cv2.drawKeypoints(img_sift_gray_1, kpsSift_1, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    kpsSift_1, dscSift_1 = sift.compute(img_sift_gray_1, kpsSift_1)

    img_sift_gray_2 = cv2.cvtColor(img_sift_2, cv2.COLOR_BGR2GRAY)
    # img_sift_gray = np.float32(img_sift_gray)
    kpsSift_2 = sift.detect(img_sift_gray_2, None)
    # grayWithSift = cv2.drawKeypoints(img_sift_gray_1, kpsSift_1, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    kpsSift_2, dscSift_2 = sift.compute(img_sift_gray_2, kpsSift_2)

    matchesSift = bfmatcher.match(dscSift_1, dscSift_2)
    matchesSift = sorted(matchesSift, key=lambda x: x.distance)
    match_comparison = cv2.drawMatches(img_sift_1,
                                       kpsSift_1,
                                       img_sift_2,
                                       kpsSift_2,
                                       matchesSift[:15],
                                       None,
                                       flags=2)  # top 15 matched keypoints
    return match_comparison
Example #13
def detect_best_orb(templates, template_names, kp_t, des_t, img_c, name, top):
    orb = cv2.ORB_create()  # optionally: cv2.ORB_create(WTA_K=3)

    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)

    kp_c, des_c = orb.detectAndCompute(img_c, None)

    all_matches = []
    avg = []
    for des in des_t:
        matches = bf.match(des, des_c)
        matches.sort(key=lambda x: x.distance)
        # Average distance over the best matches (top 8)
        top_matches = matches[:8]
        avg.append(mean(d.distance for d in top_matches))
        all_matches.append(matches)

    # Sorting everything
    avg, templates, template_names, all_matches, kp_t, des_t = zip(
        *sorted(zip(avg, templates, template_names, all_matches, kp_t, des_t),
                key=lambda x: x[0]))

    good_matches = all_matches[0][:top]
    show_result(templates[0], img_c, template_names[0], "orb" + name, kp_t[0],
                kp_c, all_matches[0], good_matches, avg[0])
Example #14
def detect_best_surf(templates, template_names, kp_t, des_t, img_c, name, top,
                     ang):
    surf = cv2.xfeatures2d.SURF_create()

    bf = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)

    kp_c, des_c = surf.detectAndCompute(img_c, None)

    all_matches = []
    avg = []
    for des in des_t:
        matches = bf.match(des, des_c)
        matches.sort(key=lambda x: x.distance)
        # Average distance over the best matches (top 8)
        top_matches = matches[:8]
        avg.append(mean(d.distance for d in top_matches))
        all_matches.append(matches)

    # Sorting everything
    avg, templates, template_names, all_matches, kp_t, des_t = zip(
        *sorted(zip(avg, templates, template_names, all_matches, kp_t, des_t),
                key=lambda x: x[0]))

    good_matches = all_matches[0][:top]
    show_result(templates[0], img_c, template_names[0], name, 'surf', kp_t[0],
                kp_c, all_matches[0], good_matches, avg[0], ang)
def corner_match(img1_harris, img2_harris):
    sift = cv2.xfeatures2d.SIFT_create()  # FIXME Parameters
    bfmatcher = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)

    img = img1_harris
    img_mod = img2_harris
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_scale = cv2.cvtColor(img_mod, cv2.COLOR_BGR2GRAY)

    corners = cv2.cornerHarris(gray, 3, 3, 0.01)

    kpsCorners = np.argwhere(corners > 0.01 * corners.max())
    kpsCorners = [cv2.KeyPoint(pt[1], pt[0], 3) for pt in kpsCorners]
    kpsCorners, dscCorners = sift.compute(gray, kpsCorners)

    cornersTwo = cv2.cornerHarris(gray_scale, 3, 3, 0.001)

    kpsCornersTwo = np.argwhere(cornersTwo > 0.01 * cornersTwo.max())
    kpsCornersTwo = [cv2.KeyPoint(pt[1], pt[0], 3) for pt in kpsCornersTwo]
    kpsCornersTwo, dscCornersTwo = sift.compute(gray_scale, kpsCornersTwo)

    matchCorners = bfmatcher.match(dscCorners, dscCornersTwo)
    matchCorners = sorted(matchCorners, key=lambda x: x.distance)
    cornerMatch = cv2.drawMatches(img, kpsCorners, img_mod, kpsCornersTwo, matchCorners[:10], None, flags=2)  # top 10 matched keypoints

    return cornerMatch
Example #16
def ap2A(ims):
    """Implementa modelo de índice invertido + bolsa de palabras
  Argumentos posicionales:
  - ims: Imágenes
  Devuelve:
  - Índice invertido y bolsa de palabras"""

    inv_index = [[] for _ in range(len(KMEANS_DICT))]
    bags = []
    matcher = cv.BFMatcher_create(crossCheck=False)

    for n, im in enumerate(ims):
        _, ds = getDescriptors(im)
        matches = matcher.match(ds, KMEANS_DICT)

        # Count the matches per visual word
        bag_dict = collections.Counter()
        for match in matches:
            bag_dict[match.trainIdx] += 1

        # Store in the histogram and the inverted index
        bag = np.zeros(len(KMEANS_DICT))
        for word, cnt in bag_dict.items():
            bag[word] = cnt
            inv_index[word].append(n)

        # Normalize the histogram
        bags.append(bag / np.linalg.norm(bag))
    return inv_index, bags
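
A sketch of how the returned index and bags might be queried (it reuses the getDescriptors helper and KMEANS_DICT vocabulary assumed by ap2A; ranking by dot product of the normalized histograms is one common choice, not necessarily this project's):

def query(im, inv_index, bags):
    matcher = cv.BFMatcher_create(crossCheck=False)
    _, ds = getDescriptors(im)
    matches = matcher.match(ds, KMEANS_DICT)

    # Bag-of-words histogram for the query image
    bag = np.zeros(len(KMEANS_DICT))
    for m in matches:
        bag[m.trainIdx] += 1
    bag = bag / np.linalg.norm(bag)

    # Candidates: images sharing at least one visual word with the query
    candidates = set()
    for word in np.nonzero(bag)[0]:
        candidates.update(inv_index[word])

    # Rank by cosine similarity (the stored bags are already normalized)
    return sorted(candidates, key=lambda n: -bag.dot(bags[n]))
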
Example #17
def detect_img_sift(img_t, img_c, name, num):
    sift = cv2.SIFT_create()

    bf = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)

    kp_t, des_t = sift.detectAndCompute(img_t, None)
    kp_c, des_c = sift.detectAndCompute(img_c, None)

    img_t = cv2.drawKeypoints(img_t, kp_t, None)
    img_c = cv2.drawKeypoints(img_c, kp_c, None)

    matches = bf.match(des_t, des_c)
    matches.sort(key=lambda x: x.distance)

    good_matches = matches[:num]
    """# Apply ratio test
    good = []
    for m,n,o in matches:
        if m.distance < 0.65*n.distance:
            if m.distance < 0.65*o.distance:
                good.append([m])
                
    for m,n in matches:
        if m.distance < 0.75*n.distance:
            good.append([m])
    """

    show_result(img_t, img_c, "sift" + name, kp_t, kp_c, matches, good_matches)
Example #18
def match(des1, des2, method="BF", _sorted=False, distance=None):
  #gray = img.cvtColor(img, cv2.COLOR_BGR2GRAY)

  if method == "BF":
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)
  
    matches = bf.match(des1, des2)

    if _sorted:
      matches = sorted(matches, key=lambda x:x.distance)
  
    if distance is not None and abs(distance) <= 1:
      idx = int(round(len(matches)*abs(distance))) 
      matches = matches[:idx]
 
    return matches

  if method == "FLANN":
    index_params = dict(algorithm=6, table_number=6, key_size=12, multi_probe_level=1)

    search_params = dict(checks=80)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    
    # filter matches
    if distance is not None and abs(distance) <= 1:
      matches_good = []

      for i,(m,n) in enumerate(matches):
        if m.distance < distance*n.distance:
          matches_good.append([m,n])
      matches = matches_good
    return matches
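
The meaning of `distance` differs per branch: with method="BF" it is the fraction of sorted matches to keep, while with method="FLANN" it acts as a Lowe ratio threshold. A hypothetical call with ORB descriptors, which suit both NORM_HAMMING and the LSH index:

import cv2

orb = cv2.ORB_create()
img1 = cv2.imread('frame1.png', cv2.IMREAD_GRAYSCALE)  # placeholder paths
img2 = cv2.imread('frame2.png', cv2.IMREAD_GRAYSCALE)
_, des1 = orb.detectAndCompute(img1, None)
_, des2 = orb.detectAndCompute(img2, None)

best_half = match(des1, des2, method="BF", _sorted=True, distance=0.5)
ratio_kept = match(des1, des2, method="FLANN", distance=0.75)
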
Example #19
def bf_matcher(desc1, desc2):

    bf = cv2.BFMatcher_create(normType=cv2.NORM_HAMMING, crossCheck=True)

    matches = bf.match(desc1, desc2)
    matches.sort(key=lambda x: x.distance)

    return matches
 def match(a: Descriptors, b: Descriptors) -> List[cv2.DMatch]:
     """
     Finds the best match for each descriptor in the query set `a` by
     exhaustively comparing it against every descriptor in the train set `b`.
     """
     matcher = cv2.BFMatcher_create()
     matches = matcher.match(queryDescriptors=a, trainDescriptors=b)
     return matches
    def __init__(self, train_path, nfeatures):

        self.files = []
        self.files += [train_path + f for f in os.listdir(train_path)]

        self.nfeatures = nfeatures
        self.sift = cv2.xfeatures2d.SIFT_create(nfeatures=self.nfeatures)
        self.matcher = cv2.BFMatcher_create(cv2.NORM_L1, crossCheck=False)
Example #22
    def __init__(self, train_path, nfeatures):

        self.files = []
        self.files += [train_path + f for f in os.listdir(train_path)]

        self.nfeatures = nfeatures
        self.superpoint = SuperPoint({'max_keypoints': nfeatures}).cuda()
        self.matcher = cv2.BFMatcher_create(cv2.NORM_L1, crossCheck=False)
    def __init__(self, stereo_pair_1, stereo_pair_2):
        self.stereo_pair_1 = stereo_pair_1
        self.stereo_pair_2 = stereo_pair_2

        #  create BFMatcher object
        bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors across time - left image
        self.matches = bf.match(stereo_pair_1.des1, stereo_pair_2.des1)
def matching(des1, des2):
    bf = cv.BFMatcher_create(normType=cv.NORM_L2, crossCheck=True)
    matches = bf.match(des1, des2)
    query_index, train_index = [], []
    for match in matches:
        query_index.append(match.queryIdx)
        train_index.append(match.trainIdx)

    return query_index, train_index
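
The two index lists returned by matching pair up element-wise, so the matched pixel coordinates can be recovered from the keypoint lists (kp1/kp2 and des1/des2 here are assumed to come from the same detector run):

import numpy as np

query_index, train_index = matching(des1, des2)
pts1 = np.float32([kp1[i].pt for i in query_index])
pts2 = np.float32([kp2[i].pt for i in train_index])
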
def project(view, screen, debug=False, PIL=True):
    
    if PIL:
        # * In case of PIL Images do color conversion
        view = cv.cvtColor(view, cv.COLOR_RGB2BGR)
        screen = cv.cvtColor(screen, cv.COLOR_RGB2BGR)

    # * Create ORB and Brute Force matcher object using Hamming distance
    orb = cv.ORB_create()

    view = np.rot90(view, 3)
    screen = np.rot90(screen, 3)

    # * Find key point and descriptors with ORB
    kp_view, dest_view = orb.detectAndCompute(view, None)
    kp_screen, dest_screen = orb.detectAndCompute(screen, None)

    # * Matching keypoints and sorting them using distance
    bf_matcher = cv.BFMatcher_create(normType=cv.NORM_HAMMING, crossCheck=True)

    matches = bf_matcher.match(dest_view, dest_screen)
    matches = sorted(matches, key=lambda x: x.distance)

    if len(matches) < MIN_MATCHES_COUNT:
        logging.debug('Not enough point matches.')
        return -1, -1


    # * Extract matched keypoints
    view_points = np.float32(
        [kp_view[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)

    screen_points = np.float32(
        [kp_screen[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    # * Finding homography matrix and find perspective transform
    height, width = view.shape[:2]
    M, mask = cv.findHomography(view_points, screen_points, cv.RANSAC, 5.0)

    points = np.float32(
        [[(width - 1) * 0.5, (height - 1) * 0.5]]).reshape(-1, 1, 2)

    dest = cv.perspectiveTransform(points, M)

    x, y = np.int32(dest[0][0])

    print("CAL POINTS:", x, y)

    if debug:
        debug_image = _get_debug_image(
            x, y, view, screen, mask, M, kp_view, kp_screen, matches)

        return x, y, debug_image

    else:
        return x, y
Example #26
 def match(self, _, I2, webcam):
     # SIFT and SURF not included in opencv-contrib-python >= 3.4.3.18
     sift = cv2.xfeatures2d.SIFT_create()
     gray2 = cv2.cvtColor(I2, cv2.COLOR_RGB2GRAY)
     kpt2, des2 = sift.detectAndCompute(gray2, None)
     bf = cv2.BFMatcher_create()
     while True:
         # I1 = self.webcam.get_current_frame()
         I1 = webcam.get_current_frame()
         gray1 = cv2.cvtColor(I1, cv2.COLOR_RGB2GRAY)
         kpt1, des1 = sift.detectAndCompute(gray1, None)
         # Matching Brute force
         matches = bf.knnMatch(des2, des1, 2)  # knn: k nearest neighbor
         # Choose good matches
         good = []
         new_good = []
         for m, n in matches:
             if m.distance < 0.4 * n.distance:
                 good.append([m])
                 new_good.append(m)
         if len(good) > 3:
             srcPoints = np.float32([kpt2[m.queryIdx].pt
                                     for m in new_good]).reshape(-1, 1, 2)
             dstPoints = np.float32([kpt1[m.trainIdx].pt
                                     for m in new_good]).reshape(-1, 1, 2)
             # print(srcPoints)
             # print(dstPoints)
             M, mask = cv2.findHomography(srcPoints, dstPoints)
             w = gray2.shape[1] - 1
             h = gray2.shape[0] - 1
             n_corners = np.float32([[0, 0], [w, 0], [w, h],
                                     [0, h]]).reshape(-1, 1, 2)
             # moving_line = np.float32([[0, h / 2], [w, h / 2]]).reshape(-1, 1, 2)
             # print(n_corners)
             # n_corners = np.float32([[0, 0], [w/2, 0], [w, 0], [w, h/2], [w, h], [w/2, h], [0, h], [0, h/2], [w/2, h/2]]).reshape(-1, 1, 2)
             if M is not None:
                 self.npts = cv2.perspectiveTransform(n_corners,
                                                      M).reshape(4, 2)
                 self.move = np.float32([(self.npts[0] + self.npts[3]) / 2,
                                         (self.npts[1] + self.npts[2]) / 2
                                         ]).reshape(-1)
                 if self.initial == 1:
                     self.corners_old = self.npts
                     self.initial = 0
                 ret, self.npts = stabilize_corners(self.corners_old,
                                                    self.npts)
                 if ret is True:
                     self.corners_old = self.npts
                 # print(self.move)
                 # print(self.move[0])
                 # self.M, _ = cv2.findHomography(self.npts.reshape(-1, 1, 2), cv2.perspectiveTransform(n_corners, M))
                 # print(self.npts)
                 # self.npts = np.int32(self.npts)
             self.glyph_found = True
         else:
             self.glyph_found = False
Example #27
File: p3.py  Project: advy99/VC
def coincidencias_descriptores_2nn(descriptores1, descriptores2):
    """
    Funcion para obtener las correspondencias entre dos descriptores usando
    un 2NN
    """
    emparejador = cv.BFMatcher_create()

    coincidencias = emparejador.knnMatch(descriptores1, descriptores2, k=2)

    return coincidencias
Example #28
    def __init__(self, train_path, nfeatures, folder='train'):

        self.files = []
        self.folder = folder + '_data_rs/'
        self.train_path = os.path.join(train_path, self.folder)
        self.files += [
            self.train_path + f for f in os.listdir(self.train_path)
        ]

        self.matcher = cv2.BFMatcher_create(cv2.NORM_L1, crossCheck=False)
    def __init__(self, dataroot, room_number, feature_type):
        self.dataroot = dataroot
        self.room_number = room_number
        self.mask = None

        # Open up image data file and load metadata
        data_file = open(dataroot + "/tumvi_room" + str(room_number) + "_cam0",
                         "r")
        self.data = json.load(data_file)

        # Get feature detector and matcher
        if feature_type == "sift":
            self.detector = cv2.xfeatures2d.SIFT_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_L2,
                                                crossCheck=True)
        elif feature_type == "surf":
            self.detector = cv2.xfeatures2d.SURF_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_L2,
                                                crossCheck=True)
        elif feature_type == "orb":
            self.detector = cv2.ORB_create()
            self.matcher = cv2.BFMatcher_create(normType=cv2.NORM_HAMMING,
                                                crossCheck=True)

        # Camera projection matrix for six indoor rooms
        fx = 190.97847715128717
        fy = 190.9733070521226
        cx = 254.93170605935475
        cy = 256.8974428996504
        self.K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        self.K_inv = np.array([[1 / fx, 0, -cx / fx], [0, 1 / fy, -cy / fy],
                               [0, 0, 1]])

        # For storing pts and timestamps
        #self.stored_points = [ [] for i in range(len(self.data)) ]
        self.stored_points = {}
        self.timestamps = [0 for i in range(len(self.data))]

        # Place to dump text files full of points
        self.pointlist_dir = dataroot + "/featurelists/" + feature_type + \
            "/room" + str(self.room_number)
        if not os.path.exists(self.pointlist_dir):
            os.makedirs(self.pointlist_dir)
    def match_features(self, des1: np.ndarray,
                       des2: np.ndarray) -> List[List[cv2.DMatch]]:
        """
        Match features from two images using keypoint descriptors of a pair of images
        match -- list of matched features from two images. Each match[i] is k or less matches for the same query descriptor
        """
        bf = cv2.BFMatcher_create()
        match = bf.knnMatch(des1, des2, k=2)

        return match
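
Since match_features returns k=2 neighbor lists, the usual next step is Lowe's ratio test; a minimal sketch (the 0.75 threshold is a common default, not taken from this source, and `matcher` stands for an instance of the enclosing class):

good = []
for pair in matcher.match_features(des1, des2):
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])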