# Requires the contrib build of OpenCV: pip install opencv-contrib-python
import cv2
import numpy as np

# matchGMS (Grid-based Motion Statistics) lives in the contrib module.
matchGMS = cv2.xfeatures2d.matchGMS


def calcOffsetByORB(src1, src2, isDebug=False, debugPath='./'):
    # Detect ORB keypoints and compute binary descriptors for both images.
    orb = cv2.ORB_create(10000)
    orb.setFastThreshold(0)
    kp1, des1 = orb.detectAndCompute(src1, None)
    kp2, des2 = orb.detectAndCompute(src2, None)

    # Brute-force Hamming matching, then GMS outlier rejection.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches_all = matcher.match(des1, des2)
    matches_gms = matchGMS(src1.shape[:2], src2.shape[:2], kp1, kp2, matches_all,
                           withScale=False, withRotation=False, thresholdFactor=6)
    if len(matches_gms) == 0:
        raise ValueError("No matches left after GMS filtering")

    # Average the pixel displacement between matched keypoints.
    x_sum = 0
    y_sum = 0
    for m in matches_gms:
        left = kp1[m.queryIdx].pt
        right = kp2[m.trainIdx].pt
        x_sum += int(left[0]) - int(right[0])
        y_sum += int(left[1]) - int(right[1])
    x_offset = int(x_sum / len(matches_gms))
    y_offset = int(y_sum / len(matches_gms))

    if isDebug:
        # Place the two images side by side and draw the GMS matches.
        height = max(src1.shape[0], src2.shape[0])
        width = src1.shape[1] + src2.shape[1]
        output = np.zeros((height, width, 3), dtype=np.uint8)
        output[0:src1.shape[0], 0:src1.shape[1]] = src1
        output[0:src2.shape[0], src1.shape[1]:] = src2
        for m in matches_gms:
            left_pt = tuple(map(int, kp1[m.queryIdx].pt))
            right_pt = (int(kp2[m.trainIdx].pt[0]) + src1.shape[1],
                        int(kp2[m.trainIdx].pt[1]))
            cv2.circle(output, left_pt, 1, (0, 255, 255), 2)
            cv2.circle(output, right_pt, 1, (0, 255, 0), 2)
            cv2.line(output, left_pt, right_pt, (255, 0, 0))
        debug_out_path = debugPath + '_calcOffsetByORB.jpg'
        cv2.imwrite(debug_out_path, output)

    return x_offset, y_offset
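# A minimal usage sketch of calcOffsetByORB (not part of the original listing); the image
# file names are placeholders for two horizontally overlapping frames.
left_img = cv2.imread("left.jpg")
right_img = cv2.imread("right.jpg")
dx, dy = calcOffsetByORB(left_img, right_img, isDebug=True, debugPath='./debug')
print("estimated x/y offset:", dx, dy)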
    print(f"Brute force matching time {end - start}")

    flann = cv.FlannBasedMatcher(INDEX_PARAMS, SEARCH_PARAMS)
    start = time()
    flann_matches = flann.match(np.float32(descriptions1),
                                np.float32(descriptions2))
    end = time()

    print(f"Flann matching time {end - start}")

    print("Number of matches by flann", len(flann_matches))
    print("Number of matches by brute force", len(bf_matches))

    best_matches_flann = matchGMS(img1.shape[:2],
                                  img2.shape[:2],
                                  keypoints1,
                                  keypoints2,
                                  flann_matches,
                                  thresholdFactor=3)
    best_matches_bf = matchGMS(img1.shape[:2],
                               img2.shape[:2],
                               keypoints1,
                               keypoints2,
                               bf_matches,
                               thresholdFactor=3)

    print("Number of matches by flann after outlier rejection",
          len(best_matches_flann))
    print("Number of matches by brute force after outlier rejection",
          len(best_matches_bf))

    output_img_flann = cv.drawMatches(img1, keypoints1, img2, keypoints2,
                                      best_matches_flann, None)
Example #3
            # (fragment: tail of a custom draw_matches helper used in the __main__ block below)
            color = tuple(map(int, _colormap[colormap_idx, 0, :]))
            cv2.circle(output, tuple(map(int, left)), 1, color, 2)
            cv2.circle(output, tuple(map(int, right)), 1, color, 2)
    return output


if __name__ == '__main__':
    img1 = cv2.imread("../data/01.jpg")
    img2 = cv2.imread("../data/02.jpg")

    orb = cv2.ORB_create(10000)
    orb.setFastThreshold(0)

    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches_all = matcher.match(des1, des2)

    start = time.time()
    matches_gms = matchGMS(img1.shape[:2], img2.shape[:2], kp1, kp2, matches_all, withScale=False, withRotation=False, thresholdFactor=6)
    end = time.time()

    print('Found', len(matches_gms), 'matches')
    print('GMS takes', end-start, 'seconds')

    output = draw_matches(img1, img2, kp1, kp2, matches_gms, DrawingType.ONLY_LINES)

    cv2.imshow("show", output)
    cv2.waitKey(0)
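    # Alternative visualization (an addition, not in the original sample): cv2.drawMatches
    # draws the same GMS matches without the custom draw_matches helper.
    output_alt = cv2.drawMatches(img1, kp1, img2, kp2, matches_gms, None, flags=2)
    cv2.imshow("drawMatches", output_alt)
    cv2.waitKey(0)
    cv2.destroyAllWindows()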
Example #4
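# The "FAST + FREAK" window title further down suggests the setup omitted from this fragment
# detected FAST keypoints and described them with FREAK (opencv-contrib). The sketch below is
# an assumption, not the original code; it defines the names the fragment uses
# (img1, img2, keypoints1/2, descriptions1/2, matcher) with placeholder image paths.
import cv2 as cv
import numpy as np

img1 = cv.imread("template.jpg", cv.IMREAD_GRAYSCALE)   # placeholder path
img2 = cv.imread("input.jpg", cv.IMREAD_GRAYSCALE)      # placeholder path

detector = cv.FastFeatureDetector_create()
descriptor = cv.xfeatures2d.FREAK_create()
keypoints1 = detector.detect(img1, None)
keypoints1, descriptions1 = descriptor.compute(img1, keypoints1)
keypoints2 = detector.detect(img2, None)
keypoints2, descriptions2 = descriptor.compute(img2, keypoints2)

matcher = cv.BFMatcher_create(cv.NORM_HAMMING, True)  # FREAK is binary -> Hamming + cross-check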
    # matcher = cv.BFMatcher_create()

    matches = matcher.match(descriptions1, descriptions2)
    print(f"Matcher before outlier rejection {len(matches)}")

    src_pts = np.float32([keypoints1[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    des_pts = np.float32([keypoints2[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    _, mask = cv.findHomography(src_pts, des_pts, cv.RANSAC, 15)
    print(mask.ravel().sum())

    best_matches = matchGMS(img1.shape[:2],
                            img2.shape[:2],
                            keypoints1,
                            keypoints2,
                            matches,
                            thresholdFactor=3)
    # best_matches = [match for match, score in zip(matches, mask.ravel()) if score == 1]

    if not best_matches:
        raise ValueError("Not enough matches left after GMS!")

    print(f"Best matches: {len(best_matches)}")

    output_img = cv.drawMatches(img1, keypoints1, img2, keypoints2,
                                best_matches, None)

    cv.imshow("FAST + FREAK", output_img)
    cv.waitKey(0)
    cv.destroyAllWindows()
Example #5
                        # (fragment: the enclosing benchmark loops and detector setup are omitted)
                        template_keypoints, template_descriptions = \
                            detector_descriptor.detect_describe(prepared_template_img)
                        template_img_description_time_end = time()
                        template_img_description_time =\
                            template_img_description_time_end - template_img_description_time_begin

                        matcher = cv.BFMatcher_create() if descriptor_name in [SIFT, ROOT_SIFT] \
                            else cv.BFMatcher_create(cv.NORM_HAMMING, True)

                        feature_matching_time_begin = time()
                        matches = matcher.match(template_descriptions, test_descriptions)
                        feature_matching_time_end = time()
                        feature_matching_time = feature_matching_time_end - feature_matching_time_begin

                        outlier_rejection_time_begin = time()
                        best_matches = matchGMS(
                            prepared_template_img.shape[:2], prepared_test_img.shape[:2], template_keypoints, test_keypoints, matches,
                            withRotation=True, withScale=True, thresholdFactor=GMS_THRESHOLD
                        )
                        outlier_rejection_time_end = time()
                        outlier_rejection_time = outlier_rejection_time_end - outlier_rejection_time_begin

                        matches_counts.append(len(best_matches))
                    if np.argmax(matches_counts) == cls_idx:
                        detector_descriptor_score += 1
                        final_matches_cnt.append(len(best_matches))
                    total_matching_time.append(
                        test_img_description_time + template_img_description_time
                        + feature_matching_time + outlier_rejection_time
                    )
            descriptor_results.append([
                f"{detector_name}_{descriptor_name}", detector_descriptor_score / 20,
                np.around(np.mean(total_matching_time), 4),
Example #6
def match_image_pair(recon, track_builder, features, vid1, vid2, matchertype):
    cross_check = matchertype == "gms"
    if featuretype == 'akaze':
        matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=cross_check)
    elif featuretype == 'sift':
        matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=cross_check)
    elif featuretype == "akaze_bin":
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=cross_check)
    else:
        raise ValueError(f"Unsupported featuretype: {featuretype}")

    if matchertype == "knn":
        matches = matcher.knnMatch(features[vid1]["descriptors"],
                                   features[vid2]["descriptors"],
                                   k=2)

        # Apply ratio test
        filtered_matches = []
        for m, n in matches:
            if m.distance < 0.8 * n.distance:
                filtered_matches.append(m)

    elif matchertype == "gms":
        matches12 = matcher.match(features[vid1]["descriptors"],
                                  features[vid2]["descriptors"])
        filtered_matches = matchGMS(features[vid1]["img_wh"],
                                    features[vid2]["img_wh"],
                                    features[vid1]["keypoints"],
                                    features[vid2]["keypoints"], matches12)

    print('Number of putative matches: {}'.format(len(filtered_matches)))

    if len(filtered_matches) < min_num_inlier_matches:
        print('Number of putative matches too low!')
        return False, None, None

    correspondences = correspondence_from_matches(filtered_matches,
                                                  features[vid1]["keypoints"],
                                                  features[vid2]["keypoints"])

    options = pt.sfm.EstimateTwoViewInfoOptions()
    options.max_sampson_error_pixels = 1.0
    options.max_ransac_iterations = 250
    if ransactype == 'ransac':
        options.ransac_type = pt.sfm.RansacType(0)
    elif ransactype == 'prosac':
        options.ransac_type = pt.sfm.RansacType(1)
    elif ransactype == 'lmed':
        options.ransac_type = pt.sfm.RansacType(2)

    prior1 = recon.View(vid1).Camera().CameraIntrinsicsPriorFromIntrinsics()
    prior2 = recon.View(vid2).Camera().CameraIntrinsicsPriorFromIntrinsics()
    success, twoview_info, inlier_indices = pt.sfm.EstimateTwoViewInfo(
        options, prior1, prior2, correspondences)

    print('Only {} matches survived after geometric verification'.format(
        len(inlier_indices)))

    if not success or len(inlier_indices) < min_num_inlier_matches:
        print(
            'Number of putative matches after geometric verification is too low!'
        )
        return False, None, None
    else:
        verified_matches = []
        for i in range(len(inlier_indices)):
            verified_matches.append(filtered_matches[inlier_indices[i]])

        correspondences_verified = correspondence_from_matches(
            verified_matches, features[vid1]["keypoints"],
            features[vid2]["keypoints"])

        for i in range(len(verified_matches)):
            track_builder.AddFeatureCorrespondence(
                vid1, correspondences_verified[i].feature1, vid2,
                correspondences_verified[i].feature2)

        return True, twoview_info, correspondences_verified
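# match_image_pair reads per-view dicts features[vid]["keypoints" / "descriptors" / "img_wh"],
# module-level settings (featuretype, ransactype, min_num_inlier_matches) and a
# correspondence_from_matches helper defined elsewhere. A minimal sketch of building that
# features dict with AKAZE; the setting values below are assumptions, not original code.
import cv2

featuretype = "akaze_bin"            # selects the NORM_HAMMING brute-force matcher above
ransactype = "ransac"
min_num_inlier_matches = 30          # assumed threshold, not from the original code

def build_features(img, vid, features):
    detector = cv2.AKAZE_create()
    keypoints, descriptors = detector.detectAndCompute(img, None)
    features[vid] = {
        "keypoints": keypoints,
        "descriptors": descriptors,
        "img_wh": (img.shape[1], img.shape[0]),  # (width, height) as matchGMS expects a size
    }

# Calling match_image_pair additionally requires a pytheia Reconstruction (recon), a
# TrackBuilder, and two view ids, which are outside the scope of this sketch.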
                # --- separate example fragment (beginning omitted): matcher selection and GMS outlier-rejection timing ---
                matcher = cv.BFMatcher_create() if f"_{SIFT}" in detector_descriptor_name \
                    else cv.BFMatcher_create(cv.NORM_HAMMING, True)

                # if f"_{SIFT}" in detector_descriptor_name or f"-{SIFT}" in detector_descriptor_name:
                #

                feature_matching_time_begin = time()
                matches = matcher.match(templ_decs, test_decs)
                feature_matching_time_end = time()
                feature_matching_time = feature_matching_time_end - feature_matching_time_begin

                outlier_rejection_time_begin = time()
                best_matches = matchGMS(template_img.shape[:2],
                                        test_img.shape[:2],
                                        templ_kps,
                                        test_kps,
                                        matches,
                                        withRotation=True,
                                        withScale=True,
                                        thresholdFactor=GMS_THRESHOLD)
                outlier_rejection_time_end = time()
                outlier_rejection_time = outlier_rejection_time_end - outlier_rejection_time_begin

                descriptor_results.append([
                    detector_descriptor_name,
                    len(templ_kps),
                    len(test_kps),
                    len(matches),
                    len(matches) - len(best_matches),
                    len(best_matches),
                    first_img_detection_time,
                    second_img_detection_time,
Example #8
    # (fragment: imports and the loading of img1/img2 are omitted above this point)
    # detector_descriptor = cv2.KAZE_create()
    detector_descriptor = cv2.AKAZE_create()
    # detector_descriptor = cv2.BRISK_create()

    kp1, des1 = detector_descriptor.detectAndCompute(img1, None)
    print(f"Template image {len(kp1)} keypoints")
    kp2, des2 = detector_descriptor.detectAndCompute(img2, None)
    print(f"Input image {len(kp2)} keypoints")
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des1, des2)
    print(f"Matches {len(matches)}")

    start = time.time()
    matches_gms = matchGMS(img1.shape[:2],
                           img2.shape[:2],
                           kp1,
                           kp2,
                           matches,
                           withScale=True,
                           withRotation=True,
                           thresholdFactor=4)
    end = time.time()

    print(f'Found GMS {len(matches_gms)} matches')
    print('GMS takes', end - start, 'seconds')

    output = cv2.drawKeypoints(img1, kp1, None)
    # output = cv2.drawMatches(img1, kp1, img2, kp2, matches_gms, None, flags=2)

    cv2.imshow("show", output)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
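    # Possible follow-up (not in the original example): estimate a homography from the GMS
    # matches, as in the earlier findHomography example. Assumes numpy is imported as np and
    # that at least four matches survived GMS filtering.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches_gms]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches_gms]).reshape(-1, 1, 2)
    H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print(f"Homography inliers: {int(inlier_mask.sum())}")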