Example no. 1
def feature_match(img1, img2):
    original = img2
    img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    h, w = img1.shape
    h2, w2 = img2.shape

    if h > h2 and w > w2:
        img1 = resize_with_aspect_ratio(img1, height=h2 // 2)

    pts1, desc1 = ORB_descriptor(img1, 1000)
    pts2, desc2 = ORB_descriptor(img2, 10000)

    dmatches = get_matches(desc1, desc2)

    h, w = img1.shape
    dst = find_image_in_frame(dmatches, pts1, pts2, h, w)

    img2 = cv2.polylines(original, [np.int32(dst)], True, (0, 0, 255), 10,
                         cv2.LINE_AA)
    res = cv2.drawMatches(img1,
                          pts1,
                          img2,
                          pts2,
                          dmatches[:5],
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)

    return res
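Example 1 relies on several helpers that are not shown. Below is a minimal sketch of plausible implementations, inferred only from how they are called above; the names exist in the original, but the bodies, signatures and parameter choices here are assumptions.

import cv2
import numpy as np

def resize_with_aspect_ratio(img, height):
    # assumed helper: scale to the requested height, keeping the aspect ratio
    h, w = img.shape[:2]
    return cv2.resize(img, (int(w * height / h), height))

def ORB_descriptor(img, n_features):
    # assumed helper: detect ORB keypoints and compute their descriptors
    orb = cv2.ORB_create(nfeatures=n_features)
    return orb.detectAndCompute(img, None)

def get_matches(desc1, desc2):
    # assumed helper: brute-force Hamming matching, best matches first
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    return sorted(bf.match(desc1, desc2), key=lambda m: m.distance)

def find_image_in_frame(dmatches, pts1, pts2, h, w):
    # assumed helper: RANSAC homography from img1 to img2, then project img1's corners
    src = np.float32([pts1[m.queryIdx].pt for m in dmatches]).reshape(-1, 1, 2)
    dst = np.float32([pts2[m.trainIdx].pt for m in dmatches]).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, M)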
Example no. 2
def q3(image, sift1):
    center = get_img_center(image)
    # a) Rotate the given image clockwise by 60 degrees.
    rot = imutils.rotate_bound(image, 60)
    #rot = rotate(image, center[0], center[1], 60)
    plt.imshow(rot), plt.show()
    # b) Extract the SIFT features and show the keypoints on the rotated image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    keyPoints1, des1 = sift1.detector.detectAndCompute(image, None)
    keyPoints2, des2 = sift1.detector.detectAndCompute(rot, None)

    bf = cv2.BFMatcher()
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the rotated image with those of the
    # original image (plain bf.match() is used here; a knnMatch() ratio test would implement
    # the nearest-neighbour distance ratio method named in the task)
    matches = bf.match(des1, des2)
    # We sort them in ascending order of their distances so that best matches (with low distance) come to front
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the best-matching descriptors on both the original and the rotated image.
    img_q3 = cv2.drawMatches(
        image, keyPoints1, rot, keyPoints2, matches[:7], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q3), plt.show()
    img3 = cv2.drawKeypoints(image, keyPoints1, image)
    cv2.imwrite('b3.jpg', img3)
    cv2.imwrite('d3.jpg', img_q3)
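Examples 2 and 6 pass an object whose .detector attribute holds a SIFT instance. A minimal sketch of such a wrapper, assuming OpenCV >= 4.4; the class name and constructor argument are hypothetical.

import cv2

class SiftWrapper:
    # assumed wrapper exposing a configured SIFT detector as `.detector`
    def __init__(self, n_features=100):
        # nfeatures limits the keypoint count, matching the "reduced number
        # of keypoints" mentioned in the task comments (value is a guess)
        self.detector = cv2.SIFT_create(nfeatures=n_features)

# usage sketch: q3(image, SiftWrapper()) or q2(image, SiftWrapper())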
Example no. 3
def orb_stitcher(imgs):
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    selected = []
    for m in matches:
        if m.distance < 40:
            selected.append(m)

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg',out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:

        dst_pts = np.float32([kp_master[m.trainIdx].pt
                              for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt
                              for m in selected]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h],
                          [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]
        sz_out = (max(max_extent[1],
                      imgs[0].shape[1]), max(max_extent[0], imgs[0].shape[0]))

        # img2 = cv2.polylines(imgs[0], [np.int32(dst)], True, [0,255,0], 3, cv2.LINE_AA)

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)

        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :, 1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
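A possible usage sketch for orb_stitcher; the file names are placeholders, not from the original.

import cv2

master = cv2.imread('master.jpg')
secondary = cv2.imread('secondary.jpg')
panorama = orb_stitcher([master, secondary])
if panorama is not None:
    cv2.imwrite('stitched.jpg', panorama)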
Example no. 4
def viewImage():
    img_ = right()
    # img_ = cv2.resize(img_, (0,0), fx=1, fy=1)
    img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
    img = rear()

    # img = cv2.resize(img, (0,0), fx=1, fy=1)
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()  # on OpenCV >= 4.4 this is cv2.SIFT_create()
    # find the key points and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    match = cv2.BFMatcher()
    matches = match.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        flags=2)
    img3 = cv2.drawMatches(img_, kp1, img, kp2, good, None, **draw_params)
    cv2.imshow("original_image_drawMatches.jpg", img3)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        cv2.imshow("original_image_overlapping.jpg", img2)

    # dst = np.concatenate((rightImg, rearImg), 1)
    # cv2.imshow("Image", )
    cv2.waitKey(0)
    cv2.destroyAllWindows()
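right() and rear() are not defined in Example 4; presumably they return the right- and rear-camera frames. A minimal stand-in, assuming the frames are read from disk (paths are placeholders).

import cv2

def right():
    # assumed loader for the right-camera frame
    return cv2.imread('right.jpg')

def rear():
    # assumed loader for the rear-camera frame
    return cv2.imread('rear.jpg')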
Example no. 5
def align_image(feature_image, target_image):
    ORB = cv2.ORB_create(MAX_FEATURE)
    feature_keypoints, feature_descriptors = ORB.detectAndCompute(
        feature_image, None)
    target_keypoints, target_descriptors = ORB.detectAndCompute(
        target_image, None)

    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(feature_descriptors, target_descriptors, None)

    matches.sort(key=lambda x: x.distance, reverse=False)

    num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:num_good_matches]

    img_matches = cv2.drawMatches(feature_image, feature_keypoints,
                                  target_image, target_keypoints, matches,
                                  None)
    cv2.imwrite("image-matches.jpg", img_matches)

    feature_points = np.zeros((len(matches), 2), dtype=np.float32)
    target_points = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        feature_points[i, :] = feature_keypoints[match.queryIdx].pt
        target_points[i, :] = target_keypoints[match.trainIdx].pt

    homography_matrix, mask = cv2.findHomography(target_points, feature_points,
                                                 cv2.RANSAC)

    height, width, channels = feature_image.shape

    perspective_corrected_image = cv2.warpPerspective(target_image,
                                                      homography_matrix,
                                                      (width, height))

    return perspective_corrected_image, homography_matrix
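MAX_FEATURE and GOOD_MATCH_PERCENT are module-level constants not shown in Example 5. A usage sketch with typical values; the constant values and file names are assumptions.

import cv2

MAX_FEATURE = 500          # assumed cap on the number of ORB keypoints
GOOD_MATCH_PERCENT = 0.15  # assumed fraction of best matches kept

reference = cv2.imread('reference.jpg')
scanned = cv2.imread('scanned.jpg')
aligned, H = align_image(reference, scanned)
cv2.imwrite('aligned.jpg', aligned)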
Example no. 6
def q2(image, sift):
    # a)Enlarge the given image by a scale percentage of 115.
    scale = 115
    scale = scale/100

    width = int(image.shape[1] * scale)
    height = int(image.shape[0] * scale)
    new_dim = (width, height)
    resized = cv2.resize(image, new_dim)
    

    # b) Extract the SIFT features and show the keypoints on the scaled image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    ## find the keypoints and descriptors using sift detector
    keyPoints1, des1 = sift.detector.detectAndCompute(image, None)
    # Detect the keypoints and compute the descriptors on the scaled image with the same detector
    keyPoints2, des2 = sift.detector.detectAndCompute(resized, None)
    img2 = cv2.drawKeypoints(image, keyPoints1, image)
    # Hint: Brute-force matching is available in OpenCV for feature matching.
    bf_matcher = cv2.BFMatcher()
    #use Matcher.match() method to get the best matches in two images
    matches = bf_matcher.match(des1, des2)
    #matches = bf_matcher.knnMatch(des1, des2, k=2)
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the scaled image with those of the original image 
    # using the nearest-neighbour distance ratio method
    # We sort them in ascending order of their distances so that best matches (with low distance) come to front
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the best-matching descriptors on both the original and the scaled image.
    img_q2=cv2.drawMatches(image, keyPoints1, resized, keyPoints2,
                           matches[:6], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q2), plt.show()
    cv2.imwrite('d2.jpg', img_q2)
    cv2.imwrite('b2.jpg', img2)
Example no. 7
def rectify_pair(image_left, image_right, viz=False):
    # Feature point matching
    # 1. Detect keypoints with SURF
    grayL = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)
    surf = cv2.xfeatures2d.SURF_create()

    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(grayL, None)
    kp2, des2 = surf.detectAndCompute(grayR, None)
    img = cv2.drawKeypoints(grayL, kp1, image_left)
    cv2.imshow("keyPointsOfLeft", img)

    # 2. Match the keypoints with BFMatcher
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)
    # match the feature descriptors
    matches = bf.match(des1, des2)
    points1 = []
    points2 = []
    for match in matches:
        points1.append(kp1[match.queryIdx].pt)
        points2.append(kp2[match.trainIdx].pt)
    #matches=sorted(matches,key=lambda x:x.distance)
    # print(len(matches))
    img3 = cv2.drawMatches(grayL, kp1, grayR, kp2, matches[:20], None, flags=2)
    cv2.imshow('matches', img3)

    # find the fundamental matrix
    F, mask = cv2.findFundamentalMat(np.array(points1), np.array(points2),
                                     cv2.RANSAC, 3, 0.99)

    # rectify the images, produce the homographies: H_left and H_right
    retval, H_left, H_right = cv2.stereoRectifyUncalibrated(
        np.array(points1), np.array(points2), F,
        (image_left.shape[1], image_left.shape[0]))  # imgSize expects (width, height)

    return F, H_left, H_right
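A usage sketch showing how the homographies returned by rectify_pair could be applied to warp both views into a rectified pair; file names are placeholders.

import cv2

image_left = cv2.imread('left.jpg')
image_right = cv2.imread('right.jpg')
F, H_left, H_right = rectify_pair(image_left, image_right)
h, w = image_left.shape[:2]
rect_left = cv2.warpPerspective(image_left, H_left, (w, h))
rect_right = cv2.warpPerspective(image_right, H_right, (w, h))
cv2.imshow('rectified pair', cv2.hconcat([rect_left, rect_right]))
cv2.waitKey(0)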
Example no. 8
# OpenCV ---- feature matching ---- BFMatching
import cv2
from matplotlib import pyplot as plt
# Read the two images to be matched, as grayscale
template = cv2.imread("template_adjust.jpg", 0)
target = cv2.imread("target1.jpg", 0)
orb = cv2.ORB_create()  # create the ORB feature detector
kp1, des1 = orb.detectAndCompute(template, None)  # keypoints and descriptors of the template
kp2, des2 = orb.detectAndCompute(target, None)  # keypoints and descriptors of the target
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # create the matcher
matches = bf.match(des1, des2)  # match the descriptors
matches = sorted(matches, key=lambda x: x.distance)  # sort by distance
result = cv2.drawMatches(template,
                         kp1,
                         target,
                         kp2,
                         matches[:40],
                         None,
                         flags=2)  # draw the matches
plt.imshow(result), plt.show()  # display with matplotlib
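Several of the other examples use the nearest-neighbour distance ratio test instead of cross-checked matching. A sketch of that alternative on the same template/target pair; the 0.75 ratio is a conventional choice, not from the original.

bf_knn = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)  # crossCheck must be off for knnMatch with k=2
knn_matches = bf_knn.knnMatch(des1, des2, k=2)
good = []
for pair in knn_matches:
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])
result_knn = cv2.drawMatches(template, kp1, target, kp2, good[:40], None, flags=2)
plt.imshow(result_knn), plt.show()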
Example no. 9
def tracking_orb():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    ret, frm = cap.read()
    img_chocolate = cv2.imread('marker.jpg')

    frm_count = 0
    key = None

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (frm.shape[1], frm.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_orb.mp4', fourcc, 30.0, image_size)

    while ret:

        ## Create ORB object and BF object(using HAMMING)
        orb = cv2.ORB_create()

        gray2 = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

        # gray2 = cv2.equalizeHist(gray2)
        # gray1 = cv2.equalizeHist(gray1)

        ## Find the keypoints and descriptors with ORB
        kpts1, descs1 = orb.detectAndCompute(gray1, None)
        kpts2, descs2 = orb.detectAndCompute(gray2, None)

        # create BFMatcher object
        ## match descriptors and sort them in the order of their distance
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # NORM_HAMMING matches ORB's default WTA_K=2

        # Match descriptors.
        matches = bf.match(descs1, descs2)

        # Sort them in the order of their distance.
        dmatches = sorted(matches, key=lambda x: x.distance)

        ## extract the matched keypoints
        src_pts = np.float32([kpts1[m.queryIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kpts2[m.trainIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)

        ## find homography matrix and do perspective transform
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_chocolate.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        ## draw found regions
        frm = cv2.polylines(frm, [np.int32(dst)], True, (0, 0, 255), 1,
                            cv2.LINE_AA)

        ## draw match lines
        res = cv2.drawMatches(img_chocolate,
                              kpts1,
                              frm,
                              kpts2,
                              dmatches[:8],
                              None,
                              flags=2)

        # writer.write(res)
        cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)
        # cv2.imshow("orb_match", frm)
        out.write(frm)
        cv2.imshow("orb_match", res)

        # Pause on pressing of space.
        if key == ord(' '):
            wait_period = 0
        else:
            wait_period = 30

        key = cv2.waitKey(wait_period)
        ret, frm = cap.read()
        frm_count += 1

    cv2.destroyAllWindows()
    cap.release()
    out.release()

    return 0
Example no. 10
print("[INFO] descriotion size of BRISK = {}".format(BRISK.descriptorSize()))

print("[INFO] stage 3: matching features!")

# create a BFMatcher instance
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match descriptors.
matches = bf.match(des1, des2)

# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# Draw the first 20 matches.
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], None, flags=2)

# showing matches keypoints
plt.title("BRISK")
plt.imshow(img3), plt.show()

# saving matches keypoints in file
cv2.imwrite("images/outputs/BRISK_result.jpg", img3)

print("[INFO] size of features = {}".format(len(matches[:10])))

print("[INFO] computing Precision!")
correct_matches = int(
    input("Please enter the number of correct matches:        "))
incorrect_matches = 10 - correct_matches
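Example 10 starts at stage 3, so the earlier stages are not shown. A plausible sketch of what presumably precedes it; the detector settings and file paths are assumptions.

import cv2
from matplotlib import pyplot as plt

# stage 1: load the two images to match, as grayscale (paths are placeholders)
img1 = cv2.imread('images/inputs/query.jpg', 0)
img2 = cv2.imread('images/inputs/train.jpg', 0)

# stage 2: detect BRISK keypoints and compute their binary descriptors
BRISK = cv2.BRISK_create()
kp1, des1 = BRISK.detectAndCompute(img1, None)
kp2, des2 = BRISK.detectAndCompute(img2, None)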
Example no. 11
kp1, des1 = sift.detectAndCompute(gray1, None)
kp2, des2 = sift.detectAndCompute(gray2, None)

########
# test #
########
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []

for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
good = sorted(good, key=lambda x: x.distance)

show = cv2.drawMatches(img1, kp1, img2, kp2, good[:20], None, flags=2)
imshow(imdiv(show, 1.5))
########
# test #
########

# record the x, y coordinates of the keypoints
pos1 = np.float32([kp.pt for kp in kp1])
pos2 = np.float32([kp.pt for kp in kp2])

# feature matching
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []

for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
Example no. 12
start_time = time.time()
IM.detectAKAZE()
print(f'finished detect in {time.time()-start_time}')
start_time = time.time()

IM.KNNmatchAKAZE(projErr=5)
print(f'finished matching in {time.time()-start_time}')
start_time = time.time()

with open('./data/matching_pairs/ORB_matching_pair.npy', 'wb') as f:
    np.save(f, IM.src_pts)
    np.save(f, IM.dst_pts)

print(f'There are {len(IM.tar_kps)} feature points in target image.')
print(f'There are {len(IM.ref_kps)} feature points in reference image.')
print(f'There are {len(IM.matches)} matching pairs.')
print(f'There are {len(IM.mask[IM.mask == 1])} matching pairs after RANSAC.')
# print(f"Process finished --- {(time.time() - start_time)} seconds ---")
img4 = cv2.drawMatches(
    tar_img,
    IM.tar_kps,
    ref_img,
    IM.ref_kps,
    IM.matches,
    None,
    flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS,
    matchesMask=IM.mask)
cv2.imwrite('Match.jpg', img4)
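The IM object, tar_img and ref_img are set up before the snippet in Example 12. A hypothetical sketch of that setup, inferred from the attributes and methods used above; the class name, ratio threshold and file names are assumptions.

import cv2
import numpy as np

class ImageMatcher:
    def __init__(self, tar_img, ref_img):
        self.tar_img, self.ref_img = tar_img, ref_img

    def detectAKAZE(self):
        # detect AKAZE keypoints and compute binary descriptors for both images
        akaze = cv2.AKAZE_create()
        self.tar_kps, self.tar_des = akaze.detectAndCompute(self.tar_img, None)
        self.ref_kps, self.ref_des = akaze.detectAndCompute(self.ref_img, None)

    def KNNmatchAKAZE(self, projErr=5):
        # ratio-test matching followed by a RANSAC homography fit
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        knn = bf.knnMatch(self.tar_des, self.ref_des, k=2)
        self.matches = [p[0] for p in knn
                        if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]
        self.src_pts = np.float32([self.tar_kps[m.queryIdx].pt
                                   for m in self.matches]).reshape(-1, 1, 2)
        self.dst_pts = np.float32([self.ref_kps[m.trainIdx].pt
                                   for m in self.matches]).reshape(-1, 1, 2)
        _, mask = cv2.findHomography(self.src_pts, self.dst_pts, cv2.RANSAC, projErr)
        self.mask = mask.ravel()  # pass IM.mask.tolist() to drawMatches if it rejects the array

tar_img = cv2.imread('target.jpg')      # placeholder path
ref_img = cv2.imread('reference.jpg')   # placeholder path
IM = ImageMatcher(tar_img, ref_img)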