Code Example #1
File: cv2_helpers.py  Project: syedomair0/CV-learning
def create_flann_picture(self, left_image, right_image):
    orb = cv2.ORB_create(nfeatures=2000)
    left_keypoints, left_descriptors = orb.detectAndCompute(left_image, None)
    right_keypoints, right_descriptors = orb.detectAndCompute(
        right_image, None)

    FLANN_INDEX_LSH = 6
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,       # the OpenCV FLANN tutorial suggests 12
        key_size=12,          # ... and 20
        multi_probe_level=1)  # ... and 2

    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(left_descriptors, right_descriptors, k=2)

    matchesMask = [[0, 0] for i in range(len(matches))]

    # With the LSH index, knnMatch can return fewer than 2 neighbours per
    # query, so guard before unpacking, then apply Lowe's ratio test.
    for i, pair in enumerate(matches):
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            matchesMask[i] = [1, 0]
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=cv2.DrawMatchesFlags_DEFAULT)
    final_image = cv2.drawMatchesKnn(left_image, left_keypoints, right_image,
                                     right_keypoints, matches, None,
                                     **draw_params)
    cv2.imshow("something", final_image)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
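For ORB's binary descriptors, a brute-force Hamming matcher is a simpler drop-in alternative to the FLANN-LSH index used above; a minimal hedged sketch (the helper name is illustrative, not from the source):

import cv2

def match_orb_bf(left_descriptors, right_descriptors, ratio=0.7):
    # NORM_HAMMING suits ORB's binary descriptors; the default L2 norm would not.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    pairs = bf.knnMatch(left_descriptors, right_descriptors, k=2)
    # Lowe's ratio test: keep a match only if it clearly beats the runner-up.
    return [m for m, n in pairs if m.distance < ratio * n.distance]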
Code Example #2
import cv2
import numpy as np
from matplotlib import pyplot as plt


def getPoints_SIFT(im1, im2):
    p1 = []
    p2 = []
    # Initiate SIFT detector (cv2.SIFT_create() since OpenCV 4.4;
    # older contrib builds exposed it as cv2.xfeatures2d.SIFT_create())
    sift = cv2.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply the ratio test (0.4 is stricter than Lowe's usual 0.7,
    # keeping only very confident matches)
    good = []
    for m, n in matches:
        if m.distance < 0.4 * n.distance:
            good.append([m])
            img1_idx = m.queryIdx
            img2_idx = m.trainIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            p1.append((x1, y1))
            p2.append((x2, y2))

    # cv2.drawMatchesKnn expects a list of lists as matches; passing None
    # lets OpenCV allocate the side-by-side output image itself.
    img_match = cv2.drawMatchesKnn(im1, kp1, im2, kp2, good, None, flags=2)
    plt.imshow(img_match)
    plt.show()
    p1 = np.asarray(p1).T
    p2 = np.asarray(p2).T
    return p1, p2
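A hedged usage sketch for getPoints_SIFT (the file names are assumptions):

im1 = cv2.imread('left.jpg', 0)    # hypothetical paths, loaded as grayscale
im2 = cv2.imread('right.jpg', 0)
p1, p2 = getPoints_SIFT(im1, im2)
print(p1.shape)                    # (2, N): row 0 holds x, row 1 holds y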
Code Example #3
## Load the second image and extract SIFT features, same as above
img2 = cv2.imread(imgname2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
kpts2, descs2 = sift.detectAndCompute(gray2, None)

## Ratio test
matches = matcher.knnMatch(descs1, descs2, 2)
matchesMask = [[0, 0] for i in range(len(matches))]
for i, (m1, m2) in enumerate(matches):
    if m1.distance < 0.7 * m2.distance:
        matchesMask[i] = [1, 0]
        ## Note: queryIdx/trainIdx recover the keypoint indices from a match
        pt1 = kpts1[m1.queryIdx].pt
        pt2 = kpts2[m1.trainIdx].pt
        print(i, pt1, pt2)
        if i % 5 == 0:
            ## Draw every fifth pair in purple to sanity-check the result
            cv2.circle(img1, (int(pt1[0]), int(pt1[1])), 5, (255, 0, 255), -1)
            cv2.circle(img2, (int(pt2[0]), int(pt2[1])), 5, (255, 0, 255), -1)


## Draw matches in blue, unmatched keypoints in red (colors are BGR)
draw_params = dict(matchColor=(255, 0, 0),
                   singlePointColor=(0, 0, 255),
                   matchesMask=matchesMask,
                   flags=0)

res = cv2.drawMatchesKnn(img1, kpts1, img2, kpts2, matches, None, **draw_params)
cv2.imshow("Result", res)
cv2.waitKey()
cv2.destroyAllWindows()
Code Example #4
good = []
pts1 = []
pts2 = []
matches_mask = [[0, 0] for i in range(len(matches))]  # used below; missing in the original snippet

for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        good.append(m)
        pts2.append(k2[m.trainIdx].pt)
        pts1.append(k1[m.queryIdx].pt)
        matches_mask[i] = [1, 0]

draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matches_mask,
                   flags=0)
img3 = cv.drawMatchesKnn(img1_gray, k1, img2_gray, k2, matches, None,
                         **draw_params)
plt.imshow(img3)
plt.axis('off')
plt.savefig('descriptors.jpg', dpi=600)
plt.show()

rows, cols = img1_1.shape[:2]
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    img1_pts = np.float32([k1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    img2_pts = np.float32([k2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv.findHomography(img1_pts, img2_pts, cv.RANSAC, 5.0)
    warp_img = cv.warpPerspective(img2_1,
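                                  # hedged completion: the source truncates the
                                  # call at this point. Assumption: warp img2_1
                                  # into img1_1's frame, hence the inverse
                                  # homography and img1_1's (cols, rows) size.
                                  np.linalg.inv(M), (cols, rows))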
Code Example #5
import cv2
import numpy as np

img1 = cv2.imread(r'Image Test/tiger.jpg', 0)   # 0 = load as grayscale
img2 = cv2.imread(r'Image Train/tiger.jpg', 0)

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # Hamming distance suits ORB's binary descriptors
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
print(len(good))
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow('img1', img1)
cv2.imshow('img2', img2)
cv2.imshow('img3', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
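A common variant for ORB (an assumption, not in the source): cross-checked Hamming matching with plain match() instead of the ratio test, then drawing the strongest 30 matches:

bf_cc = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches_cc = sorted(bf_cc.match(des1, des2), key=lambda m: m.distance)
img4 = cv2.drawMatches(img1, kp1, img2, kp2, matches_cc[:30], None, flags=2)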
Code Example #6
# Feature extraction and inter-image matching
# Features: ORB
import cv2 as cv  # "import cv2.cv2" in the source is a packaging quirk; plain cv2 is correct

# Load the image files
img1 = cv.imread('./imagedata/image6.jpg')
img1 = cv.resize(img1, (600, 400))
img2 = cv.imread('./imagedata/image2.jpg')
img2 = cv.resize(img2, (600, 400))
# ORB feature detector
detector = cv.ORB_create()
# Extract keypoints and descriptors
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
bf = cv.BFMatcher(cv.NORM_HAMMING)  # Hamming distance suits ORB's binary descriptors
matches = bf.knnMatch(des1, des2, k=2)
# Keep matches that pass the ratio test
matched = []
for match1, match2 in matches:
    ratio = match1.distance / match2.distance
    if ratio < 0.8:
        matched.append([match1])
# Draw and display the matches
imgmatches = cv.drawMatchesKnn(img1, kp1, img2, kp2, matched, None, flags=2)
cv.imshow("image matching", imgmatches)

cv.waitKey(0)
cv.destroyAllWindows()
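Optionally (an addition beyond the source), sorting the surviving matches by distance and drawing only the strongest few keeps the visualization readable:

matched.sort(key=lambda pair: pair[0].distance)
imgtop = cv.drawMatchesKnn(img1, kp1, img2, kp2, matched[:20], None, flags=2)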
Code Example #7
import cv2
from time import perf_counter  # time.clock() was removed in Python 3.8

# img1 and img2 are assumed to be loaded earlier in the source project.
orb = cv2.ORB_create()

sta = perf_counter()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# indexParams/searchParams are undefined in this snippet; for ORB's binary
# descriptors the usual choice is a FLANN LSH index (values assumed, as in
# Code Example #1).
FLANN_INDEX_LSH = 6
indexParams = dict(algorithm=FLANN_INDEX_LSH, table_number=6,
                   key_size=12, multi_probe_level=1)
searchParams = dict(checks=50)

flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
matchesMask = [[0, 0] for i in range(len(matches))]

for i in range(len(matches)):
    if len(matches[i]) >= 2:
        (m, n) = matches[i]
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]
fin = perf_counter()

draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=0)

img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)

print("detect + match time: %.3f s" % (fin - sta))

cv2.imshow('FLANN matches', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
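Instead of masking, a filtered variant (illustrative, not from the source) draws only the matches that survive the guard and the ratio test:

good = []
for pair in matches:
    if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
        good.append([pair[0]])
img4 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                          matchColor=(0, 255, 0), flags=2)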