def match_image_to_model(X, model_des, img, using_rootsift, threshold=0.75):

    sift = cv.SIFT_create()
    kp_query, query_des = sift.detectAndCompute(img, None)
    if using_rootsift:
        # RootSIFT: L1-normalise each descriptor, then take the element-wise
        # square root (Arandjelovic & Zisserman, 2012)
        query_des /= query_des.sum(axis=1, keepdims=True)
        query_des = np.sqrt(query_des)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(model_des, query_des, k=2)

    # Keep only the good matches, via the ratio test from Lowe's paper;
    # matched_idx flags which columns of X found a match
    good = []
    matched_idx = [False] * X.shape[1]
    for i, (m, n) in enumerate(matches):
        if m.distance < threshold * n.distance:
            good.append(m)
            matched_idx[i] = True

    print(f"Found {len(good)} matches with distance threshold = {threshold}")

    # m.trainIdx indexes kp_query because query_des was the "train" set in
    # knnMatch above; m.queryIdx indexes the model descriptors
    matched_2D_points = np.array([kp_query[m.trainIdx].pt for m in good])
    matched_3D_points = X[:, matched_idx]

    return matched_2D_points, matched_3D_points
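
A minimal usage sketch for the function above; the file paths, array shapes and camera intrinsics are assumptions, not part of the original. The matched 2D-3D pairs feed directly into cv.solvePnPRansac for pose estimation:

import numpy as np
import cv2 as cv

img = cv.imread("query.jpg", cv.IMREAD_GRAYSCALE)  # hypothetical query image
X = np.load("model_points.npy")                    # assumed 4xN homogeneous 3D points
model_des = np.load("model_descriptors.npy")       # assumed float32, row i <-> column i of X
K = np.load("camera_intrinsics.npy")               # assumed 3x3 camera matrix

uv, Xm = match_image_to_model(X, model_des, img, using_rootsift=True)
ok, rvec, tvec, inliers = cv.solvePnPRansac(
    Xm[:3].T.astype(np.float32),  # Nx3 object points
    uv.astype(np.float32),        # Nx2 image points
    K, None)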
Example #2
def computeHomography(im1, im2):
    '''Estimate the homography mapping im1 onto im2 (SIFT + FLANN + RANSAC).'''
    sift = cv2.SIFT_create()
    # find the keypoints with SIFT
    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)

    # FLANN needs float32 descriptors (SIFT already returns float32, so the
    # casts below are just a safeguard)
    des1 = np.float32(des1)
    des2 = np.float32(des2)
    # create a FLANN-based matcher
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Match descriptors.
    matches = flann.knnMatch(des1, des2, k=2)

    # good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)

    return H
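
A minimal usage sketch (the image names are hypothetical): warping im1 into im2's frame with the returned H gives a quick visual check of the estimate.

import cv2

im1 = cv2.imread("scene_a.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical
im2 = cv2.imread("scene_b.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical
H = computeHomography(im1, im2)
warped = cv2.warpPerspective(im1, H, (im2.shape[1], im2.shape[0]))
overlay = cv2.addWeighted(warped, 0.5, im2, 0.5, 0)
cv2.imwrite("overlay.jpg", overlay)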
Example #3
    def _match(self, kp1, kp2, des1, des2, MIN_MATCH_COUNT=10):
        '''
        Return homography H.
        '''
        FLANN_INDEX_KDTREE = 1  # Using KD tree
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(np.float32(des1), np.float32(des2), k=2)
        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:  # Compute homography
            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            H, _ = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
            return H
        else:
            print("Not enough matches are found - {}/{}".format(
                len(good), MIN_MATCH_COUNT))
            return None
Example #4
def find_matches(desc1, desc2, kps2):
    """
    Finds a list of matches between two images, and groups them in clusters

    :param desc1: numpy.ndarray of descriptors from the first image
    :param desc2: numpy.ndarray of descriptors from the second image
    :param kps2: list of keypoints from the second image (list[cv2.KeyPoint])
    :return: None if not enough matches were found, or a dictionary of
        clusters, where the key is the number of cluster, and the value is a
        list of matches
    """
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(desc1, desc2, k=2)

    good_matches = []
    for match in matches:
        if len(match) > 1 and match[0].distance < match[1].distance * 0.7:
            good_matches.append(match[0])

    if len(good_matches) < MIN_MATCH_COUNT:  # MIN_MATCH_COUNT: module-level constant
        return None

    clustered_matches = matches_filter(good_matches, kps2)

    return clustered_matches
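
matches_filter is external to this snippet. A purely illustrative stand-in, assuming clusters are formed from the spatial positions of the matched keypoints (the 100-pixel cell size is an invented parameter):

def matches_filter(good_matches, kps2, cell=100):
    # Bucket matches by the matched keypoint's position on a coarse grid,
    # then re-key the buckets with integer cluster numbers, matching the
    # dictionary shape described in the find_matches docstring.
    clusters = {}
    for m in good_matches:
        x, y = kps2[m.trainIdx].pt
        clusters.setdefault((int(x // cell), int(y // cell)), []).append(m)
    return {i: ms for i, ms in enumerate(clusters.values())}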
def create_flann_picture(self, left_image, right_image):
    orb = cv2.ORB_create(nfeatures=2000)
    left_keypoints, left_descriptors = orb.detectAndCompute(left_image, None)
    right_keypoints, right_descriptors = orb.detectAndCompute(
        right_image, None)

    FLANN_INDEX_LSH = 6
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  # 12
        key_size=12,  # 20
        multi_probe_level=1)  #2

    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(left_descriptors, right_descriptors, k=2)

    matchesMask = [[0, 0] for i in range(len(matches))]

    # With the LSH index some queries return fewer than 2 neighbours, so
    # guard the unpacking before applying the ratio test
    for i, match in enumerate(matches):
        if len(match) == 2 and match[0].distance < 0.7 * match[1].distance:
            matchesMask[i] = [1, 0]
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=cv2.DrawMatchesFlags_DEFAULT)
    final_image = cv2.drawMatchesKnn(left_image, left_keypoints, right_image,
                                     right_keypoints, matches, None,
                                     **draw_params)
    cv2.imshow("something", final_image)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
def FLANN_matching(img1, img2, threshold=0.75):

    # Initiate SIFT detector
    sift = cv.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    # ratio test as per Lowe's paper
    for m, n in matches:
        if m.distance < threshold * n.distance:
            good.append(m)

    print(f"Found {len(good)} matches with distance threshold = {threshold}")

    p1 = np.array([kp1[m.queryIdx].pt for m in good])
    p2 = np.array([kp2[m.trainIdx].pt for m in good])

    des = des1[[m.queryIdx for m in good], :]

    # homogeneous pixel coordinates (not returned below)
    uv1 = np.vstack((p1.T, np.ones(p1.shape[0])))
    uv2 = np.vstack((p2.T, np.ones(p2.shape[0])))

    return p1, p2, des
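
The correspondences returned here are the usual input to two-view geometry. A sketch of that next step; the image paths and the intrinsic matrix K are placeholders, not part of the original:

img1 = cv.imread("frame_0.png", cv.IMREAD_GRAYSCALE)  # hypothetical
img2 = cv.imread("frame_1.png", cv.IMREAD_GRAYSCALE)  # hypothetical
K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])  # hypothetical calibrated intrinsics

p1, p2, des = FLANN_matching(img1, img2)
E, mask = cv.findEssentialMat(p1, p2, K, cv.RANSAC, 0.999, 1.0)
_, R, t, mask = cv.recoverPose(E, p1, p2, K, mask=mask)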
Example #7
def full_view(filename1, filename2, dirname):
    leftgray, rightgray = cv2.imread(dirname +
                                     filename1), cv2.imread(dirname +
                                                            filename2)

    hessian = 400
    surf = cv2.xfeatures2d.SURF_create(
        hessian)  # Hessian threshold 400; the higher the threshold, the fewer features are detected
    kp1, des1 = surf.detectAndCompute(leftgray, None)  # find keypoints and descriptors
    kp2, des2 = surf.detectAndCompute(rightgray, None)

    FLANN_INDEX_KDTREE = 1  # FLANN matcher parameter (1 selects the kd-tree index; 0 would be linear search)
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # configure the index: 5 kd-trees
    searchParams = dict(checks=50)  # number of recursive traversals
    # FlannBasedMatcher: fast approximate nearest-neighbour matching
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)  # build the matcher
    matches = flann.knnMatch(des1, des2, k=2)  # find matching keypoint pairs

    good = []
    # keep only the strong matches
    for m, n in matches:
        # keep m when its distance is under 0.3x the second-nearest distance
        # (Lowe's paper suggests 0.7; 0.3 is much stricter)
        if m.distance < 0.3 * n.distance:
            good.append(m)
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # points in the query (left) image
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # points in the train (right) image
    H, _ = cv2.findHomography(src_pts, dst_pts)  # estimate the homography

    h, w = leftgray.shape[:2]
    h1, w1 = rightgray.shape[:2]
    shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
    M = np.dot(shft, H)  # projective map taking the left image into the stitched frame

    dst_corners = cv2.warpPerspective(
        leftgray, M, (w * 2, h))  # perspective warp; the canvas can hold both images in full
    # cv2.imshow('before add right', dst_corners)
    # dst_corners[0:h, 0:w] = leftgray
    dst_corners[0:h, w:w + w1] = rightgray  # place the second image on the right

    # remove the blank (all-zero) columns left over from the warp
    sum_col = dst_corners.sum(axis=(0, 2))
    result_img = dst_corners[:, sum_col != 0, :]

    # cv2.imshow('dest', dst_corners)
    result_name = get_full_view_result_name(filename1, filename2)

    cv2.imwrite(dirname + result_name, result_img)

    cv2.waitKey()
    cv2.destroyAllWindows()

    return result_name
    def get_matches(self):

        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(self.query_descs, self.train_descs, k=2)

        self.good = []

        for m, n in matches:
            # DISTANCE_CORRECT is a module-level ratio-test constant
            if m.distance < DISTANCE_CORRECT * n.distance:
                self.good.append(m)

        return self.good
Example #9
    def SURFmatch(self, previous_car):
        # https://www.cnblogs.com/Lin-Yi/p/9435824.html
        kp1, des1 = CarClass.surf.detectAndCompute(self.img, None)
        kp2, des2 = CarClass.surf.detectAndCompute(previous_car.img, None)
        # constant selecting the kd-tree index algorithm
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # checks = how many times the index trees are traversed
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        # run the matching search
        matches = flann.knnMatch(des1, des2, k=2)
        # collect the close matches into the good list
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        return len(good)
Example #10
def detect_image_repost(urls: List[str], url: str) -> list:
    '''
    Returns the image that most closely resembles a repost of the image at
    `url`, as a list [source_url, best_match_url, confidence], where the
    confidence is the percentage of keypoints that matched.
    '''
    image1 = io.imread(url)
    sift = cv2.SIFT_create()
    k1, d1 = sift.detectAndCompute(image1, None)

    index = {'algorithm': 1, 'trees': 5}  # 1 = FLANN_INDEX_KDTREE
    search = {}
    flann = cv2.FlannBasedMatcher(index, search)

    max_confidence = -1
    final_url = urls[0]

    for u in urls:
        if u == url:
            continue
        image2 = io.imread(u)
        k2, d2 = sift.detectAndCompute(image2, None)
        matches = flann.knnMatch(d1, d2, k=2)
        points = []
        for m, n in matches:
            if m.distance < .6 * n.distance:
                points.append(m)
        number_keypoints = min(len(k1), len(k2))
        confidence = (len(points) / number_keypoints) * 100
        if confidence > max_confidence:
            max_confidence = confidence
            final_url = u

    return [url, final_url, max_confidence]
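
A usage sketch with a hypothetical gallery; skimage's io.imread fetches each URL directly:

candidates = [
    "https://example.com/original.png",
    "https://example.com/candidate_1.png",
    "https://example.com/candidate_2.png",
]
source, best_match, confidence = detect_image_repost(
    candidates, "https://example.com/original.png")
print(f"{best_match} matches {source} at {confidence:.1f}% keypoint overlap")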
def FLANN_matching(img1, img2, using_rootsift, threshold=0.75):
    # Initiate SIFT detector
    sift = cv.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    if using_rootsift:
        # RootSIFT: L1-normalise each descriptor, then take the element-wise
        # square root (Arandjelovic & Zisserman, 2012)
        des1 /= des1.sum(axis=1, keepdims=True)
        des1 = np.sqrt(des1)
        des2 /= des2.sum(axis=1, keepdims=True)
        des2 = np.sqrt(des2)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only the good matches, via the ratio test from Lowe's paper
    good = []
    for m, n in matches:
        if m.distance < threshold * n.distance:
            good.append(m)

    print(f"Found {len(good)} matches with distance threshold = {threshold}")

    p1 = np.array([kp1[m.queryIdx].pt for m in good])
    p2 = np.array([kp2[m.trainIdx].pt for m in good])

    des = des1[[m.queryIdx for m in good], :]

    return p1, p2, des
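
The using_rootsift branch above is the RootSIFT mapping of Arandjelovic and Zisserman (2012): L1-normalise each descriptor, then take an element-wise square root, so that Euclidean distance between the transformed descriptors approximates the Hellinger kernel. The same transform as a standalone helper, with an epsilon guard (the eps value is an assumption) against all-zero descriptors:

def to_rootsift(des, eps=1e-7):
    # L1-normalise each row, then take the element-wise square root
    des = des / (des.sum(axis=1, keepdims=True) + eps)
    return np.sqrt(des)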
Example #12
    def Features(self, comp_H=0, comp_V=0):
        h1, w1 = self.img1_.shape[:2]
        h2, w2 = self.img2_.shape[:2]

        img1 = cv2.resize(self.img1_,
                          (int(self.ratio * w1), int(self.ratio * h1)))
        img2 = cv2.resize(self.img2_,
                          (int(self.ratio * w2), int(self.ratio * h2)))

        surf = cv2.xfeatures2d.SURF_create(self.KK, upright=True)
        # kp and des hold the detected keypoints and their descriptors
        # (note: kp1/des1 come from img2 and kp2/des2 from img1)
        kp1, des1 = surf.detectAndCompute(img2, None)
        kp2, des2 = surf.detectAndCompute(img1, None)

        # set up the FLANN-based matcher
        index_params = dict(algorithm=1, trees=5)
        search_params = dict(checks=50)
        FBM_ = cv2.FlannBasedMatcher(index_params, search_params)
        # match the extracted descriptors
        matches = FBM_.knnMatch(des1, des2, k=2)

        data_ = self.FindGoodAndPoints(matches, kp1, kp2, comp_H, comp_V)

        return data_
Example #13
def init_feature(name):
    # FLANN index constants (defined at module level in the original OpenCV sample)
    FLANN_INDEX_KDTREE = 1
    FLANN_INDEX_LSH = 6
    chunks = name.split('-')

    if chunks[0] == 'sift':
        detector = cv2.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None  # Return None if unknown detector name

    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  # 2

        matcher = cv2.FlannBasedMatcher(flann_params)
    else:
        matcher = cv2.BFMatcher(norm)

    return detector, matcher
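
A minimal usage sketch (the image path is hypothetical): the 'detector-flann' naming convention picks the detector and the matching backend in one string.

img = cv2.imread("object.png", cv2.IMREAD_GRAYSCALE)  # hypothetical
detector, matcher = init_feature('sift-flann')
if detector is None:
    raise ValueError("unknown feature type")
kp, des = detector.detectAndCompute(img, None)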
def relative_scale(X1, X2, des1, des2, threshold=0.75):

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)  # or pass empty dictionary
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    # ratio test as per Lowe's paper
    for m, n in matches:
        if m.distance < threshold * n.distance:
            good.append(m)

    p1 = np.array([X1[:, m.queryIdx] for m in good])
    p2 = np.array([X2[:, m.trainIdx] for m in good])

    # Distances between consecutive matched points in each reconstruction;
    # the mean of their ratios is the relative scale (diff over the point
    # axis, norm over the coordinate axis)
    diff1 = np.linalg.norm(np.diff(p1, axis=0), axis=1)
    diff2 = np.linalg.norm(np.diff(p2, axis=0), axis=1)

    scale = np.mean(diff1 / diff2)

    return scale
Example #15
import numpy as np
import cv2
from matplotlib import pyplot as plt

imgname = "3.ppm"   # query image (large scene)
imgname2 = "4.ppm"  # train image (small object)

## Create SIFT object
sift = cv2.xfeatures2d.SIFT_create()

## Create flann matcher
FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
#matcher = cv2.FlannBasedMatcher_create()
matcher = cv2.FlannBasedMatcher(flann_params, {})

## Detect and compute
img1 = cv2.imread(imgname)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
kpts1, descs1 = sift.detectAndCompute(gray1,None)

## As up
img2 = cv2.imread(imgname2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
kpts2, descs2 = sift.detectAndCompute(gray2,None)

## Ratio test
matches = matcher.knnMatch(descs1, descs2, 2)
matchesMask = [[0,0] for i in range(len(matches))]
for i, (m1,m2) in enumerate(matches):
    if m1.distance < 0.7 * m2.distance:
        matchesMask[i] = [1,0]
#img1gray=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
#img2gray=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
surf = cv2.xfeatures2d.SURF_create(10000,
                                   nOctaves=4,
                                   extended=False,
                                   upright=True)
# surf = cv2.xfeatures2d.SIFT_create()  # SIFT can be used instead
kp1, descrip1 = surf.detectAndCompute(img1, None)
kp2, descrip2 = surf.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 1
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)

flann = cv2.FlannBasedMatcher(indexParams, searchParams)
match = flann.knnMatch(descrip1, descrip2, k=2)

good = []
for m, n in match:
    if m.distance < 0.75 * n.distance:
        good.append(m)

MIN = 10  # minimum number of good matches needed for a homography
if len(good) > MIN:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ano_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, ano_pts, cv2.RANSAC, 5.0)
    warpImg = cv2.warpPerspective(
        img2, np.linalg.inv(M), (img1.shape[1] + img2.shape[1], img2.shape[0]))
    direct = warpImg.copy()
    direct[0:img1.shape[0], 0:img1.shape[1]] = img1
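
A typical continuation for the stitch above (the output name is an assumption): save and display the composite.

cv2.imwrite("stitched.jpg", direct)  # hypothetical output path
cv2.imshow("stitched", direct)
cv2.waitKey(0)
cv2.destroyAllWindows()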
Example #17
                           right,
                           cv.BORDER_CONSTANT,
                           value=(0, 0, 0))
img1_gray = cv.cvtColor(img1_1, cv.COLOR_BGR2GRAY)
img2_gray = cv.cvtColor(img2_1, cv.COLOR_BGR2GRAY)
sift = cv.SIFT_create()

# find key points and descriptors
k1, d1 = sift.detectAndCompute(img1_gray, None)
k2, d2 = sift.detectAndCompute(img2_gray, None)

# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(d1, d2, k=2)

# draw good matches
matches_mask = [[0, 0] for i in range(len(matches))]
good = []
pts1 = []
pts2 = []

for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        good.append(m)
        pts2.append(k2[m.trainIdx].pt)
        pts1.append(k1[m.queryIdx].pt)
        matches_mask[i] = [1, 0]
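
The pts1/pts2 lists collected here are the standard input to fundamental-matrix estimation, as in OpenCV's epipolar-geometry tutorial; a sketch of that continuation (assuming numpy is imported as np, as in the other examples):

pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)

# keep only the inlier points
pts1 = pts1[mask.ravel() == 1]
pts2 = pts2[mask.ravel() == 1]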