def __init__(self):
    self.sift = cv2.xfeatures2d_SIFT().create()
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    self.flann = cv2.FlannBasedMatcher(index_params, search_params)
def SIFT(imgA, imgB):
    # convert the color images to grayscale
    imgAgray = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    imgBgray = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # SIFT
    sift = cv2.xfeatures2d_SIFT().create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(imgAgray, None)
    kp2, des2 = sift.detectAndCompute(imgBgray, None)
    return kp1, des1, kp2, des2
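The keypoints and descriptors returned above pair with the FLANN matcher set up in __init__. A minimal usage sketch (hypothetical image paths, and the 0.7 ratio from Lowe's paper):

import cv2

imgA = cv2.imread('a.jpg')
imgB = cv2.imread('b.jpg')
kp1, des1, kp2, des2 = SIFT(imgA, imgB)

# same FLANN kd-tree parameters as in __init__ above
flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only if it is clearly better than the runner-up
good = [m for m, n in matches if m.distance < 0.7 * n.distance]
print(len(good), 'good matches')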
                                    400,
                                    cv2.BORDER_CONSTANT,
                                    value=(0, 0, 0))

        testImg = cv2.resize(testImg, (400, 300))
        cv2.imshow("b", testImg)
        testImg = cv2.copyMakeBorder(testImg,
                                     move_top * 1,
                                     0,
                                     400,
                                     0,
                                     cv2.BORDER_CONSTANT,
                                     value=(0, 0, 0))
        img1gray = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
        img2gray = cv2.cvtColor(testImg, cv2.COLOR_BGR2GRAY)
        sift = cv2.xfeatures2d_SIFT().create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1gray, None)
        kp2, des2 = sift.detectAndCompute(img2gray, None)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in range(len(matches))]

        good = []
        pts1 = []
Example 4
    def stitchtwo(self, img1, img2):
        a = time.time()
        # pad img2 so its size matches img1
        rows1, cols1 = img1.shape[:2]
        rows2, cols2 = img2.shape[:2]
        h = rows1 - rows2
        w = cols1 - cols2
        if h > 0 or w > 0:
            top, bot, left, right = 0, max(h, 0), max(w, 0), 0
            img2 = cv.copyMakeBorder(img2,
                                     top,
                                     bot,
                                     left,
                                     right,
                                     cv.BORDER_CONSTANT,
                                     value=(0, 0, 0))
        top, bot, left, right = 0, 500, 400, 0
        srcImg = cv.copyMakeBorder(img1,
                                   top,
                                   bot,
                                   left,
                                   right,
                                   cv.BORDER_CONSTANT,
                                   value=(0, 0, 0))
        testImg = cv.copyMakeBorder(img2,
                                    top,
                                    bot,
                                    left,
                                    right,
                                    cv.BORDER_CONSTANT,
                                    value=(0, 0, 0))
        img1gray = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
        img2gray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)
        # sift = cv.xfeatures2d_SIFT().create(5000)
        sift = cv.xfeatures2d_SIFT().create(10000)
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1gray, None)
        kp2, des2 = sift.detectAndCompute(img2gray, None)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in range(len(matches))]

        good = []
        pts1 = []
        pts2 = []
        # ratio test as per Lowe's paper
        for i, (m, n) in enumerate(matches):
            # if m.distance < 0.7 * n.distance:
            if m.distance < 0.5 * n.distance:
                good.append(m)
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)
                matchesMask[i] = [1, 0]

        # draw matches
        # draw_params = dict(matchColor=(0, 255, 0),
        #                    singlePointColor=(255, 0, 0),
        #                    matchesMask=matchesMask,
        #                    flags=0)
        # img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None, **draw_params)
        # plt.imshow(img3, ), plt.show()

        rows, cols = srcImg.shape[:2]
        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)

            warpImg = cv.warpPerspective(testImg,
                                         np.array(M),
                                         (testImg.shape[1], testImg.shape[0]),
                                         flags=cv.WARP_INVERSE_MAP)

            self.calROICorners(warpImg)

            res = np.zeros([rows, cols, 3], np.uint8)

            for row in range(0, rows):
                for col in range(0, cols):

                    if warpImg[row, col, 0]:
                        res[row, col] = warpImg[row, col]
                    else:
                        res[row, col] = srcImg[row, col]

            # opencv is bgr, matplotlib is rgb
            # res = cv.cvtColor(res, cv.COLOR_BGR2RGB)
            # show the result
            # plt.figure(), plt.imshow(res), plt.show()
            print("interval3: ", time.time() - a)
            return res
        else:
            print("Not enough matches are found - {}/{}".format(
                len(good), MIN_MATCH_COUNT))
            matchesMask = None
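calROICorners is not included in this snippet; presumably it records the corners of the non-black region of the warped image. A minimal method sketch under that assumption (hypothetical implementation, not the original):

import cv2 as cv
import numpy as np

def calROICorners(self, warpImg):
    # assumption: store the bounding-box corners of the warped content
    gray = cv.cvtColor(warpImg, cv.COLOR_BGR2GRAY)
    pts = cv.findNonZero((gray > 0).astype(np.uint8))
    x, y, w, h = cv.boundingRect(pts)
    self.roi_corners = [(x, y), (x + w, y), (x, y + h), (x + w, y + h)]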
Example 5
def test(leftimg, rightimg):
    # top, bot, left, right = 100, 100, 0, 500
    top, bot, left, right = 10, 10, 10, 10
    # img1 = cv2.imread('1.jpg')
    # img2 = cv2.imread('2.jpg')
    srcImg = cv2.copyMakeBorder(leftimg,
                                top,
                                bot,
                                left,
                                right,
                                cv2.BORDER_CONSTANT,
                                value=(0, 0, 0))
    testImg = cv2.copyMakeBorder(rightimg,
                                 top,
                                 bot,
                                 left,
                                 right,
                                 cv2.BORDER_CONSTANT,
                                 value=(0, 0, 0))
    print(srcImg.shape)
    # cv2.imshow('1',srcImg)
    # cv2.imshow('2',testImg)
    img1gray = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
    img2gray = cv2.cvtColor(testImg, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d_SIFT().create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    img3 = cv2.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None,
                              **draw_params)
    # plt.imshow(img3, ), plt.show()

    rows, cols = srcImg.shape[:2]
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        warpImg = cv2.warpPerspective(testImg,
                                      np.array(M),
                                      (testImg.shape[1], testImg.shape[0]),
                                      flags=cv2.WARP_INVERSE_MAP)

        for col in range(0, cols):
            if srcImg[:, col].any() and warpImg[:, col].any():
                left = col
                break
        for col in range(cols - 1, 0, -1):
            if srcImg[:, col].any() and warpImg[:, col].any():
                right = col
                break

        res = np.zeros([rows, cols, 3], np.uint8)
        for row in range(0, rows):
            for col in range(0, cols):
                if not srcImg[row, col].any():
                    res[row, col] = warpImg[row, col]
                elif not warpImg[row, col].any():
                    res[row, col] = srcImg[row, col]
                else:
                    srcImgLen = float(abs(col - left))
                    testImgLen = float(abs(col - right))
                    alpha = srcImgLen / (srcImgLen + testImgLen)
                    res[row, col] = np.clip(
                        srcImg[row, col] * (1 - alpha) +
                        warpImg[row, col] * alpha, 0, 255)

        # opencv is bgr, matplotlib is rgb
        res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        print(res.shape)
        # show the result
        # plt.figure()
        # plt.imshow(res)
        # plt.show()
        # cv2.imshow('result',res)
        # cv2.waitKey(0)
        return res
    else:
        print("Not enough matches are found - {}/{}".format(
            len(good), MIN_MATCH_COUNT))
        matchesMask = None
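A minimal usage sketch for test (hypothetical file names); the returned panorama is already RGB, so it can go straight to matplotlib:

import cv2
from matplotlib import pyplot as plt

pano = test(cv2.imread('left.jpg'), cv2.imread('right.jpg'))
if pano is not None:
    plt.imshow(pano)  # no further BGR->RGB conversion needed
    plt.show()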
Example 6
import cv2
import numpy as np

img = cv2.imread(
    '/Users/rawassizadeh/EVERYTHING/Work/TEACHING/CS688_WebAnalyticsMining/toGithub/Session 6/funny-chicken.png'
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#sift = cv2.SIFT()

sift = cv2.xfeatures2d_SIFT().create()
kp = sift.detect(gray, None)

img = cv2.drawKeypoints(gray, kp, None)

cv2.imwrite(
    '/Users/rawassizadeh/EVERYTHING/Work/TEACHING/CS688_WebAnalyticsMining/toGithub/Session 6/sifteeed.jpg',
    img)
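Since the SIFT patent expired, this example no longer needs the contrib module: on OpenCV 4.4+ the detector lives in the main namespace as cv2.SIFT_create(). A minimal sketch (shortened, hypothetical paths):

import cv2

img = cv2.imread('funny-chicken.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

sift = cv2.SIFT_create()  # main-module API, OpenCV >= 4.4
kp = sift.detect(gray, None)
out = cv2.drawKeypoints(gray, kp, None)
cv2.imwrite('sift_keypoints.jpg', out)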
Example 7
def test1(pic1, pic2, right):
    top, bot, left = 100, 100, 0
    img1 = cv.imread(pic1)
    img2 = cv.imread(pic2)
    img1 = RotateAntiClockWise90(img1)
    img2 = RotateAntiClockWise90(img2)

    srcImg = cv.copyMakeBorder(img1,
                               top,
                               bot,
                               left,
                               right,
                               cv.BORDER_CONSTANT,
                               value=(0, 0, 0))
    testImg = cv.copyMakeBorder(img2,
                                top,
                                bot,
                                left,
                                right,
                                cv.BORDER_CONSTANT,
                                value=(0, 0, 0))

    # exit(1)
    # img1gray = cv.cvtColor(srcImg, cv.COLOR_RGB2GRAY)
    # img2gray = cv.cvtColor(testImg, cv.COLOR_RGB2GRAY)
    img1gray = srcImg
    img2gray = testImg
    sift = cv.xfeatures2d_SIFT().create()
    # sift =  cv.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None,
                             **draw_params)
    plt.imshow(img3)
    plt.show()

    rows, cols = srcImg.shape[:2]
    # print(rows, cols)
    # exit(1)
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        warpImg = cv.warpPerspective(testImg,
                                     np.array(M),
                                     (testImg.shape[1], testImg.shape[0]),
                                     flags=cv.WARP_INVERSE_MAP)

        for col in range(0, cols):
            if srcImg[:, col].any() and warpImg[:, col].any():
                left = col
                break
        for col in range(cols - 1, 0, -1):
            if srcImg[:, col].any() and warpImg[:, col].any():
                right = col
                break

        res = np.zeros([rows, cols, 3], np.uint8)
        for row in range(0, rows):
            for col in range(0, cols):
                if not srcImg[row, col].any():
                    res[row, col] = warpImg[row, col]
                elif not warpImg[row, col].any():
                    res[row, col] = srcImg[row, col]
                else:
                    srcImgLen = float(abs(col - left))
                    testImgLen = float(abs(col - right))
                    alpha = srcImgLen / (srcImgLen + testImgLen)
                    res[row, col] = np.clip(
                        srcImg[row, col] * (1 - alpha) +
                        warpImg[row, col] * alpha, 0, 255)

        # opencv is bgr, matplotlib is rgb
        res = cv.cvtColor(res, cv.COLOR_BGR2RGB)

        return res
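test1 depends on a RotateAntiClockWise90 helper that is not included in the snippet. A one-line sketch of what it might look like (my assumption, using cv.rotate):

import cv2 as cv

def RotateAntiClockWise90(img):
    # rotate the image 90 degrees counter-clockwise
    return cv.rotate(img, cv.ROTATE_90_COUNTERCLOCKWISE)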
Example 8
def attach_homo(left_image, right_image):
    left_image = np.array(left_image)
    right_image = np.array(right_image)
    # cv.imshow('src',left_image)
    # cv.imshow('warp',right_image)
    top, bot, left, right = 100, 100, 150, 150
    srcImg = cv.copyMakeBorder(left_image,
                               top,
                               bot,
                               left,
                               right,
                               cv.BORDER_CONSTANT,
                               value=(0, 0, 0))
    testImg = cv.copyMakeBorder(right_image,
                                top,
                                bot,
                                left,
                                right,
                                cv.BORDER_CONSTANT,
                                value=(0, 0, 0))
    img1gray = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
    img2gray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)

    img1gray = downsample_image(img1gray, 1)
    img2gray = downsample_image(img2gray, 1)

    sift = cv.xfeatures2d_SIFT().create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.5 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    # print(matches)
    img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None,
                             **draw_params)
    # cv.imshow('drawmatch',img3)
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        print(M)
        np.save("zed_M", M)
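attach_homo also relies on a downsample_image helper that is not shown. A plausible sketch (an assumption, not the original) that halves the resolution n times with a Gaussian pyramid:

import cv2 as cv

def downsample_image(img, n):
    # halve width and height n times
    for _ in range(n):
        img = cv.pyrDown(img)
    return img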
Example 9
def sift_detect(img1, img2, detector='surf'):
    if detector.startswith('si'):
        print("sift detector......")
        sift = cv2.xfeatures2d_SIFT().create()
    else:
        print("surf detector......")
        sift = cv2.xfeatures2d.SURF_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # img3 = cv2.drawKeypoints(image_a, kp1, image_a, color=(255, 0, 255))  # draw the keypoints as colored circles; input and output use the same image here
    # cv2.imshow('1',img3)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = []
    p1 = []
    p2 = []
    for m, n in matches:
        if m.distance < 0.5 * n.distance:
            good.append([m])
            p1.append(kp1[m.queryIdx].pt)
            p2.append(kp2[m.trainIdx].pt)
    p1 = np.array(p1)
    p2 = np.array(p2)
    print(good)
    # good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]
    # print(len(good))
    # for _ in range(len(kp1)):
    #     p1.append(kp1[_].pt)
    # for _ in range(len(kp2)):
    #     p2.append(kp2[_].pt)
    # p1 = np.array(p1)
    # p2 = np.array(p2)
    print(len(p1), len(p2))

    # cv2.drawMatchesKnn expects list of lists as matches.
    match_img = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)

    # Read camera parameters
    K = np.load('./Calibration/camera_params/K.npy')
    F = np.load('./Calibration/camera_params/FocalLength.npy')
    pp_x = float("{:.2f}".format(K[0][2]))
    pp_y = float("{:.2f}".format(K[1][2]))
    # E, mask = cv2.findEssentialMat(p1, p2, focal=F, pp=(pp_x,pp_y), method=cv2.RANSAC, prob=0.999, threshold=1.0)
    E, mask = cv2.findEssentialMat(p1,
                                   p2,
                                   focal=F,
                                   pp=(pp_x, pp_y),
                                   method=cv2.RANSAC,
                                   prob=0.999,
                                   threshold=1.0)
    # print(E,mask)
    _, R, T, mask = cv2.recoverPose(E, p1, p2, focal=F, pp=(pp_x, pp_y))
    # print(R,T)
    # triangulate the matched points (output is homogeneous 4-D)
    standard = np.hstack((np.eye(3), np.zeros((3, 1))))  # [I | 0], first camera
    reference = np.hstack((R, T))
    # print(p1.shape,standard,reference)
    point4d = cv2.triangulatePoints(standard, reference, p1.T, p2.T)
    print(point4d, point4d.shape)
    test_deal(point4d)
    # BFMatcher with default params
    return bgr_rgb(match_img)
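sift_detect calls two helpers that are not part of the snippet: test_deal (post-processing of the triangulated points, left undefined here) and bgr_rgb, which presumably just reorders channels for display. A one-line sketch of the latter under that assumption:

import cv2

def bgr_rgb(img):
    # OpenCV arrays are BGR; matplotlib expects RGB
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)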
Example 10
def ImageStitching(left_img_dir, right_img_dir):
    # border padding in pixels: top, bottom, left, right
    top, bot, left, right = 100, 100, 0, 500
    # read the two input images
    left_image = cv.imread(left_img_dir)
    right_image = cv.imread(right_img_dir)
    # pad the image borders (fill with black)
    left_image_bord = cv.copyMakeBorder(left_image,
                                        top,
                                        bot,
                                        left,
                                        right,
                                        cv.BORDER_CONSTANT,
                                        value=(0, 0, 0))  # constant fill: black border
    right_image_bord = cv.copyMakeBorder(right_image,
                                         top,
                                         bot,
                                         left,
                                         right,
                                         cv.BORDER_CONSTANT,
                                         value=(0, 0, 0))  # constant fill: black border
    left_image_bord_gray = cv.cvtColor(left_image_bord,
                                       cv.COLOR_BGR2GRAY)  # convert to grayscale
    right_image_bord_gray = cv.cvtColor(right_image_bord,
                                        cv.COLOR_BGR2GRAY)  # convert to grayscale

    # find the keypoints and descriptors with SIFT
    sift = cv.xfeatures2d_SIFT().create()  # initialize SIFT
    kp1, des1 = sift.detectAndCompute(left_image_bord_gray,
                                      None)  # keypoints and their 128-d descriptors
    kp2, des2 = sift.detectAndCompute(right_image_bord_gray, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    good = []  # collect only the good matches
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:  # of the k candidates, the best counts as good when well ahead of the second best
            good.append(m)
            matchesMask[i] = [1, 0]  # draw the line for this good match

    # drawing parameters: single keypoints in red, match lines in green; matchesMask keeps only the good matches, flags sets the line style
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    # draw the keypoint matches between the two images
    match_img = cv.drawMatchesKnn(left_image_bord_gray, kp1,
                                  right_image_bord_gray, kp2, matches, None,
                                  **draw_params)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        # estimate the best homography with RANSAC
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        # inverse-warp the right image using the homography found above
        warpImg = cv.warpPerspective(
            right_image_bord,
            np.array(M),
            (right_image_bord.shape[1], right_image_bord.shape[0]),
            flags=cv.WARP_INVERSE_MAP)
        # linearly blend the overlapping region
        output_img = ImageMerge(left_image_bord, warpImg)

    else:
        print("Not enough matches are found - {}/{}".format(
            len(good), MIN_MATCH_COUNT))
        output_img = None

    return output_img, match_img
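The ImageMerge helper that ImageStitching delegates to is not shown. A sketch reconstructed from the per-pixel linear blend used in Examples 5 and 7 (my reconstruction, not the original):

import numpy as np

def ImageMerge(srcImg, warpImg):
    # keep whichever image has content; blend linearly where both do
    rows, cols = srcImg.shape[:2]
    left = next(c for c in range(cols)
                if srcImg[:, c].any() and warpImg[:, c].any())
    right = next(c for c in range(cols - 1, 0, -1)
                 if srcImg[:, c].any() and warpImg[:, c].any())
    res = np.zeros([rows, cols, 3], np.uint8)
    for row in range(rows):
        for col in range(cols):
            if not srcImg[row, col].any():
                res[row, col] = warpImg[row, col]
            elif not warpImg[row, col].any():
                res[row, col] = srcImg[row, col]
            else:
                alpha = abs(col - left) / (abs(col - left) + abs(col - right))
                res[row, col] = np.clip(srcImg[row, col] * (1 - alpha) +
                                        warpImg[row, col] * alpha, 0, 255)
    return res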
Example 11
def imCalibration(baseIm, targetIm):
    import numpy as np
    import cv2 as cv
    from matplotlib import pyplot as plt

    [height, width, depth] = targetIm.shape
    [height2, width2, depth] = baseIm.shape

    top, bot, left, right = 200, 200, 200, 200

    srcImg = cv.copyMakeBorder(targetIm,
                               top,
                               bot,
                               left,
                               right,
                               cv.BORDER_CONSTANT,
                               value=(0, 0, 0))
    testImg = cv.copyMakeBorder(baseIm,
                                top,
                                bot + (height - height2),
                                left,
                                right + (width - width2),
                                cv.BORDER_CONSTANT,
                                value=(0, 0, 0))

    grayIm = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
    targetImgray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)

    sift = cv.xfeatures2d_SIFT().create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(grayIm, None)
    kp2, des2 = sift.detectAndCompute(targetImgray, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    # draw_params = dict(matchColor=(0, 255, 0),
    #                    singlePointColor=(255, 0, 0),
    #                    matchesMask=matchesMask,
    #                    flags=0)
    #img3 = cv.drawMatchesKnn(grayIm, kp1, targetImgray, kp2, matches, None, **draw_params)
    #plt.imshow(img3, ), plt.show()

    rows, cols = srcImg.shape[:2]
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        warpImg = cv.warpPerspective(testImg,
                                     np.array(M),
                                     (testImg.shape[1], testImg.shape[0]),
                                     flags=cv.WARP_INVERSE_MAP)

        # for col in range(0, cols):
        #     if srcImg[:, col].any() and warpImg[:, col].any():
        #         tleft = col
        #         break
        # for col in range(cols-1, 0, -1):
        #     if srcImg[:, col].any() and warpImg[:, col].any():
        #         tright = col
        #         break

        #        res = np.zeros([rows, cols, 3], np.uint8)
        #        for row in range(0, rows):
        #            for col in range(0, cols):
        #                if srcImg[row, col].any():
        #                    res[row, col] = srcImg[row, col]
        #                if warpImg[row, col].any() and srcImg[row, col].any():
        #                    res[row, col] = warpImg[row, col]

        # opencv is bgr, matplotlib is rgb

        mergedIm = warpImg[top:(height + top), left:(
            width + left)]  #res[top: (height+top), left: (width+left)]
        #cv.imwrite('mergedIm.jpg',mergedIm)
        #cv.imwrite('res.jpg',res)
        #cv.imwrite('warpImg.jpg',warpImg)
        #mergedIm = cv.cvtColor(res, cv.COLOR_BGR2RGB)
        # show the result
        #plt.figure()
        #plt.imshow(res)
        #plt.show()
    else:
        print("Not enough matches are found - {}/{}".format(
            len(good), MIN_MATCH_COUNT))
        matchesMask = None
        mergedIm = targetIm

    return mergedIm
Example 12
def feature_method_sift(image):
    # obtain features with SIFT, keeping at most the 100 best keypoints
    sift = cv.xfeatures2d_SIFT().create(nfeatures=100)
    return sift.detectAndCompute(image=image, mask=None)
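A minimal usage sketch for feature_method_sift (hypothetical image path); note that the descriptors can be None when no keypoints are found:

import cv2 as cv

img = cv.imread('scene.jpg')
keypoints, descriptors = feature_method_sift(img)
print(len(keypoints), None if descriptors is None else descriptors.shape)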