Example #1
 def _match(self, kp1, kp2, des1, des2, MIN_MATCH_COUNT=10):
     '''
     Return homography H.
     '''
     FLANN_INDEX_KDTREE = 1  # Using KD tree
     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
     search_params = dict(checks=50)
     flann = cv.FlannBasedMatcher(index_params, search_params)
     matches = flann.knnMatch(np.float32(des1), np.float32(des2), k=2)
     # store all the good matches as per Lowe's ratio test.
     good = []
     for m, n in matches:
         if m.distance < 0.7 * n.distance:
             good.append(m)
     if len(good) > MIN_MATCH_COUNT:  # Compute homography
         src_pts = np.float32([kp1[m.queryIdx].pt
                               for m in good]).reshape(-1, 1, 2)
         dst_pts = np.float32([kp2[m.trainIdx].pt
                               for m in good]).reshape(-1, 1, 2)
         H, _ = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
         return H
     else:
         print("Not enough matches are found - {}/{}".format(
             len(good), MIN_MATCH_COUNT))
         return None
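A minimal driver for the method above (a sketch, not part of the original source): it assumes two grayscale images on disk and an object matcher_obj that owns _match; the file names and the owning object are placeholders.

import cv2 as cv

img1 = cv.imread("query.jpg", cv.IMREAD_GRAYSCALE)   # placeholder file names
img2 = cv.imread("train.jpg", cv.IMREAD_GRAYSCALE)
sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# H = matcher_obj._match(kp1, kp2, des1, des2)  # hypothetical owning object; returns H or None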
Example #2
def computeHomography(im1, im2):
    '''Estimate the homography that maps im1 onto im2 using SIFT features and FLANN matching.'''
    sift = cv2.SIFT_create()
    # find the keypoints with SIFT
    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)

    des1 = np.float32(des1)
    des2 = np.float32(des2)
    # create FLANN-based matcher
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Match descriptors.
    matches = flann.knnMatch(des1, des2, k=2)

    #good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)

    return H
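A hedged usage sketch for computeHomography: the source points come from im1 (queryIdx) and the destination points from im2 (trainIdx), so the returned H maps im1 into im2's frame and warping im1 with it overlays the two images. File names below are placeholders.

import cv2

im1 = cv2.imread("scene_a.jpg", cv2.IMREAD_GRAYSCALE)  # placeholder file names
im2 = cv2.imread("scene_b.jpg", cv2.IMREAD_GRAYSCALE)
H = computeHomography(im1, im2)
aligned = cv2.warpPerspective(im1, H, (im2.shape[1], im2.shape[0]))
cv2.imwrite("aligned.jpg", aligned)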
Example #3
def draw_object(img1, kp1, img2, kps2, matches):
    """
    Draws the object found on the second image

    :param img1: First image
    :param kp1: List of keypoints from the first image
    :param img2: Second image
    :param kps2: List of keypoints from the second image
    :param matches: List of matches between the two images
    :return: modified image
    """
    src_pts = np.float32([kp1[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)[0]

    h, w = img1.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img_res = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    return img_res
def rectify_3d_with_db(painting_roi, ranked_list, dst_points, src_points):
    best = max(ranked_list, key=ranked_list.get)
    match = cv2.imread(best)

    h_match = int(match.shape[0])
    w_match = int(match.shape[1])

    src_points = np.squeeze(src_points, axis=1).astype(np.float32)
    dst_points = np.squeeze(dst_points, axis=1).astype(np.float32)
    src_points = np.array(
        utils.remove_points_outside_roi(src_points, w_match, h_match))

    if src_points.shape[0] < 4:
        return None
    else:
        H, _ = cv2.findHomography(dst_points, src_points, cv2.RANSAC, 5.0)
        if H is None:
            print(
                "[ERROR] Homography matrix can't be estimated. Rectification aborted."
            )
            return None
        img_dataset_warped = cv2.warpPerspective(
            match, H, (painting_roi.shape[1], painting_roi.shape[0]))

        print("[SUCCESS] Warped from keypoints")

        mask = np.all(img_dataset_warped == [0, 0, 0], axis=-1)
        img_dataset_warped[mask] = painting_roi[mask]
        show_img(img_dataset_warped)
        return img_dataset_warped
Example #5
def findMatchPair_ORB(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='ORB_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    mask = None
    H = None
    # Initiate ORB detector

    orb = cv2.ORB_create(ptsNum)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(ref_des, tar_des)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])

    return src_pts, dst_pts, tar_kp, ref_kp, matches, mask
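A possible driver for findMatchPair_ORB (file names are placeholders, not from the source): with RANSAC=True the returned points are already inliers, so a homography re-estimated from them can warp the target image onto the reference.

import cv2

tar = cv2.imread("target.jpg", cv2.IMREAD_GRAYSCALE)        # placeholder file names
ref = cv2.imread("reference.jpg", cv2.IMREAD_GRAYSCALE)
src_pts, dst_pts, tar_kp, ref_kp, matches, mask = findMatchPair_ORB(tar, ref)
H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
aligned = cv2.warpPerspective(tar, H, (ref.shape[1], ref.shape[0]))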
def rectify_with_db(painting_roi, ranked_list, dst_points, src_points) -> bool:
    best = max(ranked_list, key=ranked_list.get)
    match = cv2.imread(best)

    h_match = int(match.shape[0])
    w_match = int(match.shape[1])

    src_points = np.squeeze(src_points, axis=1).astype(np.float32)
    dst_points = np.squeeze(dst_points, axis=1).astype(np.float32)
    # src_points = np.array(utils.remove_points_outside_roi(
    #    src_points, w_match, h_match))

    if src_points.shape[0] < 4:
        src_points, bbox = get_corners(painting_roi, draw=True)

        if len(src_points) < 4:
            print("[ERROR] Can't find enough corners")
            return None
        src_points = utils.order_corners(src_points)

        # dst_points order: (x, y), (x+w, y), (x+w, y+h), (x, y+h)

        x, y, w, h = bbox
        dst_points = np.array([(0, 0), (w, 0), (w, h), (0, h)])

        H, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        if H is None:
            print(
                "[ERROR] Homography matrix can't be estimated. Rectification aborted."
            )
            return None
        painting_roi = cv2.warpPerspective(painting_roi, H, (w, h))
        print("[SUCCESS] Warped from corners")
        show_img(painting_roi)
    else:

        H, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        if H is None:
            print(
                "[ERROR] Homography matrix can't be estimated. Rectification aborted."
            )
            return None
        painting_roi = cv2.warpPerspective(painting_roi, H, (w_match, h_match))
        #rectify_from_3d(src_points, dst_points, match, painting_roi)  # ----------------------------------------------------------#
        print("[SUCCESS] Warped from keypoints")
        show_img(painting_roi)
    return True
Example #7
def full_view(filename1, filename2, dirname):
    leftgray, rightgray = cv2.imread(dirname +
                                     filename1), cv2.imread(dirname +
                                                            filename2)

    hessian = 400
    surf = cv2.xfeatures2d.SURF_create(hessian)  # set the Hessian threshold to 400; the larger the threshold, the fewer features are detected
    kp1, des1 = surf.detectAndCompute(leftgray, None)  # find keypoints and descriptors
    kp2, des2 = surf.detectAndCompute(rightgray, None)

    FLANN_INDEX_KDTREE = 1  # parameters for the FLANN matcher (KD-tree index)
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # configure the index: 5 KD-trees
    searchParams = dict(checks=50)  # specify the number of recursive checks
    # FlannBasedMatcher: currently the fastest feature matching method (nearest-neighbor search)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)  # build the matcher
    matches = flann.knnMatch(des1, des2, k=2)  # get the matched keypoints

    good = []
    # keep only the good feature points
    for m, n in matches:
        # if m.distance < 0.7 * n.distance:  # keep the match if the nearest distance is under 0.7x the second-nearest
        if m.distance < 0.3 * n.distance:
            good.append(m)
    src_pts = np.array([kp1[m.queryIdx].pt for m in good])  # matched points in the query image
    dst_pts = np.array([kp2[m.trainIdx].pt for m in good])  # matched points in the train (template) image
    H = cv2.findHomography(src_pts, dst_pts)  # estimate the transform matrix

    h, w = leftgray.shape[:2]
    h1, w1 = rightgray.shape[:2]
    shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
    M = np.dot(shft, H[0])  # projective mapping from the left image into the right image's frame

    dst_corners = cv2.warpPerspective(
        leftgray, M, (w * 2, h))  # perspective transform; the new canvas holds both images
    # cv2.imshow('before add right', dst_corners)
    # dst_corners[0:h, 0:w] = leftgray
    dst_corners[0:h, w:w + w1] = rightgray  # place the second image on the right

    # remove blank columns
    sum_col = np.sum(np.sum(dst_corners, axis=0), axis=1)

    result_img = np.zeros(shape=(dst_corners.shape[0], 1, 3))

    for i in range(len(sum_col)):
        if sum_col[i] != 0:
            result_img = np.hstack([result_img, dst_corners[:, i:i + 1, :]])

    result_img = result_img[:, 1:]

    # cv2.imshow('dest', dst_corners)
    result_name = get_full_view_result_name(filename1, filename2)

    cv2.imwrite(dirname + result_name, result_img)

    cv2.waitKey()
    cv2.destroyAllWindows()

    return result_name
def create_transformed(baseImg):
    ## transform image and place it onto the second image
    movePoints = [[0, 0], [0, newSize[1]], [newSize[0], newSize[1]],
                  [newSize[0], 0]]
    H, _ = cv2.findHomography(np.array(locations), np.array(movePoints))
    # warp and resize
    warped = cv2.warpPerspective(baseImg, H, newSize)
    return warped
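create_transformed reads the module-level names locations (four source corners, ordered to match movePoints: top-left, bottom-left, bottom-right, top-right) and newSize (width, height), neither of which is shown. A hypothetical setup with made-up values:

import cv2

locations = [[120, 80], [100, 410], [520, 430], [540, 60]]  # assumed corner picks (tl, bl, br, tr)
newSize = (400, 300)                                        # assumed output size (width, height)
warped = create_transformed(cv2.imread("board.jpg"))        # placeholder file name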
Example #9
def orb_stitcher(imgs):
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    selected = []
    for m in matches:
        if m.distance < 40:
            selected.append(m)

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg',out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:

        dst_pts = np.float32([kp_master[m.trainIdx].pt
                              for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt
                              for m in selected]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h],
                          [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]
        sz_out = (max(max_extent[1],
                      imgs[0].shape[1]), max(max_extent[0], imgs[0].shape[0]))

        # img2 = cv2.polylines(imgs[0], [np.int32(dst)], True, [0,255,0], 3, cv2.LINE_AA)

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)

        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :,
                                                                          1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
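A hypothetical driver for orb_stitcher (file names are placeholders); note the function opens preview windows, so it expects a GUI-capable environment.

import cv2

imgs = [cv2.imread("master.jpg"), cv2.imread("secondary.jpg")]
pano = orb_stitcher(imgs)
if pano is not None:
    cv2.imwrite("stitched.jpg", pano)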
Example #10
 def matchAKAZE(self, projErr=5, crossCheck=True):
     matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
     matches = matcher.match(self.tar_des, self.ref_des)
     src_pts = np.array([self.tar_kps[match.queryIdx].pt for match in matches])
     dst_pts = np.array([self.ref_kps[match.trainIdx].pt for match in matches])
     _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projErr)
     src_pts = np.array([self.tar_kps[match.queryIdx].pt for idx,
                         match in enumerate(matches) if mask[idx] == 1])
     dst_pts = np.array([self.ref_kps[match.trainIdx].pt for idx,
                         match in enumerate(matches) if mask[idx] == 1])
     self.mask = mask
     self.matches = matches
     self.matchNum = len(mask[mask == 1])
     self.src_pts = src_pts
     self.dst_pts = dst_pts
Example #11
def find_image_in_frame(dmatches, train_pts, new_pts, train_img_h,
                        train_img_w):
    src_pts = np.float32([train_pts[m.queryIdx].pt
                          for m in dmatches]).reshape(-1, 1, 2)
    dst_pts = np.float32([new_pts[m.trainIdx].pt
                          for m in dmatches]).reshape(-1, 1, 2)

    homography_matrix, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                              5.0)
    pts = np.float32([[0, 0], [0, train_img_h - 1],
                      [train_img_w - 1, train_img_h - 1],
                      [train_img_w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, homography_matrix)

    return dst
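A self-contained sketch of how find_image_in_frame might be fed: ORB matches from a template to a frame, with the template keypoints on the query side so queryIdx indexes train_pts. File names and parameters are assumptions, not from the original source.

import cv2
import numpy as np

train_img = cv2.imread("template.jpg", cv2.IMREAD_GRAYSCALE)   # placeholder file names
frame = cv2.imread("frame.jpg")
orb = cv2.ORB_create(1000)
train_pts, train_des = orb.detectAndCompute(train_img, None)
new_pts, new_des = orb.detectAndCompute(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
dmatches = sorted(bf.match(train_des, new_des), key=lambda m: m.distance)
h, w = train_img.shape[:2]
corners = find_image_in_frame(dmatches, train_pts, new_pts, h, w)
frame = cv2.polylines(frame, [np.int32(corners)], True, (0, 255, 0), 2, cv2.LINE_AA)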
Example #12
def viewImage():
    img_ = right()
    # img_ = cv2.resize(img_, (0,0), fx=1, fy=1)
    img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
    img = rear()

    # img = cv2.resize(img, (0,0), fx=1, fy=1)
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    # find the key points and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    match = cv2.BFMatcher()
    matches = match.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        flags=2)
    img3 = cv2.drawMatches(img_, kp1, img, kp2, good, None, **draw_params)
    cv2.imshow("original_image_drawMatches.jpg", img3)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        cv2.imshow("original_image_overlapping.jpg", img2)

    # dst = np.concatenate((rightImg, rearImg), 1)
    # cv2.imshow("Image", )
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #13
def findMatchPair_GMS(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='GMS_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    mask = None  # returned even when RANSAC filtering is skipped
    # Initiate ORB detector
    orb = cv2.ORB_create(ptsNum, fastThreshold=0)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)

    # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(ref_des, tar_des)
    matches_GMS = cv2.xfeatures2d.matchGMS(ref_img.shape[0:2],
                                           tar_img.shape[0:2],
                                           ref_kp,
                                           tar_kp,
                                           matches,
                                           withRotation=True)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches_GMS])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches_GMS])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])

    if save:
        with open(fileName, 'wb') as f:
            np.save(f, src_pts)
            np.save(f, dst_pts)

        print('GMS matching pairs saved')
    return src_pts, dst_pts, tar_kp, ref_kp, matches_GMS, mask
Example #14
 def KNNmatchAKAZE(self,projErr=5):
     matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
     matches = matcher.knnMatch(self.tar_des, self.ref_des, k=2)
     good_matches = []
     for m,n in matches:
         if m.distance < 0.75*n.distance:
             good_matches.append(m)
     src_pts = np.array([self.tar_kps[match.queryIdx].pt for match in good_matches])
     dst_pts = np.array([self.ref_kps[match.trainIdx].pt for match in good_matches])
     
     _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projErr)
     src_pts = np.array([self.tar_kps[match.queryIdx].pt for idx,
                         match in enumerate(good_matches) if mask[idx] == 1])
     dst_pts = np.array([self.ref_kps[match.trainIdx].pt for idx,
                         match in enumerate(good_matches) if mask[idx] == 1])
     self.mask = mask
     self.matches = good_matches
     self.matchNum = len(mask[mask==1])
     self.src_pts = src_pts
     self.dst_pts = dst_pts
def rectify_without_db(painting_roi) -> bool:
    src_points, bbox = get_corners(painting_roi, draw=True)

    if len(src_points) < 4:
        print("[ERROR] Can't find enough corners")
        return None
    src_points = utils.order_corners(src_points)

    x, y, w, h = bbox
    dst_points = np.array([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])

    H, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
    if H is None:
        print(
            "[ERROR] Homography matrix can't be estimated. Rectification aborted."
        )
        return None
    painting_roi = cv2.warpPerspective(painting_roi, H, (w, h))
    print("[SUCCESS] Warped from corners")
    show_img(painting_roi)
    return True
Example #16
def process_instance(im, im_info):
    points = np.array(im_info['canonical_board']['tl_tr_br_bl'])

    matrix, _ = cv2.findHomography(points, reference_points)

    warped_im = cv2.warpPerspective(im, matrix, (1000, 1000))
    gray_im = cv2.cvtColor(warped_im, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray_im, cv2.HOUGH_GRADIENT, 1, 60, param1=400, param2=15, minRadius=30, maxRadius=40)

    checkers_count = {
        'top': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
        'bottom': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    }

    circles = np.uint16(np.around(circles))
    for i in circles[0,:]:
        cv2.circle(warped_im,(i[0],i[1]),i[2],(0,255,0),2)
        cv2.circle(warped_im,(i[0],i[1]),2,(0,0,255),3)

        x, y, _ = i

        if 480 < x < 530:
            continue

        if y < 400:
            spot = 'top'
        elif y > 600:
            spot = 'bottom'
        else:
            continue

        pip = math.floor(x/85)
        checkers_count[spot][pip] += 1

    plt.figure()
    plt.imshow(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))
    plt.show()

    return checkers_count, warped_im
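process_instance relies on a module-level reference_points array that is not shown. A hypothetical definition mapping the board's tl_tr_br_bl corners onto the 1000x1000 canvas used by warpPerspective:

import numpy as np

# assumed destination corners, in the same tl, tr, br, bl order as `points`
reference_points = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])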
Example #17
def align_image(feature_image, target_image):
    ORB = cv2.ORB_create(MAX_FEATURE)
    feature_keypoints, feature_descriptors = ORB.detectAndCompute(
        feature_image, None)
    target_keypoints, target_descriptors = ORB.detectAndCompute(
        target_image, None)

    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(feature_descriptors, target_descriptors, None)

    # sort into a new list (newer OpenCV builds return an immutable tuple here)
    matches = sorted(matches, key=lambda x: x.distance)

    num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:num_good_matches]

    img_matches = cv2.drawMatches(feature_image, feature_keypoints,
                                  target_image, target_keypoints, matches,
                                  None)
    cv2.imwrite("image-matches.jpg", img_matches)

    feature_points = np.zeros((len(matches), 2), dtype=np.float32)
    target_points = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        feature_points[i, :] = feature_keypoints[match.queryIdx].pt
        target_points[i, :] = target_keypoints[match.trainIdx].pt

    homography_matrix, mask = cv2.findHomography(target_points, feature_points,
                                                 cv2.RANSAC)

    height, width, channels = feature_image.shape

    perspective_corrected_image = cv2.warpPerspective(target_image,
                                                      homography_matrix,
                                                      (width, height))

    return perspective_corrected_image, homography_matrix
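align_image reads two module-level constants that are not shown here. A hedged setup with typical values (not taken from the original project) plus a small driver; file names are placeholders.

import cv2

MAX_FEATURE = 5000          # assumed maximum number of ORB keypoints
GOOD_MATCH_PERCENT = 0.15   # assumed fraction of matches to keep

feature_img = cv2.imread("reference_form.jpg")
target_img = cv2.imread("scanned_form.jpg")
aligned, H = align_image(feature_img, target_img)
cv2.imwrite("aligned.jpg", aligned)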
Example #18
def main(path1, path2, name, thre=2000000):

    K = getInternalCalibrationMatrix(path1)

    img1 = cv2.imread(path1)
    img2 = cv2.imread(path2)
    img1H, corners1 = CornersDetector(path1, thre)
    img2H, corners2 = CornersDetector(path2, thre)
    cornersXY1 = getFeaturePointsCoordinates(corners1)
    cornersXY2 = getFeaturePointsCoordinates(corners2)
    points1, points2 = SADloop(img1, img2, cornersXY1, cornersXY2)
    # im = DrawImageCorresponding(img1,img2,points1,points2)

    src1 = np.float32(points1)
    src2 = np.float32(points2)
    H = cv2.findHomography(src2, src1, cv2.RANSAC, 5.0)
    num, Rs, Ts, Ns = cv2.decomposeHomographyMat(H[0], K)

    for i in range(num):
        print("R%d is " % i)
        print(Rs[i])
        print("T%d is " % i)
        print(Ts[i])

    model = cv2.imread("model.png", 0)
    print(K)
    P = projection_matrix(K, H[0])
    obj = OBJ(os.path.join(".\\fox.obj"), swapyz=False)
    im = render(img1, obj, P, model)

    # plt.subplot(2,1,1)
    # plt.imshow(img1H)
    # plt.subplot(2,1,2)
    # plt.imshow(img2H)
    # plt.imshow(im)
    # plt.show()
    cv2.imwrite(name, im)
Example #19
    def create_map(config, augmentation_deg=None):

        src_img_size = config["perspective_info"]["image_size"]
        ground_size = config["model_info"]["input_image_size"]

        w, h = src_img_size
        gw, gh = ground_size

        # calc homography (TuSimple fake)
        imgP = [
            config["perspective_info"]["image_p0"],
            config["perspective_info"]["image_p1"],
            config["perspective_info"]["image_p2"],
            config["perspective_info"]["image_p3"]
        ]
        groundP = [
            config["perspective_info"]["ground_p0"],
            config["perspective_info"]["ground_p1"],
            config["perspective_info"]["ground_p2"],
            config["perspective_info"]["ground_p3"]
        ]
        ground_scale_width = config["model_info"]["ground_scale_width"]
        ground_scale_height = config["model_info"]["ground_scale_height"]

        # We use a single perspective matrix for the image transform, so all
        # images in the dataset must have the same size, or the perspective
        # transformation may fail. By default we assume the camera input size
        # is 1280x720, so the following step rescales the image points to fit.
        for i in range(len(imgP)):
            imgP[i][0] *= w / 1280.0
            imgP[i][1] *= h / 720.0

        # Scale the ground points; we assume the camera position is the center
        # of the perspective-transformed image, as shown below:
        #     (perspectived image with ground_size)
        #     ################################
        #     #             +y               #
        #     #              ^               #
        #     #              |               #
        #     #              |               #
        #     #              |               #
        #     #          p0 --- p1           #
        #     #          |      |            #
        #     #          p3 --- p2           #
        #     #              |               #
        #     # -x ----------C------------+x #
        #     ################################
        #
        for i in range(len(groundP)):
            groundP[i][0] = groundP[i][0] * ground_scale_width + gw / 2.0
            groundP[i][1] = gh - groundP[i][1] * ground_scale_height

        list_H = []
        list_map_x = []
        list_map_y = []

        groud_center = tuple(np.average(groundP, axis=0))
        if augmentation_deg is None:
            augmentation_deg = [0.0]

        for deg in augmentation_deg:
            R = cv2.getRotationMatrix2D(groud_center, deg, 1.0)
            rotate_groupP = []
            for gp in groundP:
                pp = np.matmul(R, [[gp[0]], [gp[1]], [1.0]])
                rotate_groupP.append([pp[0], pp[1]])

            H, _ = cv2.findHomography(np.float32(imgP),
                                      np.float32(rotate_groupP))
            _, invH = cv2.invert(H)

            map_x = np.zeros((gh, gw), dtype=np.float32)
            map_y = np.zeros((gh, gw), dtype=np.float32)

            for gy in range(gh):
                for gx in range(gw):
                    nx, ny, nz = np.matmul(invH, [[gx], [gy], [1.0]])
                    nx /= nz
                    ny /= nz
                    if (nx >= 0 and nx < w and ny >= 0 and ny < h):
                        map_x[gy][gx] = nx
                        map_y[gy][gx] = ny
                    else:
                        map_x[gy][gx] = -1
                        map_y[gy][gx] = -1

            list_H.append(H)
            list_map_x.append(map_x)
            list_map_y.append(map_y)

        return list_H, list_map_x, list_map_y
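A hedged sketch of how the returned lookup tables could be consumed: each map_x/map_y pair is a valid float32 map for cv2.remap, producing the ground-plane (bird's-eye) view of a camera frame. The config dictionary, the standalone call, and the file name are assumptions.

import cv2

list_H, list_map_x, list_map_y = create_map(config)   # config assumed loaded elsewhere
frame = cv2.imread("camera_frame.jpg")                 # placeholder file name
ground_view = cv2.remap(frame, list_map_x[0], list_map_y[0], cv2.INTER_LINEAR)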
Example #20
img3 = cv.drawMatchesKnn(img1_gray, k1, img2_gray, k2, matches, None,
                         **draw_params)
plt.imshow(img3, )
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.savefig('descriptors.jpg', dpi=600)
plt.show()

rows, cols = img1_1.shape[:2]
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    # print(([k1[m.queryIdx].pt for m in good]).shape)
    img1_pts = np.float32([k1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    img2_pts = np.float32([k2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv.findHomography(img1_pts, img2_pts, cv.RANSAC, 5.0)
    warp_img = cv.warpPerspective(img2_1,
                                  np.array(M),
                                  (img2_1.shape[1], img2_1.shape[0]),
                                  flags=cv.WARP_INVERSE_MAP)

for col in range(0, cols):
    if img1_1[:, col].any() and warp_img[:, col].any():
        left = col
        break
for col in range(cols - 1, 0, -1):
    if img1_1[:, col].any() and warp_img[:, col].any():
        right = col
        break

# merge two pictures
matches = bf.knnMatch(des1, des2, 2)
good = []
for m, n in matches:
    if m.distance < 0.9 * n.distance:
        good.append([m])

# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags = 2)
matchesMask = None
if len(good) > MIN_MATCH_COUNT:
    # what is it ?
    src_pts = np.float32([kp1[m[0].queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

else:
    print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
    matchesMask = None

# img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,(0,255,0),None,matchesMask,2)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None)
Example #22
def main():
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    # create ORB - Oriented FAST and Rotated BRIEF - keypoint detector
    orb = cv2.ORB_create(nfeatures=1000)  # retain max 1000 features

    # create BFMatcher object
    bf = cv2.BFMatcher()

    # image target
    model = cv2.imread('target_img.jpg')

    # calculate key point and description
    kp_model, des_model = orb.detectAndCompute(
        model, None)  # kp: key point, des: description

    # obj file
    obj = OBJ('wolf.obj', swapyz=True)

    # Webcam
    webcam = cv2.VideoCapture(0)

    while True:
        success, imgwebcam = webcam.read()
        # find and draw the keypoints of the frame
        kp_webcam, des_webcam = orb.detectAndCompute(imgwebcam, None)

        # finding match between 2 img
        matches = bf.knnMatch(des_model, des_webcam, k=2)
        # Taking good keypoints
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        # compute Homography if enough matches are found
        if len(good) > 15:
            # differentiate between source points and destination points
            srcpts = np.float32([kp_model[m.queryIdx].pt
                                 for m in good]).reshape(-1, 1, 2)
            dstpts = np.float32([kp_webcam[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)

            # compute Homography
            homography, mask = cv2.findHomography(srcpts, dstpts, cv2.RANSAC,
                                                  5)

            #find boundary around model
            h, w, channel = model.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            # project corners into frame
            dst = cv2.perspectiveTransform(pts, homography)
            # connect them with lines
            #imgwebcam = cv2.polylines(imgwebcam,[np.int32(dst)], True, 255, 3, cv2.LINE_AA)

            # if a valid homography matrix was found render object on model plane
            if homography is not None:
                # obtain 3D projection matrix from homography matrix and camera parameters
                projection = projection_matrix(camera_parameters, homography)
                # render object
                imgwebcam = render(imgwebcam, obj, projection, model)
                #imgwebcam = render(imgwebcam, model, projection)

        cv2.imshow('result', imgwebcam)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    webcam.release()
    cv2.destroyAllWindows()
    return 0
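This example (and example #18 above) calls a projection_matrix(camera_parameters, homography) helper that is not shown. A common construction, sketched here under the assumption that the marker lies on the Z=0 plane, recovers the rotation/translation columns from K^-1 H, re-orthonormalizes the two rotation axes, and completes the basis with their cross product; the original projects' implementations may differ.

import math
import numpy as np

def projection_matrix(camera_parameters, homography):
    # columns of [r1 r2 t] in camera coordinates (the sign flip is a common convention fix)
    rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography * (-1))
    col_1, col_2, col_3 = rot_and_transl[:, 0], rot_and_transl[:, 1], rot_and_transl[:, 2]
    # normalise so the two rotation columns have roughly unit length
    l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))
    rot_1, rot_2, translation = col_1 / l, col_2 / l, col_3 / l
    # build an orthonormal pair from the (noisy) rotation columns
    c = rot_1 + rot_2
    p = np.cross(rot_1, rot_2)
    d = np.cross(c, p)
    rot_1 = (c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2)) / math.sqrt(2)
    rot_2 = (c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2)) / math.sqrt(2)
    rot_3 = np.cross(rot_1, rot_2)
    # 3x4 [R|t] composed with the intrinsics gives the projection passed to render()
    projection = np.stack((rot_1, rot_2, rot_3, translation)).T
    return np.dot(camera_parameters, projection)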
Example #23
                    #Delete above statement
                    if digits[number].any() != 0:
                        continue

                    bottom = squares[number][0][0] + 0.25 * side
                    left = squares[number][1][1] - 0.25 * side
                    final_image = cv2.putText(final_image,
                                              solved[number],
                                              (int(bottom), int(left)),
                                              cv2.FONT_HERSHEY_SIMPLEX,
                                              0.5, (0, 0, 255),
                                              thickness=1)

                cv2.imwrite('/home/sahil/SudokuSolver/output/Final_sol.png',
                            final_image)
                h, mask = cv2.findHomography(sudoku_coordinates,
                                             np.array(corners))
                final_image = cv2.warpPerspective(final_image, h,
                                                  (width, height))
                final_image = cv2.addWeighted(final_image, 0.5, original_frame,
                                              0.5, 1)
                cv2.imshow('Sudoku solver', final_image)
                for i in range(60):
                    out.write(final_image)
                    cv2.imshow('Sudoku solver', final_image)

                break
            # Stop the webcam now and wait 5 seconds to display the output. Also save the image

    cv2.imshow('Sudoku solver', original_frame)

    # Exit on ESC
Example #24
def asift_main(image1: str, image2: str, detector_name: str = "sift-flann"):
    """
    Main function of ASIFT Python implementation.

    :param image1: Path for first image
    :param image2: Path for second image
    :param detector_name: (sift|surf|orb|akaze|brisk)[-flann] Detector type to use, default as SIFT. Add '-flann' to use FLANN matching.
    :return: None (Will return coordinate pairs in future)
    """
    # It seems that FLANN has performance issues, may be replaced by CUDA in future

    # Read images
    ori_img1 = cv2.imread(image1, cv2.IMREAD_GRAYSCALE)
    ori_img2 = cv2.imread(image2, cv2.IMREAD_GRAYSCALE)

    # Initialize feature detector and keypoint matcher
    detector, matcher = init_feature(detector_name)

    # Exit when reading empty image
    if ori_img1 is None or ori_img2 is None:
        print("Failed to load images")
        sys.exit(1)

    # Exit when encountering unknown detector parameter
    if detector is None:
        print(f"Unknown detector: {detector_name}")
        sys.exit(1)

    ratio_1 = 1
    ratio_2 = 1

    if ori_img1.shape[0] > MAX_SIZE or ori_img1.shape[1] > MAX_SIZE:
        ratio_1 = MAX_SIZE / ori_img1.shape[1]
        print("Large input detected, image 1 will be resized")
        img1 = image_resize(ori_img1, ratio_1)
    else:
        img1 = ori_img1

    if ori_img2.shape[0] > MAX_SIZE or ori_img2.shape[1] > MAX_SIZE:
        ratio_2 = MAX_SIZE / ori_img2.shape[1]
        print("Large input detected, image 2 will be resized")
        img2 = image_resize(ori_img2, ratio_2)
    else:
        img2 = ori_img2

    print(f"Using {detector_name.upper()} detector...")

    # Profile time consumption of keypoints extraction
    with Timer(f"Extracting {detector_name.upper()} keypoints..."):
        pool = ThreadPool(processes=cv2.getNumberOfCPUs())
        kp1, desc1 = affine_detect(detector, img1, pool=pool)
        kp2, desc2 = affine_detect(detector, img2, pool=pool)

    print(f"img1 - {len(kp1)} features, img2 - {len(kp2)} features")

    # Profile time consumption of keypoints matching
    with Timer('Matching...'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)

    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)

    if len(p1) >= 4:
        # TODO: The effect of resizing on homography matrix needs to be investigated.
        # TODO: Investigate function consistency when images aren't resized.
        for index in range(len(p1)):
            pt = p1[index]
            p1[index] = pt / ratio_1

        for index in range(len(p2)):
            pt = p2[index]
            p2[index] = pt / ratio_2

        for index in range(len(kp_pairs)):
            element = kp_pairs[index]
            kp1, kp2 = element

            new_kp1 = cv2.KeyPoint(kp1.pt[0] / ratio_1, kp1.pt[1] / ratio_1,
                                   kp1.size)
            new_kp2 = cv2.KeyPoint(kp2.pt[0] / ratio_2, kp2.pt[1] / ratio_2,
                                   kp2.size)

            kp_pairs[index] = (new_kp1, new_kp2)

        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print(f"{np.sum(status)} / {len(status)}  inliers/matched")
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print(f"{len(p1)} matches found, not enough for homography estimation")

    # kp_pairs: list[(cv2.KeyPoint, cv2.KeyPoint)]

    draw_match("ASIFT Match Result", ori_img1, ori_img2, kp_pairs, None,
               H)  # Visualize result
    cv2.waitKey()

    log_keypoints(
        kp_pairs,
        "sample/keypoints.txt")  # Save keypoint pairs for further inspection

    print('Done')
Example #25
def tracking_lucas_kanade():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    img_chocolate = cv2.imread('marker.jpg')
    gray_chocolate = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    orb = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kpts1, descs1 = orb.detectAndCompute(gray_chocolate, None)

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (old_frame.shape[1], old_frame.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_lucas_kanade.avi', fourcc, 30.0,
                          image_size)

    frno = 0
    restart = False
    while (1):
        frno += 1
        ret, frame = cap.read()
        if ret:

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                orb = cv2.ORB_create(1000, 1.1, 13)
                kpts2, descs2 = orb.detectAndCompute(frame_gray, None)
                restart = False

            kpts2, descs2 = orb.detectAndCompute(frame_gray, None)

            matches = bf.match(descs1, descs2)
            # Sort them in the order of their distance.
            dmatches = sorted(matches, key=lambda x: x.distance)

            ## extract the matched keypoints
            src_pts = np.float32([kpts1[m.queryIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kpts2[m.trainIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)

            ## find homography matrix and do perspective transform
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            h, w = img_chocolate.shape[:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            ## draw found regions
            frm = cv2.polylines(frame, [np.int32(dst)], True, (0, 0, 255), 1,
                                cv2.LINE_AA)

            # ## draw match lines
            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches[:8], None, flags=2)

            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                   dst_pts, None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = dst_pts[successful]

            # draw the tracks
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                    color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4,
                                       color[i].tolist(), -1)
                    count_of_moved += 1

            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches, None, flags=2) #[:8]
            out.write(frame)

            cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)

            cv2.imshow('orb_match', frame)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
Example #26
def tracking_orb():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    ret, frm = cap.read()
    img_chocolate = cv2.imread('marker.jpg')

    frm_count = 0
    key = None

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (frm.shape[1], frm.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_orb.mp4', fourcc, 30.0, image_size)

    while ret:

        ## Create ORB object and BF object(using HAMMING)
        orb = cv2.ORB_create()

        gray2 = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

        # gray2 = cv2.equalizeHist(gray2)
        # gray1 = cv2.equalizeHist(gray1)

        ## Find the keypoints and descriptors with ORB
        kpts1, descs1 = orb.detectAndCompute(gray1, None)
        kpts2, descs2 = orb.detectAndCompute(gray2, None)

        # create BFMatcher object
        ## match descriptors and sort them in the order of their distance
        bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)

        # Match descriptors.
        matches = bf.match(descs1, descs2)

        # Sort them in the order of their distance.
        dmatches = sorted(matches, key=lambda x: x.distance)

        ## extract the matched keypoints
        src_pts = np.float32([kpts1[m.queryIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kpts2[m.trainIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)

        ## find homography matrix and do perspective transform
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_chocolate.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        ## draw found regions
        frm = cv2.polylines(frm, [np.int32(dst)], True, (0, 0, 255), 1,
                            cv2.LINE_AA)

        ## draw match lines
        res = cv2.drawMatches(img_chocolate,
                              kpts1,
                              frm,
                              kpts2,
                              dmatches[:8],
                              None,
                              flags=2)

        # writer.write(res)
        cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)
        # cv2.imshow("orb_match", frm)
        out.write(frm)
        cv2.imshow("orb_match", res)

        # Pause on pressing of space.
        if key == ord(' '):
            wait_period = 0
        else:
            wait_period = 30

        key = cv2.waitKey(wait_period)
        ret, frm = cap.read()
        frm_count += 1

    cv2.destroyAllWindows()
    cap.release()
    out.release()

    return 0
Example #27
# record the (x, y) coordinates of the keypoints
pos1 = np.float32([kp.pt for kp in kp1])
pos2 = np.float32([kp.pt for kp in kp2])

# match the feature descriptors
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []

for m, n in matches:

    # filter good matches using the KNN ratio test
    if m.distance < 0.75 * n.distance:

        # record the indices of the two keypoints of each good match
        good.append((m.trainIdx, m.queryIdx))

# store the (x, y) coordinates of the left-image points of the matches in pts1
pts1 = np.float32([pos1[i] for (_, i) in good])

# store the (x, y) coordinates of the right-image points of the matches in pts2
pts2 = np.float32([pos2[i] for (i, _) in good])

# estimate the homography matrix H (a linear transform) from these point pairs
(H, status) = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)

# warp
res = cv2.warpPerspective(img2, H,
                          (img1.shape[1] + img2.shape[1], img1.shape[0]))
res[0:img1.shape[0], 0:img1.shape[1]] = img1
imshow(res)
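The fragment above starts mid-stream: it assumes img1/img2, kp1/kp2 and des1/des2 already exist, and imshow is presumably matplotlib's. One way to produce those inputs (file names are placeholders):

import cv2
from matplotlib.pyplot import imshow

img1 = cv2.imread("left.jpg")
img2 = cv2.imread("right.jpg")
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY), None)
kp2, des2 = sift.detectAndCompute(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY), None)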