import cv2
import numpy as np

# cvh (OpenCV helpers), trfm (rigid-transform helpers) and calibration_tools
# are project-local modules, defined elsewhere in this repository.

def calibrate_relative_poses_interactive(image_sets, cameraMatrixs, distCoeffss, imageSizes, boardSizes, board_scales, board_rvecs, board_tvecs):
    """
    Estimate the relative poses (as 4x4 projection matrices) between many cameras.
    The relative poses are expressed with respect to the first camera.
    
    Each camera should be looking at its own chessboard;
    use chessboards of different sizes if a camera can also see chessboards that are not associated with it.
    'board_scales' scales chessboard units to world units.
    'board_rvecs' and 'board_tvecs' transform the rescaled local chessboard coordinates to world coordinates.
    
    The inverse of the worst-case reprojection error is used for weighting.
    """
    num_cams = len(image_sets)
    num_images = len(image_sets[0])
    reproj_error_max = 0
    
    # Preprocess object-points of the different boards
    board_objps = []
    for boardSize, board_scale, board_rvec, board_tvec in zip(
            boardSizes, board_scales, board_rvecs, board_tvecs ):
        objp = np.ones((np.prod(boardSize), 4))
        objp[:, 0:3] = calibration_tools.grid_objp(boardSize) * board_scale
        objp = objp.dot(trfm.P_from_R_and_t(cvh.Rodrigues(board_rvec), np.array(board_tvec).reshape(3, 1))[0:3, :].T)
        board_objps.append(objp)
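    # Worked example (assuming grid_objp yields unit-spaced corners): with
    # board_scale = 2., an identity board_rvec and board_tvec = (6., 0., 0.),
    # the corner at grid position (1, 0) is first scaled to (2., 0., 0.) and
    # then translated to world coordinates (8., 0., 0.).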
    
    # Calculate all absolute poses
    Ps = np.zeros((num_images, num_cams, 4, 4))
    weights = np.zeros((num_images, 1, 1, 1))
    for i, images in enumerate(zip(*image_sets)):
        reproj_error = 0
        for c, (image, cameraMatrix, distCoeffs, imageSize, boardSize, board_objp) in enumerate(zip(
                images, cameraMatrixs, distCoeffss, imageSizes, boardSizes, board_objps )):
            img = cv2.imread(image)
            ret, corners = cvh.extractChessboardFeatures(img, boardSize)
            if not ret:
                print ("Error: Image '%s' didn't contain a chessboard of size %s." % (image, boardSize))
                return False, None
            
            # Draw and display the corners
            cv2.drawChessboardCorners(
                    img, boardSize, corners, ret )
            cv2.imshow("img", img)
            cv2.waitKey(100)
            
            ret, rvec, tvec = cv2.solvePnP(board_objp, corners, cameraMatrix, distCoeffs)
            Ps[i, c, :, :] = trfm.P_from_R_and_t(cvh.Rodrigues(rvec), tvec)
            reproj_error = max(reprojection_error(    # max: worst case
                    [board_objp], [corners], cameraMatrix, distCoeffs, [rvec], [tvec] )[1], reproj_error)
        reproj_error_max = max(reproj_error, reproj_error_max)
        weights[i] = 1. / reproj_error
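        # Example of this weighting: an image set whose worst per-camera
        # reprojection error is 0.5 px gets twice the weight of a set whose
        # worst error is 1.0 px (the weights are normalized below; an error of
        # exactly zero would need a small epsilon guard in practice).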
    
    # Apply weighting on Ps, and rebase against first camera
    Ps *= weights / weights.sum()
    Ps = Ps.sum(axis=0)
    Pref_inv = trfm.P_inv(Ps[0, :, :])    # use first cam as reference
    return True, [P.dot(Pref_inv) for P in Ps], reproj_error_max
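
# The function above leans on two project-local trfm helpers. Below is a
# minimal sketch of what they are assumed to do (4x4 homogeneous rigid-body
# transforms); the real module may differ, and the _sketch names are
# hypothetical:
def _P_from_R_and_t_sketch(R, t):
    # Assemble [R | t; 0 0 0 1] from a 3x3 rotation and a 3x1 translation.
    P = np.eye(4)
    P[0:3, 0:3] = R
    P[0:3, 3:4] = np.asarray(t, dtype=float).reshape(3, 1)
    return P

def _P_inv_sketch(P):
    # Invert a rigid-body transform without a general matrix inverse:
    # inv([R | t]) = [R.T | -R.T t].
    R, t = P[0:3, 0:3], P[0:3, 3:4]
    P_out = np.eye(4)
    P_out[0:3, 0:3] = R.T
    P_out[0:3, 3:4] = -R.T.dot(t)
    return P_out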
from math import pi
from os.path import join as join_path    # assumption: join_path aliases os.path.join

def main():
    global boardSize, filename_base_chessboards, filename_intrinsics, filename_distorted, filename_triangl_pose_est_left, filename_triangl_pose_est_right, filename_base_extrinsics, filenames_extra_chessboards, filenames_extra_intrinsics, extra_boardSizes, extra_board_scales, extra_board_rvecs, extra_board_tvecs, device_id
    boardSize = (8, 6)
    filename_base_chessboards = join_path(
        "data", "chessboards",
        "chessboard*.jpg")  # calibration images of the base camera
    filename_intrinsics = join_path("results", "camera_intrinsics.txt")
    filename_distorted = join_path(
        "data", "chessboards", "chessboard07.jpg")  # a randomly chosen image
    #filename_triangl_pose_est_left = join_path("data", "chessboards", "chessboard07.jpg")    # a randomly chosen image
    #filename_triangl_pose_est_right = join_path("data", "chessboards", "chessboard08.jpg")    # a randomly chosen image
    filename_triangl_pose_est_left = join_path(
        "data", "chessboards_and_nonplanar",
        "image-0001.jpeg")  # a randomly chosen image
    filename_triangl_pose_est_right = join_path(
        "data", "chessboards_and_nonplanar",
        "image-0056.jpeg")  # a randomly chosen image
    filename_base_extrinsics = join_path("results", "chessboards_extrinsic",
                                         "chessboard")
    filenames_extra_chessboards = (
        join_path(
            "data", "chessboards_front",
            "front-*.jpg"),  # sets of calibration images of extra cameras
        join_path("data", "chessboards_bottom", "bottom-*.jpg"))
    filenames_extra_intrinsics = (join_path("results",
                                            "camera_intrinsics_front.txt"),
                                  join_path("results",
                                            "camera_intrinsics_bottom.txt")
                                  )  # intrinsics of extra cameras
    extra_boardSizes = ((8, 6), (8, 6))
    extra_board_scales = (1., 1.)
    extra_board_rvecs = ((0., 0., 0.), (0., -pi / 2, 0.))
    extra_board_tvecs = ((0., 0., 0.), (6., 0., (1200. - 193.) / 27.6 + 1.))
    device_id = 1  # webcam

    nonplanar_left = nonplanar_right = np.zeros((0, 2))

    help_text = """\
    Choose between: (in order)
        1: prepare_object_points (required)
        2: calibrate_camera_interactive (required for "reprojection_error")
        3: save_camera_intrinsics
        4: load_camera_intrinsics (required)
        5: undistort_image
        6: reprojection_error
        7: triangl_pose_est_interactive
        8: realtime_pose_estimation (recommended)
        9: calibrate_relative_poses_interactive
        q: quit
    
    Info: Sometimes you will be prompted: 'someVariable [defaultValue]: '.
          In that case you can type a new value,
          or simply press ENTER to keep the default value.
    """
    from textwrap import dedent
    print(dedent(help_text))

    inp = ""
    while inp.lower() != "q":
        inp = input("\n: ").strip()    # Python 3: input() replaces raw_input()

        if inp == "1":
            get_variable("boardSize", lambda x: eval("(%s)" % x))
            print()  # add new-line

            objp = calibration_tools.grid_objp(boardSize)

        elif inp == "2":
            get_variable("filename_base_chessboards")
            from glob import glob
            images = sorted(glob(filename_base_chessboards))
            print()  # add new-line

            reproj_error, cameraMatrix, distCoeffs, rvecs, tvecs, objectPoints, imagePoints, imageSize = \
                    calibrate_camera_interactive(images, objp, boardSize)
            print("cameraMatrix:\n", cameraMatrix)
            print("distCoeffs:\n", distCoeffs)
            print("reproj_error:", reproj_error)

            cv2.destroyAllWindows()

        elif inp == "3":
            get_variable("filename_intrinsics")
            print()  # add new-line

            calibration_tools.save_camera_intrinsics(filename_intrinsics,
                                                     cameraMatrix, distCoeffs,
                                                     imageSize)

        elif inp == "4":
            get_variable("filename_intrinsics")
            print()  # add new-line

            cameraMatrix, distCoeffs, imageSize = \
                    calibration_tools.load_camera_intrinsics(filename_intrinsics)

        elif inp == "5":
            get_variable("filename_distorted")
            img = cv2.imread(filename_distorted)
            print()  # add new-line

            img_undistorted, roi = calibration_tools.undistort_image(
                img, cameraMatrix, distCoeffs, imageSize)
            cv2.imshow("Original Image", img)
            cv2.imshow("Undistorted Image", img_undistorted)
            print("Press any key to continue.")
            cv2.waitKey()

            cv2.destroyAllWindows()

        elif inp == "6":
            mean_error, square_error = reprojection_error(
                objectPoints, imagePoints, cameraMatrix, distCoeffs, rvecs,
                tvecs)
            print("mean absolute error:", mean_error)
            print("square error:", square_error)

        elif inp == "7":
            print(triangl_pose_est_interactive.__doc__)

            get_variable("filename_triangl_pose_est_left")
            img_left = cv2.imread(filename_triangl_pose_est_left)
            get_variable("filename_triangl_pose_est_right")
            img_right = cv2.imread(filename_triangl_pose_est_right)
            print()  # add new-line

            nonplanar_left, nonplanar_right = triangl_pose_est_interactive(
                    img_left, img_right, cameraMatrix, distCoeffs, imageSize,
                    objp, boardSize, nonplanar_left, nonplanar_right)

            cv2.destroyAllWindows()

        elif inp == "8":
            print(realtime_pose_estimation.__doc__)

            get_variable("device_id", int)
            get_variable("filename_base_extrinsics")
            print()  # add new-line

            realtime_pose_estimation(device_id, filename_base_extrinsics,
                                     cameraMatrix, distCoeffs, objp, boardSize)

            cv2.destroyAllWindows()

        elif inp == "9":
            print(calibrate_relative_poses_interactive.__doc__)

            get_variable("filenames_extra_chessboards",
                         lambda x: eval("(%s)" % x))
            from glob import glob
            image_sets = [
                sorted(glob(images)) for images in filenames_extra_chessboards
            ]
            get_variable("filenames_extra_intrinsics",
                         lambda x: eval("(%s)" % x))
            cameraMatrixs, distCoeffss, imageSizes = zip(
                *map(calibration_tools.load_camera_intrinsics,
                     filenames_extra_intrinsics))
            get_variable("extra_boardSizes", lambda x: eval("(%s)" % x))
            get_variable("extra_board_scales", lambda x: eval("(%s)" % x))
            get_variable("extra_board_rvecs", lambda x: eval("(%s)" % x))
            get_variable("extra_board_tvecs", lambda x: eval("(%s)" % x))
            print()  # add new-line

            ret, Ps, reproj_error_max = \
                    calibrate_relative_poses_interactive(image_sets, cameraMatrixs, distCoeffss, imageSizes,
                                                         extra_boardSizes, extra_board_scales, extra_board_rvecs, extra_board_tvecs)
            if ret:
                print("Ps:")
                for P in Ps:
                    print(P)
                print("reproj_error_max:", reproj_error_max)
import os

# Besides cv2 and numpy, this example relies on the project-local cvh and
# calibration_tools modules, plus several module-level settings (fast,
# matcher, blue, max_OF_error, ...); a hedged sketch of those settings
# follows after this main().

def main():
    # Initially known data
    boardSize = (8, 6)
    objp = calibration_tools.grid_objp(boardSize)

    cameraMatrix, distCoeffs, imageSize = calibration_tools.load_camera_intrinsics(
        os.path.join("..", "..", "datasets", "webcam", "camera_intrinsics.txt")
    )

    # Initiate 2d 3d arrays
    objectPoints = []
    imagePoints = []

    # Select working (or 'testing') set
    from glob import glob

    images = sorted(glob(os.path.join("..", "..", "datasets", "webcam", "captures2", "*.jpeg")))

    def main_loop(left_points, left_gray, left_img, right_img, triangl_idxs, chessboard_idxs):
        # Detect right (key)points
        right_keypoints = fast.detect(right_img)
        right_FAST_points = np.array([kp.pt for kp in right_keypoints], dtype=np.float32)

        # Visualize right_FAST_points
        print("Visualize right_FAST_points")
        cv2.imshow(
            "img", cv2.drawKeypoints(right_img, [cv2.KeyPoint(p[0], p[1], 7.0) for p in right_FAST_points], None, color=blue)
        )  # blue markers with size 7; None (required in OpenCV 3+) lets OpenCV allocate the output image
        cv2.waitKey()

        # Calculate optical flow (= 'OF') field from left to right
        right_gray = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
        right_OF_points, status_OF, err_OF = cv2.calcOpticalFlowPyrLK(
            left_gray, right_gray, left_points, None
        )  # left_points are the points to start tracking from; None = no initial guess
        err_OF = err_OF.reshape(-1)

        def match_OF_based(
            right_OF_points,
            right_FAST_points,
            err_OF,
            status_OF,
            max_radius_OF_to_FAST,
            max_dist_ratio,
            left_point_idxs=None,
        ):  # if not None, left_point_idxs specifies mask
            # Filter out the OF points with high error
            right_OF_points, right_OF_to_left_idxs = zip(
                *[
                    (p, i)
                    for i, p in enumerate(right_OF_points)
                    if status_OF[i]  # only include correctly tracked OF-points
                    and err_OF[i] < max_OF_error  # error should be low enough
                    and (left_point_idxs is None or i in left_point_idxs)  # apply mask
                ]
            )
            right_OF_points = np.array(right_OF_points)

            # Visualize right_OF_points
            print("Visualize right_OF_points")
            cv2.imshow(
                "img",
                cv2.drawKeypoints(right_img, [cv2.KeyPoint(p[0], p[1], 7.0) for p in right_OF_points], None, color=blue),
            )  # blue markers with size 7
            cv2.waitKey()

            # Align right_OF_points with right_FAST_points by matching them
            matches_twoNN = matcher.radiusMatch(
                right_OF_points, right_FAST_points, max_radius_OF_to_FAST  # query points  # train points
            )

            # Filter out ambiguous matches by a ratio-test, and prevent duplicates
            best_dist_matches_by_trainIdx = {}  # duplicate prevention: trainIdx -> match_best_dist
            for query_matches in matches_twoNN:
                # Ratio test
                if not (
                    len(query_matches) == 1  # only one match, probably a good one
                    or (
                        len(query_matches) > 1  # if more than one, the first two shouldn't lie too close
                        and query_matches[0].distance / query_matches[1].distance
                        < max_dist_ratio
                    )
                ):
                    continue

                # Relink match to use 'left_point' indices
                match = cv2.DMatch(
                    right_OF_to_left_idxs[query_matches[0].queryIdx],  # queryIdx: left_points
                    query_matches[0].trainIdx,  # trainIdx: right_FAST_points
                    query_matches[0].distance,
                )

                # Duplicate prevention
                if (
                    match.trainIdx not in best_dist_matches_by_trainIdx  # no duplicate found
                    or err_OF[match.queryIdx]  # replace an inferior duplicate, based on err_OF
                    < err_OF[best_dist_matches_by_trainIdx[match.trainIdx].queryIdx]
                ):
                    best_dist_matches_by_trainIdx[match.trainIdx] = match

            return best_dist_matches_by_trainIdx

        # Match between FAST -> FAST features
        matches_by_trainIdx = match_OF_based(
            right_OF_points, right_FAST_points, err_OF, status_OF, max_radius_OF_to_FAST["FAST"], max_dist_ratio["FAST"]
        )
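        # matches_by_trainIdx maps trainIdx (an index into right_FAST_points)
        # to the best DMatch, whose queryIdx indexes into left_points.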

        if allow_chessboard_matcher_and_refiner and chessboard_idxs:
            # Match between chessboard -> chessboard features
            matches_by_trainIdx_chessboard = match_OF_based(
                right_OF_points,
                right_FAST_points,
                err_OF,
                status_OF,
                max_radius_OF_to_FAST["chessboard"],
                max_dist_ratio["chessboard"],
                chessboard_idxs,
            )  # set mask

            # Overwrite FAST -> FAST feature matches by chessboard -> chessboard feature matches
            matches_by_trainIdx.update(matches_by_trainIdx_chessboard)

            # Refine chessboard features
            chessboard_corners_idxs = list(matches_by_trainIdx_chessboard)
            chessboard_corners = right_FAST_points[chessboard_corners_idxs]
            cv2.cornerSubPix(
                right_gray,
                chessboard_corners,
                (11, 11),  # window
                (-1, -1),  # deadzone
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001),
            )  # termination criteria
            right_FAST_points[chessboard_corners_idxs] = chessboard_corners

            # Update chessboard_idxs
            chessboard_idxs = set(matches_by_trainIdx_chessboard)
            print("Amount of chessboard features tracked in the new image:", len(chessboard_idxs))

        # Calculate average filtered OF vector
        trainIdxs = list(matches_by_trainIdx)
        queryIdxs = [matches_by_trainIdx[trainIdx].queryIdx for trainIdx in trainIdxs]
        mean_OF_vector = (right_FAST_points[trainIdxs] - left_points[queryIdxs]).mean(axis=0)
        mean_OF_vector_length = np.linalg.norm(mean_OF_vector)
        print(
            "mean_OF_vector (from LEFT to RIGHT):", mean_OF_vector, ";    mean_OF_vector_length:", mean_OF_vector_length
        )

        # Partition matches to make a distinction between previously triangulated points and non-triangl.
        matches_left_triangl_to_right_FAST = []
        matches_left_non_triangl_to_right_FAST = []
        for trainIdx in matches_by_trainIdx:
            match = matches_by_trainIdx[trainIdx]
            if matches_by_trainIdx[trainIdx].queryIdx in triangl_idxs:
                matches_left_triangl_to_right_FAST.append(match)
            else:
                matches_left_non_triangl_to_right_FAST.append(match)
        # and all matches together
        matches_left_to_right_FAST = matches_left_triangl_to_right_FAST + matches_left_non_triangl_to_right_FAST

        # Visualize (previously triangulated) left_points of corresponding outlier-filtered right_FAST_points
        print("Visualize LEFT  points (previously triangulated)")
        cv2.imshow(
            "img",
            cv2.drawKeypoints(
                left_img,
                [
                    cv2.KeyPoint(left_points[m.queryIdx][0], left_points[m.queryIdx][1], 7.0)
                    for m in matches_left_triangl_to_right_FAST
                ],
                None,
                color=blue,
            ),
        )  # blue markers with size 7
        cv2.waitKey()

        # Visualize (previously triangulated) outlier-filtered right_FAST_points
        print("Visualize RIGHT points (previously triangulated)")
        cv2.imshow(
            "img",
            cv2.drawKeypoints(
                right_img,
                [
                    cv2.KeyPoint(right_FAST_points[m.trainIdx][0], right_FAST_points[m.trainIdx][1], 7.0)
                    for m in matches_left_triangl_to_right_FAST
                ],
                None,
                color=blue,
            ),
        )  # blue markers with size 7
        cv2.waitKey()

        # Visualize (not yet triangulated) left_points of corresponding outlier-filtered right_FAST_points
        print("Visualize LEFT  points (not yet triangulated)")
        cv2.imshow(
            "img",
            cv2.drawKeypoints(
                left_img,
                [
                    cv2.KeyPoint(left_points[m.queryIdx][0], left_points[m.queryIdx][1], 7.0)
                    for m in matches_left_non_triangl_to_right_FAST
                ],
                None,
                color=blue,
            ),
        )  # blue markers with size 7
        cv2.waitKey()

        # Visualize (not yet triangulated) outlier-filtered right_FAST_points
        print("Visualize RIGHT points (not yet triangulated)")
        cv2.imshow(
            "img",
            cv2.drawKeypoints(
                right_img,
                [
                    cv2.KeyPoint(right_FAST_points[m.trainIdx][0], right_FAST_points[m.trainIdx][1], 7.0)
                    for m in matches_left_non_triangl_to_right_FAST
                ],
                None,
                color=blue,
            ),
        )  # blue markers with size 7
        cv2.waitKey()

        # Pose estimation and Triangulation
        # ...

        # Update triangl_idxs    TODO: filter using outlier-removal by epipolar constraints
        triangl_idxs = set(matches_by_trainIdx)

        return right_FAST_points, right_gray, triangl_idxs, chessboard_idxs

    ###----------------------------- Frame 0 (init) -----------------------------###

    print("###---------------------- Frame 0 (init) ----------------------###")

    left_img = cv2.imread(images[0])
    left_gray = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
    right_img = cv2.imread(images[1])

    # Detect left (key)points
    ret, left_points = cvh.extractChessboardFeatures(left_img, boardSize)
    if not ret:
        raise Exception("No chessboard features detected.")

    # Set masks to alter matches' priority
    chessboard_idxs = set(range(len(left_points)))  # all chessboard corners are in sight
    triangl_idxs = set(chessboard_idxs)  # the chessboard feature points are already triangulated

    # Invoke main loop
    right_FAST_points, right_gray, triangl_idxs, chessboard_idxs = main_loop(
        left_points, left_gray, left_img, right_img, triangl_idxs, chessboard_idxs
    )

    for i in range(2, 14):
        ###----------------------------- Frame i -----------------------------###

        print("###---------------------- Frame %s ----------------------###" % i)

        # Update data for new frame
        left_img = right_img
        left_gray = right_gray
        right_img = cv2.imread(images[i])

        # Use previous feature points
        left_points = right_FAST_points

        # Invoke main loop
        right_FAST_points, right_gray, triangl_idxs, chessboard_idxs = main_loop(
            left_points, left_gray, left_img, right_img, triangl_idxs, chessboard_idxs
        )
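
# The main loop above references several module-level objects defined
# elsewhere in the original file. Below is a plausible setup sketch; the
# concrete detector, matcher and threshold values are assumptions:
fast = cv2.FastFeatureDetector_create()  # FAST keypoint detector
matcher = cv2.BFMatcher(cv2.NORM_L2)  # brute-force matcher, used via radiusMatch
blue = (255, 0, 0)  # BGR color for drawn keypoints
max_OF_error = 12.  # max acceptable pyramidal LK tracking error
max_radius_OF_to_FAST = {"FAST": 4., "chessboard": 8.}  # match radii (pixels)
max_dist_ratio = {"FAST": 0.7, "chessboard": 1.}  # ratio-test thresholds
allow_chessboard_matcher_and_refiner = True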