Example #1
def check_triangulation_input(base_img, new_img, imgp0, imgp1, rvec_keyfr,
                              tvec_keyfr, rvec, tvec, cameraMatrix,
                              distCoeffs):
    img = cvh.drawKeypointsAndMotion(base_img, imgp1, imgp0, rgb(0, 0, 255))
    draw_axis_system(img, rvec_keyfr, tvec_keyfr, cameraMatrix, distCoeffs)
    print "base_img check"
    cv2.imshow("img", img)
    cv2.waitKey()

    img = cvh.drawKeypointsAndMotion(new_img, imgp0, imgp1, rgb(0, 0, 255))
    draw_axis_system(img, rvec, tvec, cameraMatrix, distCoeffs)
    print "new_img check"
    cv2.imshow("img", img)
    cv2.waitKey()
def draw_axis_system(img, rvec, tvec, cameraMatrix, distCoeffs):
    axis_system_objp = np.array([ [0., 0., 0.],      # Origin (black)
                                  [4., 0., 0.],      # X-axis (red)
                                  [0., 4., 0.],      # Y-axis (green)
                                  [0., 0., 4.] ])    # Z-axis (blue)
    imgp_reproj, jacob = cv2.projectPoints(
            axis_system_objp, rvec, tvec, cameraMatrix, distCoeffs )
    origin, xAxis, yAxis, zAxis = np.rint(imgp_reproj.reshape(-1, 2)).astype(np.int32)    # round to nearest int
    if not (0 <= origin[0] < img.shape[1] and 0 <= origin[1] < img.shape[0]):    # projected origin lies out of the image
        return img    # so don't draw axis-system
    cvh.line(img, origin, xAxis, rgb(255,0,0), thickness=2, lineType=cv2.CV_AA)
    cvh.line(img, origin, yAxis, rgb(0,255,0), thickness=2, lineType=cv2.CV_AA)
    cvh.line(img, origin, zAxis, rgb(0,0,255), thickness=2, lineType=cv2.CV_AA)
    cvh.circle(img, origin, 4, rgb(0,0,0), thickness=-1)    # filled circle, radius 4
    cvh.circle(img, origin, 5, rgb(255,255,255), thickness=2)    # white 'O', radius 5
    return img
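
For newer OpenCV builds (3.x/4.x), cv2.CV_AA no longer exists (it was renamed cv2.LINE_AA) and the cvh helpers above are not available. As a point of comparison, here is a minimal self-contained sketch of the same axis-system drawing using plain cv2 calls; the function name and the 'scale' parameter are illustrative additions, not part of the original module.

import cv2
import numpy as np

def draw_axis_system_plain(img, rvec, tvec, cameraMatrix, distCoeffs, scale=4.):
    # Project the origin and the three axis endpoints into the image
    objp = np.float32([[0, 0, 0], [scale, 0, 0], [0, scale, 0], [0, 0, scale]])
    imgp, _ = cv2.projectPoints(objp, rvec, tvec, cameraMatrix, distCoeffs)
    pts = [tuple(int(v) for v in p) for p in np.rint(imgp.reshape(-1, 2))]
    origin, x_axis, y_axis, z_axis = pts
    h, w = img.shape[:2]
    if not (0 <= origin[0] < w and 0 <= origin[1] < h):
        return img  # projected origin lies outside the image: skip drawing
    cv2.line(img, origin, x_axis, (0, 0, 255), 2, cv2.LINE_AA)  # X-axis (red, BGR)
    cv2.line(img, origin, y_axis, (0, 255, 0), 2, cv2.LINE_AA)  # Y-axis (green)
    cv2.line(img, origin, z_axis, (255, 0, 0), 2, cv2.LINE_AA)  # Z-axis (blue)
    cv2.circle(img, origin, 4, (0, 0, 0), -1)        # filled origin dot
    cv2.circle(img, origin, 5, (255, 255, 255), 2)   # white 'O' around it
    return img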
Example #4
    def draw(
            self, img, rvec, tvec, status, cameraMatrix, distCoeffs,
            triangl_idxs, nontriangl_idxs, all_idxs_tmp, new_imgp,
            imgp_to_objp_idxs, objp, objp_groups, group_id, color_palette,
            color_palette_size):  # TODO: move these variables to another class
        """
        Draw 2D composite view.
        
        'status' can have the following values:
            0: bad frame
            1: good frame but not a keyframe
            2: good frame and also a keyframe
        """

        # Start drawing on copy of current image
        self.img[:, :, :] = img

        if status:
            # Draw world axis-system
            draw_axis_system(self.img, rvec, tvec, cameraMatrix, distCoeffs)

            # Draw already-triangulated points as dots with text indicating depth w.r.t. the camera
            imgp = idxs_get_new_imgp_by_idxs(triangl_idxs, new_imgp,
                                             all_idxs_tmp)
            text_imgp = np.array(imgp)
            text_imgp += [(-15, 10)]  # adjust for text-position
            objp_idxs = imgp_to_objp_idxs[np.array(sorted(triangl_idxs))]
            groups = objp_groups[objp_idxs]
            P = trfm.P_from_R_and_t(cvh.Rodrigues(rvec), tvec)
            objp_depth = trfm.projection_depth(objp[objp_idxs], P)
            objp_colors = color_palette[
                groups % color_palette_size]  # set color by group id
            for ip, ipt, opd, opg, color in zip(imgp, text_imgp, objp_depth,
                                                groups, objp_colors):
                cvh.circle(self.img, ip, 2, color,
                           thickness=-1)  # draw triangulated point
                cvh.putText(self.img, "%.3f" % opd, ipt, fontFace, fontScale,
                            color)  # draw depths

            # Draw to-be-triangulated points as crosses
            nontriangl_imgp = idxs_get_new_imgp_by_idxs(
                nontriangl_idxs, new_imgp, all_idxs_tmp).astype(int)
            color = color_palette[group_id % color_palette_size]
            for p in nontriangl_imgp:
                cvh.line(self.img, (p[0] - 2, p[1]), (p[0] + 2, p[1]), color)
                cvh.line(self.img, (p[0], p[1] - 2), (p[0], p[1] + 2), color)

        else:
            # Draw red corner around image: it's a bad frame
            for (p1, p2) in self.image_box:
                cvh.line(self.img, p1, p2, rgb(255, 0, 0), thickness=4)

        # Display image
        cv2.imshow(self.img_title, self.img)
        cv2.waitKey()
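
The 'self.image_box' used in the bad-frame branch above is defined elsewhere in the class; judging from the drawing loop, it plausibly holds the four border segments of the image. A hedged sketch of how such a value could be built (the 'imageSize' name is an assumption):

# Hypothetical construction of image_box: the four image-border segments,
# drawn in red by the loop above when a frame is rejected.
w, h = imageSize  # assumed (width, height) of the working images
image_box = [((0, 0), (w - 1, 0)),          # top edge
             ((w - 1, 0), (w - 1, h - 1)),  # right edge
             ((w - 1, h - 1), (0, h - 1)),  # bottom edge
             ((0, h - 1), (0, 0))]          # left edge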
Example #5
def triangl_pose_est_interactive(img_left, img_right, cameraMatrix, distCoeffs, imageSize, objp, boardSize, nonplanar_left, nonplanar_right):
    """ (TODO: remove debug-prints)
    Triangulation and relative pose estimation will be performed from LEFT to RIGHT image.
    
    Both images have to contain the whole chessboard,
    in order to make a decent estimation of the relative pose based on 'solvePnP'.
    
    Then the user can manually create matches between non-planar objects.
    If the user omits this step, only triangulation of the chessboard corners will be performed,
    and they will be compared to the real 3D points.
    Otherwise the coordinates of the triangulated, manually matched points will be printed,
    and a relative pose estimation will be performed (using the essential matrix);
    this pose estimation will then be compared with the decent 'solvePnP' estimation.
    In that case you will be asked whether you want to mute the chessboard corners in all calculations;
    note that this requires at least 8 pairs of matches corresponding with non-planar geometry.
    
    During the manual matching process,
    switching between the LEFT and RIGHT image is done in a zigzag fashion.
    To stop selecting matches, press SPACE.
    
    Additionally, you can also introduce predefined non-planar geometry;
    this is useful if, e.g., you don't want to select non-planar geometry manually.
    """
    
    ### Set up initial state, and calculate some very accurate poses to compare against later
    
    K_left = cameraMatrix
    K_right = cameraMatrix
    K_inv_left = cvh.invert(K_left)
    K_inv_right = cvh.invert(K_right)
    distCoeffs_left = distCoeffs
    distCoeffs_right = distCoeffs
    
    # Extract chessboard features, together they form a set of 'planar' points
    ret_left, planar_left = cvh.extractChessboardFeatures(img_left, boardSize)
    ret_right, planar_right = cvh.extractChessboardFeatures(img_right, boardSize)
    if not ret_left or not ret_right:
        print ("Chessboard is not (entirely) in sight, aborting.")
        return
    
    # Save exact 2D and 3D points of the chessboard
    num_planar = planar_left.shape[0]
    planar_left_orig, planar_right_orig = planar_left, planar_right
    objp_orig = objp
    
    # Calculate P matrix of left pose, in reality this one is known
    ret, rvec_left, tvec_left = cv2.solvePnP(
            objp, planar_left, K_left, distCoeffs_left )
    P_left = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_left), tvec_left)
    
    # Calculate P matrix of right pose, in reality this one is unknown
    ret, rvec_right, tvec_right = cv2.solvePnP(
            objp, planar_right, K_right, distCoeffs_right )
    P_right = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_right), tvec_right)
    
    # Load previous non-planar inliers, if desired
    if nonplanar_left.size and not raw_input("Type 'no' if you don't want to use previous non-planar inliers? ").strip().lower() == "no":
        nonplanar_left = map(tuple, nonplanar_left)
        nonplanar_right = map(tuple, nonplanar_right)
    else:
        nonplanar_left = []
        nonplanar_right = []
    
    
    ### Create predefined non-planar 3D geometry, to enable automatic selection of non-planar points
    
    if raw_input("Type 'yes' if you want to create and select predefined non-planar geometry? ").strip().lower() == "yes":
        
        # A cube
        cube_coords = np.array([(0., 0., 0.), (1., 0., 0.), (1., 1., 0.), (0., 1., 0.),
                                (0., 0., 1.), (1., 0., 1.), (1., 1., 1.), (0., 1., 1.)])
        cube_coords *= 2    # scale
        cube_edges = np.array([(0, 1), (1, 2), (2, 3), (3, 0),
                               (4, 5), (5, 6), (6, 7), (7, 4),
                               (0, 4), (1, 5), (2, 6), (3, 7)])
        
        # An 8-point circle
        s2 = 1. / np.sqrt(2)
        circle_coords = np.array([( 1.,  0., 0.), ( s2,  s2, 0.),
                                  ( 0.,  1., 0.), (-s2,  s2, 0.),
                                  (-1.,  0., 0.), (-s2, -s2, 0.),
                                  ( 0., -1., 0.), ( s2, -s2, 0.)])
        circle_edges = np.array([(i, (i+1) % 8) for i in range(8)])
        
        # Position 2 cubes and 2 circles in the scene
        cube1 = np.array(cube_coords)
        cube1[:, 1] -= 1
        cube2 = np.array(cube_coords)
        cube2 = cvh.Rodrigues((0., 0., pi/4)).dot(cube2.T).T
        cube2[:, 0] += 4
        cube2[:, 1] += 3
        cube2[:, 2] += 2
        circle1 = np.array(circle_coords)
        circle1 *= 2
        circle1[:, 1] += 5
        circle1[:, 2] += 2
        circle2 = np.array(circle_coords)
        circle2 = cvh.Rodrigues((pi/2, 0., 0.)).dot(circle2.T).T
        circle2[:, 1] += 5
        circle2[:, 2] += 2
        
        # Print output to be used in Blender (see "calibrate_pose_visualization.blend")
        print ()
        print ("Cubes")
        print ("edges_cube = \\\n", map(list, cube_edges))
        print ("coords_cube1 = \\\n", map(list, cube1))
        print ("coords_cube2 = \\\n", map(list, cube2))
        print ()
        print ("Circles")
        print ("edges_circle = \\\n", map(list, circle_edges))
        print ("coords_circle1 = \\\n", map(list, circle1))
        print ("coords_circle2 = \\\n", map(list, circle2))
        print ()
        
        color = rgb(0, 200, 150)
        for verts, edges in zip([cube1, cube2, circle1, circle2],
                                [cube_edges, cube_edges, circle_edges, circle_edges]):
            out_left = cvh.wireframe3DGeometry(
                    img_left, verts, edges, color, rvec_left, tvec_left, K_left, distCoeffs_left )
            out_right = cvh.wireframe3DGeometry(
                    img_right, verts, edges, color, rvec_right, tvec_right, K_right, distCoeffs_right )
            valid_match_idxs = [i for i, (pl, pr) in enumerate(zip(out_left, out_right))
                                    if 0 <= min(pl[0], pr[0]) <= max(pl[0], pr[0]) < imageSize[0] and 
                                        0 <= min(pl[1], pr[1]) <= max(pl[1], pr[1]) < imageSize[1]
                                ]
            nonplanar_left += map(tuple, out_left[valid_match_idxs])    # concatenate
            nonplanar_right += map(tuple, out_right[valid_match_idxs])    # concatenate
        
        nonplanar_left = np.rint(nonplanar_left).astype(int)
        nonplanar_right = np.rint(nonplanar_right).astype(int)
    
    
    ### User can manually create matches between non-planar objects
    
    class ManualMatcher:
        def __init__(self, window_name, images, points):
            self.window_name = window_name
            self.images = images
            self.points = points
            self.img_idx = 0    # 0: left; 1: right
            cv2.namedWindow(window_name)
            cv2.setMouseCallback(window_name, self.onMouse)
        
        def onMouse(self, event, x, y, flags, userdata):
            if event == cv2.EVENT_LBUTTONDOWN:
                self.points[self.img_idx].append((x, y))
            
            elif event == cv2.EVENT_LBUTTONUP:
                # Switch images in a ping-pong fashion
                if len(self.points[0]) != len(self.points[1]):
                    self.img_idx = 1 - self.img_idx
        
        def run(self):
            print ("Select your matches. Press SPACE when done.")
            while True:
                img = cv2.drawKeypoints(self.images[self.img_idx], [cv2.KeyPoint(p[0],p[1], 7.) for p in self.points[self.img_idx]], color=rgb(0,0,255))
                cv2.imshow(self.window_name, img)
                key = cv2.waitKey(50) & 0xFF
                if key == ord(' '): break    # finish if SPACE is pressed
            
            num_points_diff = len(self.points[0]) - len(self.points[1])
            if num_points_diff:
                del self.points[num_points_diff < 0][-abs(num_points_diff):]
            print ("Selected", len(self.points[0]), "pairs of matches.")
    
    # Execute the manual matching
    nonplanar_left, nonplanar_right = list(nonplanar_left), list(nonplanar_right)
    ManualMatcher("Select match-points of non-planar objects", [img_left, img_right], [nonplanar_left, nonplanar_right]).run()
    num_nonplanar = len(nonplanar_left)
    has_nonplanar = (num_nonplanar > 0)
    if 0 < num_nonplanar < 8:
        print ("Warning: you've selected less than 8 pairs of matches.")
    nonplanar_left = np.array(nonplanar_left).reshape(num_nonplanar, 2)
    nonplanar_right = np.array(nonplanar_right).reshape(num_nonplanar, 2)
    
    mute_chessboard_corners = False
    if num_nonplanar >= 8 and raw_input("Type 'yes' if you want to exclude chessboard corners from calculations? ").strip().lower() == "yes":
        mute_chessboard_corners = True
        num_planar = 0
        planar_left = np.zeros((0, 2))
        planar_right = np.zeros((0, 2))
        objp = np.zeros((0, 3))
        print ("Chessboard corners muted.")
    
    has_prev_triangl_points = not mute_chessboard_corners    # normally in this example, this should always be True, unless user forces False
    
    
    ### Undistort points => normalized coordinates
    
    allfeatures_left = np.concatenate((planar_left, nonplanar_left))
    allfeatures_right = np.concatenate((planar_right, nonplanar_right))
    
    allfeatures_nrm_left = cv2.undistortPoints(np.array([allfeatures_left]), K_left, distCoeffs_left)[0]
    allfeatures_nrm_right = cv2.undistortPoints(np.array([allfeatures_right]), K_right, distCoeffs_right)[0]
    
    planar_nrm_left, nonplanar_nrm_left = allfeatures_nrm_left[:planar_left.shape[0]], allfeatures_nrm_left[planar_left.shape[0]:]
    planar_nrm_right, nonplanar_nrm_right = allfeatures_nrm_right[:planar_right.shape[0]], allfeatures_nrm_right[planar_right.shape[0]:]
    
    
    ### Calculate relative pose using the essential matrix
    
    # Only do pose estimation if we've got 2D points of non-planar geometry
    if has_nonplanar:
    
        # Determine inliers by calculating the fundamental matrix on all points,
        # except when mute_chessboard_corners is True: only use nonplanar_nrm_left and nonplanar_nrm_right
        F, status = cv2.findFundamentalMat(allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_RANSAC, 0.006 * np.amax(allfeatures_nrm_left), 0.99)    # threshold from [Snavely07 4.1]
        # OpenCV BUG: "status" matrix is not initialized with zeros in some cases, reproducable with 2 sets of 8 2D points equal to (0., 0.)
        #   maybe because "_mask.create()" is not called:
        #   https://github.com/Itseez/opencv/blob/7e2bb63378dafb90063af40caff20c363c8c9eaf/modules/calib3d/src/ptsetreg.cpp#L185
        # Workaround to test on outliers: use "!= 1", instead of "== 0"
        print (status.T)
        inlier_idxs = np.where(status == 1)[0]
        print ("Removed", allfeatures_nrm_left.shape[0] - inlier_idxs.shape[0], "outlier(s).")
        num_planar = np.where(inlier_idxs < num_planar)[0].shape[0]
        num_nonplanar = inlier_idxs.shape[0] - num_planar
        print ("num chessboard inliers:", num_planar)
        allfeatures_left, allfeatures_right = allfeatures_left[inlier_idxs], allfeatures_right[inlier_idxs]
        allfeatures_nrm_left, allfeatures_nrm_right = allfeatures_nrm_left[inlier_idxs], allfeatures_nrm_right[inlier_idxs]
        if not mute_chessboard_corners:
            objp = objp[inlier_idxs[:num_planar]]
            planar_left, planar_right = allfeatures_left[:num_planar], allfeatures_right[:num_planar]
            planar_nrm_left, planar_nrm_right = allfeatures_nrm_left[:num_planar], allfeatures_nrm_right[:num_planar]
        nonplanar_nrm_left, nonplanar_nrm_right = allfeatures_nrm_left[num_planar:], allfeatures_nrm_right[num_planar:]
        
        # Calculate first solution of relative pose
        if allfeatures_nrm_left.shape[0] >= 8:
            F, status = cv2.findFundamentalMat(allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_8POINT)
            print ("F:")
            print (F)
        else:
            print ("Error: less than 8 pairs of inliers found, I can't perform the 8-point algorithm.")
        #E = (K_right.T) .dot (F) .dot (K_left)    # "Multiple View Geometry in CV" by Hartley&Zisserman (9.12)
        E = F    # K = I because we already normalized the coordinates
        print ("Correct determinant of essential matrix?", (abs(cv2.determinant(E)) <= 1e-7))
        w, u, vt = cv2.SVDecomp(E, flags=cv2.SVD_MODIFY_A)
        print (w)
        if min(w[0], w[1]) / max(w[0], w[1]) < 0.7:    # ratio of smaller to larger singular value
            print ("Essential matrix' singular values deviate too much from expected")
        W = np.array([[0., -1., 0.],    # Hartley&Zisserman (9.13)
                      [1.,  0., 0.],
                      [0.,  0., 1.]])
        R = (u) .dot (W) .dot (vt)    # Hartley&Zisserman result 9.19
        det_R = cv2.determinant(R)
        print ("Coherent rotation?", (abs(det_R) - 1 <= 1e-7))
        if det_R - (-1) < 1e-7:    # http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
            # E *= -1:
            vt *= -1    # svd(-E) = u * w * (-v).T
            R *= -1     # => u * W * (-v).T = -R
            print ("det(R) == -1, compensated.")
        t = u[:, 2:3]    # Hartley&Zisserman result 9.19
        P = trfm.P_from_R_and_t(R, t)
        
        # Select the right solution, where the (center of the) 3D points lie(s) in front of both cameras;
        # when has_prev_triangl_points == True, we can do this a lot faster.
        
        test_point = np.ones((4, 1))    # 3D point to test the solutions with
        
        if has_prev_triangl_points:
            print ("Using advantage of already triangulated points' position")
            print (P)
            
            # Select the closest already triangulated cloudpoint idx to the center of the cloud
            center_of_mass = objp.sum(axis=0) / objp.shape[0]
            center_objp_idx = np.argmin(((objp - center_of_mass)**2).sum(axis=1))
            print ("center_objp_idx:", center_objp_idx)
            
            # Select the corresponding image points
            center_imgp_left = planar_nrm_left[center_objp_idx]
            center_imgp_right = planar_nrm_right[center_objp_idx]
            
            # Select the corresponding 3D point
            test_point[0:3, :] = objp[center_objp_idx].reshape(3, 1)
            print ("test_point:")
            print (test_point)
            test_point = P_left.dot(test_point)    # set the reference axis-system to the one of the left camera, note that are_points_in_front_of_left_camera is automatically True
            
            center_objp_triangl, triangl_status = iterative_LS_triangulation(
                    center_imgp_left.reshape(1, 2), np.eye(4),
                    center_imgp_right.reshape(1, 2), P )
            print ("triangl_status:", triangl_status)
            
            if (center_objp_triangl) .dot (test_point[0:3, 0:1]) < 0:
                P[0:3, 3:4] *= -1    # do a baseline reversal
                print (P, "fixed triangulation inversion")
            
            if P[0:3, :].dot(test_point)[2, 0] < 0:    # are_points_in_front_of_right_camera is False
                P[0:3, 0:3] = (u) .dot (W.T) .dot (vt)    # use the other solution of the twisted pair ...
                P[0:3, 3:4] *= -1    # ... and also do a baseline reversal
                print (P, "fixed camera projection inversion")
        
        elif num_nonplanar > 0:
            print ("Doing all ambiguity checks since there are no already triangulated points")
            print (P)
            
            for i in range(4):    # check all 4 solutions
                objp_triangl, triangl_status = iterative_LS_triangulation(
                        nonplanar_nrm_left, np.eye(4),
                        nonplanar_nrm_right, P )
                print ("triangl_status:", triangl_status)
                center_of_mass = objp_triangl.sum(axis=0) / len(objp_triangl)    # select the center of the triangulated cloudpoints
                test_point[0:3, :] = center_of_mass.reshape(3, 1)
                print ("test_point:")
                print (trfm.P_inv(P_left) .dot (test_point))
                
                if np.eye(3, 4).dot(test_point)[2, 0] > 0 and P[0:3, :].dot(test_point)[2, 0] > 0:    # are_points_in_front_of_cameras is True
                    break
                
                if i % 2:
                    P[0:3, 0:3] = (u) .dot (W.T) .dot (vt)   # use the other solution of the twisted pair
                    print (P, "using the other solution of the twisted pair")
                
                else:
                    P[0:3, 3:4] *= -1    # do a baseline reversal
                    print (P, "doing a baseline reversal")
        
        
        are_points_in_front_of_left_camera = (np.eye(3, 4).dot(test_point)[2, 0] > 0)
        are_points_in_front_of_right_camera = (P[0:3, :].dot(test_point)[2, 0] > 0)
        print ("are_points_in_front_of_cameras?", are_points_in_front_of_left_camera, are_points_in_front_of_right_camera)
        if not (are_points_in_front_of_left_camera and are_points_in_front_of_right_camera):
            print ("No valid solution found!")
        
        P_left_result = trfm.P_inv(P).dot(P_right)
        P_right_result = P.dot(P_left)
        
        print ("P_left")
        print (P_left)
        print ("P_rel")
        print (P)
        print ("P_right")
        print (P_right)
        print ("=> error:", reprojection_error(
                [objp_orig] * 2, [planar_left_orig, planar_right_orig],
                cameraMatrix, distCoeffs,
                [rvec_left, rvec_right],
                [tvec_left, tvec_right] )[1])
        
        print ("P_left_result")
        print (P_left_result)
        print ("P_right_result")
        print (P_right_result)
        # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
        print ("=> error:", reprojection_error(
                [objp_orig] * 2, [planar_left_orig, planar_right_orig],
                cameraMatrix, distCoeffs,
                [cvh.Rodrigues(P_left[0:3, 0:3]), cvh.Rodrigues(P_right_result[0:3, 0:3])],
                [P_left[0:3, 3], P_right_result[0:3, 3]] )[1])
    
    
    ### Triangulate
    
    objp_result = np.zeros((0, 3))
    
    if has_prev_triangl_points:
        # Do triangulation of all points
        # NOTICE: in a real case, we should only use not yet triangulated points that are in sight
        objp_result, triangl_status = iterative_LS_triangulation(
                allfeatures_nrm_left, P_left,
                allfeatures_nrm_right, P_right )
        print ("triangl_status:", triangl_status)
    
    elif num_nonplanar > 0:
        # We already did the triangulation during the pose estimation, but we still need to backtransform them from the left camera axis-system
        objp_result = trfm.P_inv(P_left) .dot (np.concatenate((objp_triangl.T, np.ones((1, len(objp_triangl))))))
        objp_result = objp_result[0:3, :].T
        print (objp_triangl)
    
    print ("objp:")
    print (objp)
    print ("=> error:", reprojection_error(
            [objp_orig] * 2,
            [planar_left_orig, planar_right_orig],
            cameraMatrix, distCoeffs, [rvec_left, rvec_right], [tvec_left, tvec_right] )[1])
    
    print ("objp_result of chessboard:")
    print (objp_result[:num_planar, :])
    if has_nonplanar:
        print ("objp_result of non-planar geometry:")
        print (objp_result[num_planar:, :])
    if num_planar + num_nonplanar == 0:
        print ("=> error: undefined")
    else:
        print ("=> error:", reprojection_error(
                [objp_result] * 2,
                [allfeatures_left, allfeatures_right],
                cameraMatrix, distCoeffs, [rvec_left, rvec_right], [tvec_left, tvec_right] )[1])
    
    
    ### Print total combined reprojection error
    
    # We only have both pose estimation and triangulation if we've got 2D points of non-planar geometry
    if has_nonplanar:
        if num_planar + num_nonplanar == 0:
            print ("=> error: undefined")
        else:
            # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
            print ("Total combined error:", reprojection_error(
                    [objp_result] * 2,
                    [allfeatures_left, allfeatures_right],
                    cameraMatrix, distCoeffs,
                    [cvh.Rodrigues(P_left[0:3, 0:3]), cvh.Rodrigues(P_right_result[0:3, 0:3])],
                    [P_left[0:3, 3], P_right_result[0:3, 3]] )[1])
    
    
    """
    Further things we can do (in a real case):
        1. solvePnP() on inliers (including previously already triangulated points),
            or should we use solvePnPRansac() (in that case what to do with the outliers)?
        2. re-triangulate on new pose estimation
    """
    
    
    ### Print summary to be used in Blender to visualize (see "calibrate_pose_visualization.blend")
    
    print ("Camera poses:")
    if has_nonplanar:
        print ("Left")
        print_pose(rvec_left, tvec_left)
        print ()
        print ("Left_result")
        print_pose(cvh.Rodrigues(P_left_result[0:3, 0:3]), P_left_result[0:3, 3:4])
        print ()
        print ("Right")
        print_pose(rvec_right, tvec_right)
        print ()
        print ("Right_result")
        print_pose(cvh.Rodrigues(P_right_result[0:3, 0:3]), P_right_result[0:3, 3:4])
        print ()
    else:
        print ("<skipped: no non-planar objects have been selected>")
        print ()
    
    print ("Points:")
    print ("Chessboard")
    print ("coords = \\\n", map(list, objp_result[:num_planar, :]))
    print ()
    if has_nonplanar:
        print ("Non-planar geometry")
        print ("coords_nonplanar = \\\n", map(list, objp_result[num_planar:, :]))
        print ()
    
    ### Return the last successfully matched non-planar image-points, so they can be reused on the next run
    return nonplanar_left, nonplanar_right
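
The essential-matrix branch above works through the four-fold pose ambiguity of Hartley & Zisserman result 9.19 by hand (a twisted-pair swap plus a baseline reversal, disambiguated by cheirality tests). For reference, a standalone numpy sketch that enumerates the same four candidate poses; this restates the textbook result and is not code from this project.

import numpy as np

def pose_candidates_from_E(E):
    """Return the 4 candidate (R, t) pairs of Hartley & Zisserman result 9.19."""
    U, _, Vt = np.linalg.svd(E)
    if np.linalg.det(U.dot(Vt)) < 0:    # make sure det(R) will be +1
        Vt = -Vt
    W = np.array([[0., -1., 0.],
                  [1.,  0., 0.],
                  [0.,  0., 1.]])
    R1 = U.dot(W).dot(Vt)      # one rotation of the twisted pair
    R2 = U.dot(W.T).dot(Vt)    # the other rotation of the twisted pair
    t = U[:, 2:3]              # baseline, up to sign and scale
    return [(R1, t), (R1, -t), (R2, t), (R2, -t)]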
Example #9
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function  # Python 3 compatibility

import os
import numpy as np
import glob
import cv2

import sys

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "python_libs"))
import cv2_helpers as cvh
import calibration_tools

blue = cvh.rgb(0, 0, 255)


# Tweaking parameters
max_OF_error = 12.0
max_radius_OF_to_FAST = {}
max_dist_ratio = {}
allow_chessboard_matcher_and_refiner = True

# optimized for chessboard features
max_radius_OF_to_FAST["chessboard"] = 4.0  # FAST detects points *around* instead of *on* corners
max_dist_ratio["chessboard"] = 1.0  # disable ratio test

# optimized for FAST features
max_radius_OF_to_FAST["FAST"] = 2.0
max_dist_ratio["FAST"] = 0.7
Example #10
def main():
    global boardSize
    global cameraMatrix, distCoeffs, imageSize
    global max_OF_error, max_lost_tracks_ratio
    global keypoint_coverage_radius  #, min_keypoint_coverage
    global target_amount_keypoints, corner_quality_level, corner_min_dist
    global homography_condition_threshold
    global max_solvePnP_reproj_error, max_2nd_solvePnP_reproj_error, max_fundMat_reproj_error
    global max_solvePnP_outlier_ratio, max_2nd_solvePnP_outlier_ratio, solvePnP_use_extrinsic_guess

    # Initially known data
    boardSize = (8, 6)
    color_palette, color_palette_size = prepare_color_palette(
        2, 3, 4)  # used to identify 3D point group ids

    cameraMatrix, distCoeffs, imageSize = \
            load_camera_intrinsics(os.path.join("..", "..", "datasets", "webcam", "camera_intrinsics.txt"))
    #cameraMatrix, distCoeffs, imageSize = \
    #load_camera_intrinsics(os.path.join("..", "..", "..", "ARDrone2_tests", "camera_calibration", "live_video", "camera_intrinsics_front.txt"))

    # Select working (or 'testing') set
    from glob import glob
    images = sorted(
        glob(
            os.path.join("..", "..", "datasets", "webcam", "captures2",
                         "*.jpeg")))
    #images = sorted(glob(os.path.join("..", "..", "..", "ARDrone2_tests", "flying_front", "lowres", "drone0", "*.jpg")))

    # Setup some visualization helpers
    composite2D_painter = Composite2DPainter("composite 2D", imageSize)
    composite3D_painter = Composite3DPainter(
        "composite 3D",
        trfm.P_from_R_and_t(cvh.Rodrigues((pi, 0., 0.)),
                            np.array([[0., 0., 40.]]).T), (1280, 720))

    ### Tweaking parameters ###
    # OF calculation
    max_OF_error = 12.
    max_lost_tracks_ratio = 0.5
    # keypoint_coverage
    keypoint_coverage_radius = int(max_OF_error)
    #min_keypoint_coverage = 0.2
    # goodFeaturesToTrack
    target_amount_keypoints = int(
        round(
            (imageSize[0] * imageSize[1]) /
            (pi * keypoint_coverage_radius**2)))  # target is entire image full
    print "target_amount_keypoints:", target_amount_keypoints
    corner_quality_level = 0.01
    corner_min_dist = keypoint_coverage_radius
    # keyframe_test
    homography_condition_threshold = 500  # defined as ratio between max and min singular values
    # reprojection error
    max_solvePnP_reproj_error = 4.  #0.5    # TODO: revert to a lower number
    max_2nd_solvePnP_reproj_error = max_solvePnP_reproj_error / 2  # be more strict in a 2nd iteration, used after 1st pass of triangulation
    max_fundMat_reproj_error = 2.0
    # solvePnP
    max_solvePnP_outlier_ratio = 0.3
    max_2nd_solvePnP_outlier_ratio = 1.  # used in 2nd iteration, after 1st pass of triangulation
    solvePnP_use_extrinsic_guess = False  # TODO: set to True and see whether the 3D results are better

    # Init

    imgs = []
    imgs_gray = []

    objp = []  # 3D points
    objp_groups = []  # 3D point group ids, each new batch of detected points is put in a separate group
    group_id = 0  # current 3D point group id

    rvecs, rvecs_keyfr = [], []
    tvecs, tvecs_keyfr = [], []

    # Start frame requires special treatment

    # Start frame : read image and detect 2D points
    imgs.append(cv2.imread(images[0]))
    base_img = imgs[0]
    imgs_gray.append(cv2.cvtColor(imgs[0], cv2.COLOR_BGR2GRAY))
    ret, new_imgp = cvh.extractChessboardFeatures(imgs[0], boardSize)
    if not ret:
        print "First image must contain the entire chessboard!"
        return

    # Start frame : define a priori 3D points
    objp = prepare_object_points(boardSize)
    objp_groups = np.zeros((objp.shape[0]), dtype=int)
    group_id += 1

    # Start frame : setup linking data-structures
    base_imgp = new_imgp  # 2D points
    all_idxs_tmp = np.arange(len(base_imgp))
    triangl_idxs = set(all_idxs_tmp)
    nontriangl_idxs = set()
    imgp_to_objp_idxs = np.array(sorted(triangl_idxs), dtype=int)

    # Start frame : get absolute pose
    ret, rvec, tvec = cv2.solvePnP(  # assume first frame is a proper frame with chessboard fully in-sight
        objp, new_imgp, cameraMatrix, distCoeffs)
    print "solvePnP reproj_error init:", reprojection_error(
        objp, new_imgp, rvec, tvec, cameraMatrix, distCoeffs)[0]
    rvecs.append(rvec)
    tvecs.append(tvec)
    rvec_keyfr = rvec
    tvec_keyfr = tvec
    rvecs_keyfr.append(rvec_keyfr)
    tvecs_keyfr.append(tvec_keyfr)

    # Start frame : add other points
    mask_img = keypoint_mask(new_imgp)
    to_add = target_amount_keypoints - len(new_imgp)
    imgp_extra = cv2.goodFeaturesToTrack(imgs_gray[0], to_add,
                                         corner_quality_level, corner_min_dist,
                                         None, mask_img).reshape((-1, 2))
    cv2.imshow(
        "img",
        cv2.drawKeypoints(imgs[0],
                          [cv2.KeyPoint(p[0], p[1], 7.) for p in imgp_extra],
                          color=rgb(0, 0, 255)))
    cv2.waitKey()
    print "added:", len(imgp_extra)
    base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_rebase_and_add_imgp(
        imgp_extra, base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs,
        nontriangl_idxs, all_idxs_tmp)
    ret = 2  # indicate keyframe

    # Draw 3D points info of current frame
    print "Drawing composite 2D image"
    composite2D_painter.draw(imgs[0], rvec, tvec, ret, cameraMatrix,
                             distCoeffs, triangl_idxs, nontriangl_idxs,
                             all_idxs_tmp, new_imgp, imgp_to_objp_idxs, objp,
                             objp_groups, group_id, color_palette,
                             color_palette_size)

    # Draw 3D points info of all frames
    print "Drawing composite 3D image    (keys: LEFT/RIGHT/UP/DOWN/PAGEUP/PAGEDOWN/HOME/END)"
    composite3D_painter.draw(rvec, tvec, ret, triangl_idxs, imgp_to_objp_idxs,
                             objp, objp_groups, color_palette,
                             color_palette_size)

    for i in range(1, len(images)):
        # Frame[i-1] -> Frame[i]
        print "\nFrame[%s] -> Frame[%s]" % (i - 1, i)
        print "    processing '", images[i], "':"
        cur_img = cv2.imread(images[i])
        imgs.append(cur_img)
        imgs_gray.append(cv2.cvtColor(imgs[-1], cv2.COLOR_BGR2GRAY))
        ret, base_imgp, new_imgp, triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec, tvec, rvec_keyfr, tvec_keyfr, base_img = \
                handle_new_frame(base_imgp, new_imgp, imgs[-2], imgs_gray[-2], imgs[-1], imgs_gray[-1], triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec_keyfr, tvec_keyfr, base_img)

        if ret:
            rvecs.append(rvec)
            tvecs.append(tvec)
            if ret == 2:  # frame is a keyframe
                rvecs_keyfr.append(rvec_keyfr)
                tvecs_keyfr.append(tvec_keyfr)
        else:  # frame rejected
            del imgs[-1]
            del imgs_gray[-1]

        # Draw 3D points info of current frame
        print "Drawing composite 2D image"
        composite2D_painter.draw(cur_img, rvec, tvec, ret, cameraMatrix,
                                 distCoeffs, triangl_idxs, nontriangl_idxs,
                                 all_idxs_tmp, new_imgp, imgp_to_objp_idxs,
                                 objp, objp_groups, group_id, color_palette,
                                 color_palette_size)

        # Draw 3D points info of all frames
        print "Drawing composite 3D image    (keys: LEFT/RIGHT/UP/DOWN/PAGEUP/PAGEDOWN)"
        composite3D_painter.draw(rvec, tvec, ret, triangl_idxs,
                                 imgp_to_objp_idxs, objp, objp_groups,
                                 color_palette, color_palette_size)
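
main() relies on prepare_color_palette(2, 3, 4), which is not shown in these excerpts; from its usage (a palette indexed modulo color_palette_size by 3D point group ids) it plausibly returns a grid of 2*3*4 = 24 colors plus that size. A hedged sketch under that assumption; the real helper may differ:

import numpy as np

def prepare_color_palette(nr, ng, nb):
    """Sketch: an evenly spaced grid of nr*ng*nb colors (assumed behavior)."""
    palette = np.array([(r, g, b)
                        for r in np.linspace(0, 255, nr)
                        for g in np.linspace(0, 255, ng)
                        for b in np.linspace(0, 255, nb)], dtype=int)
    return palette, len(palette)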
Example #11
def handle_new_frame(
        base_imgp,  # includes 2D points of both triangulated and not-yet-triangulated points of the last keyframe
        prev_imgp,  # includes 2D points of last frame
        prev_img,
        prev_img_gray,
        new_img,
        new_img_gray,
        triangl_idxs,
        nontriangl_idxs,  # indices of 2D points in base_imgp
        imgp_to_objp_idxs,  # indices from 2D points in base_imgp to 3D points in objp
        all_idxs_tmp,  # list of idxs of 2D points in base_imgp, matches prev_imgp to base_imgp
        objp,  # triangulated 3D points
        objp_groups,
        group_id,  # corresponding group ids of triangulated 3D points, and current group id
        rvec_keyfr,
        tvec_keyfr,  # rvec and tvec of last keyframe
        base_img):  # used for debug

    # Save initial indexing state
    triangl_idxs_old = set(triangl_idxs)
    nontriangl_idxs_old = set(nontriangl_idxs)
    all_idxs_tmp_old = np.array(all_idxs_tmp)

    # Calculate OF (Optical Flow), and filter outliers based on OF error
    new_imgp, status_OF, err_OF = cv2.calcOpticalFlowPyrLK(
        prev_img_gray, new_img_gray, prev_imgp
    )  # WARNING: OpenCV can output corrupted values in 'status_OF': "RuntimeWarning: invalid value encountered in less"
    new_to_prev_idxs = np.where(
        np.logical_and((status_OF.reshape(-1) == 1),
                       (err_OF.reshape(-1) < max_OF_error)))[0]

    # If there is too much OF error in the entire image, simply reject the frame
    lost_tracks_ratio = (len(prev_imgp) - len(new_to_prev_idxs)) / float(
        len(prev_imgp))
    print "# points lost because of excessive OF error / # points before: ", len(
        prev_imgp) - len(new_to_prev_idxs), "/", len(
            prev_imgp), "=", lost_tracks_ratio
    if lost_tracks_ratio > max_lost_tracks_ratio:  # reject frame
        print "REJECTED: I lost track of all points!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img

    # Save matches by idxs
    preserve_idxs = set(all_idxs_tmp[new_to_prev_idxs])
    triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(
        preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp)
    if len(triangl_idxs) < 8:  # solvePnP uses 8-point algorithm
        print "REJECTED: I lost track of too many already-triangulated points, so we can't do solvePnP() anymore...\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    new_imgp = new_imgp[new_to_prev_idxs]
    #cv2.cornerSubPix(    # TODO: activate this secret weapon    <-- hmm, actually seems to make it worse
    #new_img_gray, new_imgp,
    #(corner_min_dist,corner_min_dist),    # window
    #(-1,-1),    # deadzone
    #(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) )    # termination criteria
    ##corners = corners.reshape(-1, 2)

    # Do solvePnPRansac() on current frame's (new_imgp) already triangulated points, ...
    triangl_idxs_array = np.array(
        sorted(triangl_idxs))  # select already-triangulated point-indices
    filtered_triangl_imgp = idxs_get_new_imgp_by_idxs(
        triangl_idxs, new_imgp,
        all_idxs_tmp)  # collect corresponding image-points
    filtered_triangl_objp = objp[imgp_to_objp_idxs[
        triangl_idxs_array]]  # collect corresponding object-points
    print "Doing solvePnP() on", filtered_triangl_objp.shape[0], "points"
    rvec_, tvec_, inliers = cv2.solvePnPRansac(  # perform solvePnPRansac() to identify outliers, force to obey max_solvePnP_outlier_ratio
        filtered_triangl_objp,
        filtered_triangl_imgp,
        cameraMatrix,
        distCoeffs,
        minInliersCount=int(
            ceil((1 - max_solvePnP_outlier_ratio) * len(triangl_idxs))),
        reprojectionError=max_solvePnP_reproj_error)

    # ... if ratio of 'inliers' vs input is too low, reject frame, ...
    if inliers is None:  # inliers is empty => reject frame
        print("REJECTED: No inliers based on solvePnP()!\n")
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    inliers = inliers.reshape(-1)
    solvePnP_outlier_ratio = (len(triangl_idxs) - len(inliers)) / float(
        len(triangl_idxs))
    print "solvePnP_outlier_ratio:", solvePnP_outlier_ratio
    if solvePnP_outlier_ratio > max_solvePnP_outlier_ratio or len(
            inliers) < 8:  # reject frame
        if solvePnP_outlier_ratio > max_solvePnP_outlier_ratio:
            print "REJECTED: Not enough inliers (ratio) based on solvePnP()!\n"
        else:
            print "REJECTED: Not enough inliers (absolute) based on solvePnP() to perform (non-RANSAC) solvePnP()!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img

    # <DEBUG: visualize reprojection error>    TODO: remove
    reproj_error, imgp_reproj1 = reprojection_error(filtered_triangl_objp,
                                                    filtered_triangl_imgp,
                                                    rvec_, tvec_, cameraMatrix,
                                                    distCoeffs)
    print "solvePnP reproj_error:", reproj_error
    i3 = np.array(new_img)
    try:
        for imgppr, imgppp in zip(filtered_triangl_imgp, imgp_reproj1):
            cvh.line(i3, imgppr.T, imgppp.T, rgb(255, 0, 0))
    except OverflowError:
        print "WARNING: OverflowError!"
    cv2.imshow("img", i3)
    cv2.waitKey()
    # </DEBUG>

    # ... then do solvePnP() on inliers, to get the current frame's pose estimation, ...
    triangl_idxs_array = triangl_idxs_array[
        inliers]  # select inliers among all already-triangulated point-indices
    filtered_triangl_imgp, filtered_triangl_objp = filtered_triangl_imgp[
        inliers], filtered_triangl_objp[inliers]
    preserve_idxs = set(triangl_idxs_array) | nontriangl_idxs
    new_imgp = idxs_get_new_imgp_by_idxs(preserve_idxs, new_imgp, all_idxs_tmp)
    triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(  # update indices to only preserve inliers
        preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp)
    ret, rvec, tvec = cv2.solvePnP(  # perform solvePnP() to estimate the pose
        filtered_triangl_objp, filtered_triangl_imgp, cameraMatrix, distCoeffs)

    # .. finally do a check on the average reprojection error, and reject frame if too high.
    reproj_error, imgp_reproj = reprojection_error(filtered_triangl_objp,
                                                   filtered_triangl_imgp, rvec,
                                                   tvec, cameraMatrix,
                                                   distCoeffs)
    print "solvePnP refined reproj_error:", reproj_error
    if reproj_error > max_solvePnP_reproj_error:  # reject frame
        print "REJECTED: Too high reprojection error based on pose estimate of solvePnP()!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img

    # <DEBUG: verify poses by reprojection error>    TODO: remove
    i0 = draw_axis_system(np.array(new_img), rvec, tvec, cameraMatrix,
                          distCoeffs)
    try:
        for imgppp in imgp_reproj1:
            cvh.circle(i0, imgppp.T, 2, rgb(255, 0, 0), thickness=-1)
    except OverflowError:
        print "WARNING: OverflowError!"
    for imgppp in imgp_reproj:
        cvh.circle(i0, imgppp.T, 2, rgb(0, 255, 255), thickness=-1)
    print "cur img check"
    cv2.imshow("img", i0)
    cv2.waitKey()
    # </DEBUG>

    # <DEBUG: verify OpticalFlow motion on preserved inliers>    TODO: remove
    cv2.imshow(
        "img",
        cvh.drawKeypointsAndMotion(new_img, base_imgp[all_idxs_tmp], new_imgp,
                                   rgb(0, 0, 255)))
    cv2.waitKey()
    # </DEBUG>

    # Check whether we got a new keyframe
    is_keyframe = keyframe_test(base_imgp[all_idxs_tmp], new_imgp)
    print "is_keyframe:", is_keyframe
    if is_keyframe:
        # If some points are not yet triangulated, do it now:
        if nontriangl_idxs:

            # First do triangulation of not-yet triangulated points using initial pose estimation, ...
            nontriangl_idxs_array = np.array(sorted(
                nontriangl_idxs))  # select not-yet-triangulated point-indices
            imgp0 = base_imgp[
                nontriangl_idxs_array]  # collect corresponding image-points of last keyframe
            imgp1 = idxs_get_new_imgp_by_idxs(
                nontriangl_idxs, new_imgp, all_idxs_tmp
            )  # collect corresponding image-points of current frame
            # <DEBUG: check sanity of input to triangulation function>    TODO: remove
            check_triangulation_input(base_img, new_img, imgp0, imgp1,
                                      rvec_keyfr, tvec_keyfr, rvec, tvec,
                                      cameraMatrix, distCoeffs)
            # </DEBUG>
            imgpnrm0 = cv2.undistortPoints(np.array([imgp0]), cameraMatrix,
                                           distCoeffs)[0]  # undistort and normalize to homogeneous coordinates
            imgpnrm1 = cv2.undistortPoints(np.array([imgp1]), cameraMatrix,
                                           distCoeffs)[0]
            objp_done = linear_LS_triangulation(  # triangulate
                imgpnrm0.T,
                trfm.P_from_R_and_t(cvh.Rodrigues(rvec_keyfr),
                                    tvec_keyfr),  # data from last keyframe
                imgpnrm1.T,
                trfm.P_from_R_and_t(cvh.Rodrigues(rvec),
                                    tvec))  # data from current frame
            objp_done = objp_done.T

            # <DEBUG: check reprojection error of the new freshly triangulated points, based on both pose estimates of keyframe and current cam>    TODO: remove
            print "triangl_reproj_error 0:", reprojection_error(
                objp_done, imgp0, rvec_keyfr, tvec_keyfr, cameraMatrix,
                distCoeffs)[0]
            print "triangl_reproj_error 1:", reprojection_error(
                objp_done, imgp1, rvec, tvec, cameraMatrix, distCoeffs)[0]
            # </DEBUG>

            filtered_triangl_objp = np.concatenate(
                (filtered_triangl_objp,
                 objp_done))  # collect all desired object-points
            filtered_triangl_imgp = np.concatenate(
                (filtered_triangl_imgp,
                 imgp1))  # collect corresponding image-points of current frame

            # ... then do solvePnP() on all preserved points ('inliers') to refine pose estimation, ...
            ret, rvec, tvec = cv2.solvePnP(  # perform solvePnP(), we start from the initial pose estimation
                filtered_triangl_objp,
                filtered_triangl_imgp,
                cameraMatrix,
                distCoeffs,
                rvec,
                tvec,
                useExtrinsicGuess=True)
            print "total triangl_reproj_error 1 refined:", reprojection_error(
                filtered_triangl_objp, filtered_triangl_imgp, rvec, tvec,
                cameraMatrix, distCoeffs)[0]  # TODO: remove

            # ... then do re-triangulation of 'inliers_objp_done' using refined pose estimation.
            objp_done = linear_LS_triangulation(  # triangulate
                imgpnrm0.T,
                trfm.P_from_R_and_t(cvh.Rodrigues(rvec_keyfr),
                                    tvec_keyfr),  # data from last keyframe
                imgpnrm1.T,
                trfm.P_from_R_and_t(cvh.Rodrigues(rvec),
                                    tvec))  # data from current frame
            objp_done = objp_done.T

            # <DEBUG: check reprojection error of the new freshly (refined) triangulated points, based on both pose estimates of keyframe and current cam>    TODO: remove
            print "triangl_reproj_error 0 refined:", reprojection_error(
                objp_done, imgp0, rvec_keyfr, tvec_keyfr, cameraMatrix,
                distCoeffs)[0]
            print "triangl_reproj_error 1 refined:", reprojection_error(
                objp_done, imgp1, rvec, tvec, cameraMatrix, distCoeffs)[0]
            # </DEBUG>

            # Update image-points and indices, and store the newly triangulated object-points and assign them the current group id
            new_imgp = filtered_triangl_imgp
            triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(
                preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp)
            objp_groups_done = np.empty((len(objp_done)), dtype=int)
            objp_groups_done.fill(group_id)  # assign to current 'group_id'
            (objp, objp_groups), imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs = \
                    idxs_add_objp((objp_done, objp_groups_done),
                                  preserve_idxs - triangl_idxs,
                                  (objp, objp_groups), imgp_to_objp_idxs,
                                  triangl_idxs, nontriangl_idxs, all_idxs_tmp)

            ## <DEBUG: check intermediate outlier filtering>    TODO: remove
            #i4 = np.array(new_img)
            #objp_test = objp[imgp_to_objp_idxs[np.array(sorted(triangl_idxs))]]
            #imgp_test = idxs_get_new_imgp_by_idxs(triangl_idxs, new_imgp, all_idxs_tmp)
            ##print imgp_test - filtered_triangl_imgp
            #reproj_error, imgp_reproj4 = reprojection_error(objp_test, imgp_test, rvec, tvec, cameraMatrix, distCoeffs)
            #print "checking both 2", reproj_error
            #for imgppr, imgppp in zip(imgp_test, imgp_reproj4): cvh.line(i4, imgppr.T, imgppp.T, rgb(255,0,0))
            #cv2.imshow("img", i4)
            #cv2.waitKey()
            ## </DEBUG>

        # Check whether we should add new image-points
        mask_img = keypoint_mask(new_imgp)  # generate mask that covers all image-points (with a certain radius)
        print("coverage:", 1 - cv2.countNonZero(mask_img) / float(
            mask_img.size))  # TODO: remove: unused
        to_add = target_amount_keypoints - len(
            new_imgp)  # limit the amount of to-be-added image-points

        # Add new image-points
        if to_add > 0:
            print "to_add:", to_add
            imgp_extra = cv2.goodFeaturesToTrack(new_img_gray, to_add,
                                                 corner_quality_level,
                                                 corner_min_dist, None,
                                                 mask_img).reshape((-1, 2))
            print "added:", len(imgp_extra)
            group_id += 1  # create a new group to assign the new batch of points to, later on
        else:
            imgp_extra = np.zeros((0, 2))
            print("adding zero new points")
        base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_rebase_and_add_imgp(  # update indices to include new image-points
            imgp_extra, base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs,
            nontriangl_idxs, all_idxs_tmp)

        # <DEBUG: visualize newly added points>    TODO: remove
        cv2.imshow(
            "img",
            cv2.drawKeypoints(
                new_img, [cv2.KeyPoint(p[0], p[1], 7.) for p in imgp_extra],
                color=rgb(0, 0, 255)))
        cv2.waitKey()
        # </DEBUG>

        # Now this frame becomes the base (= keyframe)
        rvec_keyfr = rvec
        tvec_keyfr = tvec
        base_img = new_img

    # Successfully return
    return True + int(is_keyframe), base_imgp, new_imgp, triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec, tvec, rvec_keyfr, tvec_keyfr, base_img
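
keyframe_test() is not shown in these excerpts; main() defines homography_condition_threshold as "the ratio between max and min singular values", which suggests a check along these lines. A sketch, not the project's exact implementation:

import cv2
import numpy as np

def keyframe_test_sketch(points1, points2):
    """Accept a keyframe when the inter-frame homography is well-conditioned."""
    H, _ = cv2.findHomography(points1, points2)
    w = np.linalg.svd(H, compute_uv=False)    # singular values, descending
    return w[0] / w[-1] < homography_condition_threshold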
Example #12
    def draw(
            self, rvec, tvec, status, triangl_idxs, imgp_to_objp_idxs, objp,
            objp_groups, color_palette,
            color_palette_size):  # TODO: move these variables to another class
        """
        Draw 3D composite view.
        Navigate using the following keys:
            LEFT/RIGHT    UP/DOWN    PAGEUP/PAGEDOWN    HOME/END
        
        'status' can have the following values:
            0: bad frame
            1: good frame but not a keyframe
            2: good frame and also a keyframe
        """

        # Calculate current camera's axis-system expressed in world coordinates and cache center
        if status:
            R_cam = cvh.Rodrigues(rvec)
            cam_axissys_objp = np.empty((4, 3))
            cam_axissys_objp[0, :] = -R_cam.T.dot(tvec).reshape(
                1, 3)  # cam_origin
            cam_axissys_objp[
                1:4, :] = cam_axissys_objp[0, :] + R_cam  # cam_x, cam_y, cam_z
            self.cams_pos = np.concatenate(
                (self.cams_pos, cam_axissys_objp[0:1, :]))  # cache cam_origin
            if status == 2:  # frame is a keyframe
                self.cams_pos_keyfr = np.concatenate(
                    (self.cams_pos_keyfr,
                     cam_axissys_objp[0:1, :]))  # cache cam_origin

        while True:
            # Fill with dark gray background color
            self.img.fill(56)

            # Draw world axis system
            if trfm.projection_depth(
                    np.array([[0, 0, 4]]), self.P
            )[0] > 0:  # only draw axis-system if its Z-axis is entirely in sight
                draw_axis_system(self.img, cvh.Rodrigues(self.P[0:3, 0:3]),
                                 self.P[0:3, 3], self.K, None)

            # Draw 3D points
            objp_proj, objp_visible = trfm.project_points(
                objp, self.K, self.img.shape, self.P)
            objp_visible = set(np.where(objp_visible)[0])
            current_idxs = set(imgp_to_objp_idxs[np.array(
                tuple(triangl_idxs))]) & objp_visible
            done_idxs = np.array(tuple(objp_visible - current_idxs), dtype=int)
            current_idxs = np.array(tuple(current_idxs), dtype=int)
            groups = objp_groups[current_idxs]
            for opp, opg, color in zip(
                    objp_proj[current_idxs], groups,
                    color_palette[groups % color_palette_size]):
                cvh.circle(self.img, opp[0:2], 4, color,
                           thickness=-1)  # draw point, big radius
            groups = objp_groups[done_idxs]
            for opp, opg, color in zip(
                    objp_proj[done_idxs], groups,
                    color_palette[groups % color_palette_size]):
                cvh.circle(self.img, opp[0:2], 2, color,
                           thickness=-1)  # draw point, small radius

            # Draw camera trajectory
            cams_pos_proj, cams_pos_visible = trfm.project_points(
                self.cams_pos, self.K, self.img.shape, self.P)
            cams_pos_proj = cams_pos_proj[np.where(cams_pos_visible)[0]]
            color = rgb(0, 0, 0)
            for p1, p2 in zip(cams_pos_proj[:-1], cams_pos_proj[1:]):
                cvh.line(self.img, p1, p2, color,
                         thickness=2)  # interconnect trajectory points
            cams_pos_keyfr_proj, cams_pos_keyfr_visible = trfm.project_points(
                self.cams_pos_keyfr, self.K, self.img.shape, self.P)
            cams_pos_keyfr_proj = cams_pos_keyfr_proj[np.where(
                cams_pos_keyfr_visible)[0]]
            color = rgb(255, 255, 255)
            for p in cams_pos_proj:
                cvh.circle(self.img, p, 1, color,
                           thickness=-1)  # draw trajectory points
            for p in cams_pos_keyfr_proj:
                cvh.circle(self.img, p, 3,
                           color)  # highlight keyframe trajectory points

            # Draw current camera axis system
            if status:
                (cam_origin, cam_xAxis, cam_yAxis, cam_zAxis), cam_visible = \
                        trfm.project_points(cam_axissys_objp, self.K, self.img.shape, self.P)
                if cam_visible.sum() == len(
                        cam_visible
                ):  # only draw axis-system if it's entirely in sight
                    cvh.line(self.img,
                             cam_origin,
                             cam_xAxis,
                             rgb(255, 0, 0),
                             lineType=cv2.CV_AA)
                    cvh.line(self.img,
                             cam_origin,
                             cam_yAxis,
                             rgb(0, 255, 0),
                             lineType=cv2.CV_AA)
                    cvh.line(self.img,
                             cam_origin,
                             cam_zAxis,
                             rgb(0, 0, 255),
                             lineType=cv2.CV_AA)
                    cvh.circle(self.img, cam_zAxis, 3,
                               rgb(0, 0,
                                   255))  # small dot to highlight cam Z axis
            else:
                last_cam_origin, last_cam_visible = trfm.project_points(
                    self.cams_pos[-1:], self.K, self.img.shape, self.P)
                if last_cam_visible[0]:  # only draw if in sight
                    cvh.putText(self.img, '?', last_cam_origin[0] - (11, 11),
                                fontFace, fontScale * 4,
                                rgb(255, 0,
                                    0))  # draw '?' because it's a bad frame

            # Display image
            cv2.imshow(self.img_title, self.img)

            # Translate view by keyboard
            key = cv2.waitKey() & 0xFF
            delta_t_view = np.zeros((3))
            if key in (0x51, 0x53):  # LEFT/RIGHT key
                delta_t_view[0] = 2 * (
                    key == 0x51) - 1  # LEFT -> increase cam X pos
            elif key in (0x52, 0x54):  # UP/DOWN key
                delta_t_view[1] = 2 * (key
                                       == 0x52) - 1  # UP -> increase cam Y pos
            elif key in (0x55, 0x56):  # PAGEUP/PAGEDOWN key
                delta_t_view[2] = 2 * (
                    key == 0x55) - 1  # PAGEUP -> increase cam Z pos
            elif key in (0x50, 0x57):  # HOME/END key
                delta_z_rot = 2 * (
                    key == 0x50
                ) - 1  # HOME -> counter-clockwise rotate around cam Z axis
                self.P[0:3,
                       0:4] = cvh.Rodrigues((0, 0, delta_z_rot * pi / 36)).dot(
                           self.P[0:3, 0:4])  # by steps of 5 degrees
            else:
                break
            self.P[0:3, 3] += delta_t_view * 0.3
 def draw(self, rvec, tvec, status,
          triangl_idxs, imgp_to_objp_idxs, objp, objp_groups, color_palette, color_palette_size):    # TODO: move these variables to another class
     """
     Draw 3D composite view.
     Navigate using the following keys:
         LEFT/RIGHT    UP/DOWN    PAGEUP/PAGEDOWN    HOME/END
     
     'status' can have the following values:
         0: bad frame
         1: good frame but not a keyframe
         2: good frame and also a keyframe
     """
     
     # Calculate current camera's axis-system expressed in world coordinates and cache center
     if status:
         R_cam = cvh.Rodrigues(rvec)
         cam_axissys_objp = np.empty((4, 3))
         cam_axissys_objp[0, :] = -R_cam.T.dot(tvec).reshape(1, 3)    # cam_origin
         cam_axissys_objp[1:4, :] = cam_axissys_objp[0, :] + R_cam    # cam_x, cam_y, cam_z
         self.cams_pos = np.concatenate((self.cams_pos, cam_axissys_objp[0:1, :]))    # cache cam_origin
         if status == 2:    # frame is a keyframe
             self.cams_pos_keyfr = np.concatenate((self.cams_pos_keyfr, cam_axissys_objp[0:1, :]))    # cache cam_origin
     
     while True:
         # Fill with dark gray background color
         self.img.fill(56)
         
         # Draw world axis system
         if trfm.projection_depth(np.array([[0,0,4]]), self.P)[0] > 0:    # only draw axis-system if its Z-axis is entirely in sight
             draw_axis_system(self.img, cvh.Rodrigues(self.P[0:3, 0:3]), self.P[0:3, 3], self.K, None)
         
         # Draw 3D points
         objp_proj, objp_visible = trfm.project_points(objp, self.K, self.img.shape, self.P)
         objp_visible = set(np.where(objp_visible)[0])
         current_idxs = set(imgp_to_objp_idxs[np.array(tuple(triangl_idxs))]) & objp_visible
         done_idxs = np.array(tuple(objp_visible - current_idxs), dtype=int)
         current_idxs = np.array(tuple(current_idxs), dtype=int)
         groups = objp_groups[current_idxs]
         for opp, opg, color in zip(objp_proj[current_idxs], groups, color_palette[groups % color_palette_size]):
             cvh.circle(self.img, opp[0:2], 4, color, thickness=-1)    # draw point, big radius
         groups = objp_groups[done_idxs]
         for opp, opg, color in zip(objp_proj[done_idxs], groups, color_palette[groups % color_palette_size]):
             cvh.circle(self.img, opp[0:2], 2, color, thickness=-1)    # draw point, small radius
         
         # Draw camera trajectory
         cams_pos_proj, cams_pos_visible = trfm.project_points(self.cams_pos, self.K, self.img.shape, self.P)
         cams_pos_proj = cams_pos_proj[np.where(cams_pos_visible)[0]]
         color = rgb(0,0,0)
         for p1, p2 in zip(cams_pos_proj[:-1], cams_pos_proj[1:]):
             cvh.line(self.img, p1, p2, color, thickness=2)    # interconnect trajectory points
         cams_pos_keyfr_proj, cams_pos_keyfr_visible = trfm.project_points(self.cams_pos_keyfr, self.K, self.img.shape, self.P)
         cams_pos_keyfr_proj = cams_pos_keyfr_proj[np.where(cams_pos_keyfr_visible)[0]]
         color = rgb(255,255,255)
         for p in cams_pos_proj:
             cvh.circle(self.img, p, 1, color, thickness=-1)    # draw trajectory points
         for p in cams_pos_keyfr_proj:
             cvh.circle(self.img, p, 3, color)    # highlight keyframe trajectory points
         
         # Draw current camera axis system
         if status:
             (cam_origin, cam_xAxis, cam_yAxis, cam_zAxis), cam_visible = \
                     trfm.project_points(cam_axissys_objp, self.K, self.img.shape, self.P)
             if cam_visible.sum() == len(cam_visible):    # only draw axis-system if it's entirely in sight
                 cvh.line(self.img, cam_origin, cam_xAxis, rgb(255,0,0), lineType=cv2.CV_AA)
                 cvh.line(self.img, cam_origin, cam_yAxis, rgb(0,255,0), lineType=cv2.CV_AA)
                 cvh.line(self.img, cam_origin, cam_zAxis, rgb(0,0,255), lineType=cv2.CV_AA)
                 cvh.circle(self.img, cam_zAxis, 3, rgb(0,0,255))    # small dot to highlight cam Z axis
         else:
             last_cam_origin, last_cam_visible = trfm.project_points(self.cams_pos[-1:], self.K, self.img.shape, self.P)
             if last_cam_visible[0]:    # only draw if in sight
                 cvh.putText(self.img, '?', last_cam_origin[0] - (11, 11), fontFace, fontScale * 4, rgb(255,0,0))    # draw '?' because it's a bad frame
         
         # Display image
         cv2.imshow(self.img_title, self.img)
         
         # Translate view by keyboard
         key = cv2.waitKey() & 0xFF
         delta_t_view = np.zeros((3))
         if key in (0x51, 0x53):    # LEFT/RIGHT key
             delta_t_view[0] = 2 * (key == 0x51) - 1    # LEFT -> increase cam X pos
         elif key in (0x52, 0x54):    # UP/DOWN key
             delta_t_view[1] = 2 * (key == 0x52) - 1    # UP -> increase cam Y pos
         elif key in (0x55, 0x56):    # PAGEUP/PAGEDOWN key
             delta_t_view[2] = 2 * (key == 0x55) - 1    # PAGEUP -> increase cam Z pos
         elif key in (0x50, 0x57):    # HOME/END key
             delta_z_rot = 2 * (key == 0x50) - 1    # HOME -> counter-clockwise rotate around cam Z axis
             self.P[0:3, 0:4] = cvh.Rodrigues((0, 0, delta_z_rot * pi/36)).dot(self.P[0:3, 0:4])    # by steps of 5 degrees
         else:
             break
         self.P[0:3, 3] += delta_t_view * 0.3
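The keyboard navigation above amounts to two small updates of the 4x4 view matrix P; a standalone sketch of the same math (function name assumed; cv2.Rodrigues is used directly instead of the cvh wrapper):

import numpy as np
import cv2

def pan_and_roll_view(P, delta_t=(0., 0., 0.), roll_steps=0):
    # Rolling premultiplies the top 3x4 block by a rotation around the view Z-axis
    # (5-degree steps); panning shifts the translation column in view coordinates.
    R_roll = cv2.Rodrigues(np.array([[0., 0., roll_steps * np.pi / 36]]))[0]
    P = P.copy()
    P[0:3, 0:4] = R_roll.dot(P[0:3, 0:4])
    P[0:3, 3] += np.asarray(delta_t, dtype=float) * 0.3
    return P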
def handle_new_frame(base_imgp,    # includes 2D points of both triangulated as not-yet triangl points of last keyframe
                     prev_imgp,    # includes 2D points of last frame
                     prev_img, prev_img_gray,
                     new_img, new_img_gray,
                     triangl_idxs, nontriangl_idxs,    # indices of 2D points in base_imgp
                     imgp_to_objp_idxs,    # indices from 2D points in base_imgp to 3D points in objp
                     all_idxs_tmp,    # list of idxs of 2D points in base_imgp, matches prev_imgp to base_imgp
                     objp,    # triangulated 3D points
                     objp_groups, group_id,    # corresponding group ids of triangulated 3D points, and current group id
                     rvec_keyfr, tvec_keyfr,    # rvec and tvec of last keyframe
                     base_img):    # used for debug
    
    # Save initial indexing state
    triangl_idxs_old = set(triangl_idxs)
    nontriangl_idxs_old = set(nontriangl_idxs)
    all_idxs_tmp_old = np.array(all_idxs_tmp)
    
    # Calculate OF (Optical Flow), and filter outliers based on OF error
    new_imgp, status_OF, err_OF = cv2.calcOpticalFlowPyrLK(prev_img_gray, new_img_gray, prev_imgp)    # WARNING: OpenCV can output corrupted values in 'status_OF': "RuntimeWarning: invalid value encountered in less"
    new_to_prev_idxs = np.where(np.logical_and((status_OF.reshape(-1) == 1), (err_OF.reshape(-1) < max_OF_error)))[0]
    
    # If there is too much OF error in the entire image, simply reject the frame
    lost_tracks_ratio = (len(prev_imgp) - len(new_to_prev_idxs)) / float(len(prev_imgp))
    print "# points lost because of excessive OF error / # points before: ", len(prev_imgp) - len(new_to_prev_idxs), "/", len(prev_imgp), "=", lost_tracks_ratio
    if lost_tracks_ratio > max_lost_tracks_ratio:    # reject frame
        print "REJECTED: I lost track of all points!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    
    # Save matches by idxs
    preserve_idxs = set(all_idxs_tmp[new_to_prev_idxs])
    triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(
            preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
    if len(triangl_idxs) < 8:    # require at least 8 already-triangulated points for a reliable solvePnP() estimate
        print "REJECTED: I lost track of too many already-triangulated points, so we can't do solvePnP() anymore...\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    new_imgp = new_imgp[new_to_prev_idxs]
    #cv2.cornerSubPix(    # TODO: activate this secret weapon    <-- hmm, actually seems to make it worse
                #new_img_gray, new_imgp,
                #(corner_min_dist,corner_min_dist),    # window
                #(-1,-1),    # deadzone
                #(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) )    # termination criteria
        ##corners = corners.reshape(-1, 2)
    
    # Do solvePnPRansac() on current frame's (new_imgp) already triangulated points, ...
    triangl_idxs_array = np.array(sorted(triangl_idxs))    # select already-triangulated point-indices
    filtered_triangl_imgp = idxs_get_new_imgp_by_idxs(triangl_idxs, new_imgp, all_idxs_tmp)    # collect corresponding image-points
    filtered_triangl_objp = objp[imgp_to_objp_idxs[triangl_idxs_array]]    # collect corresponding object-points
    print "Doing solvePnP() on", filtered_triangl_objp.shape[0], "points"
    rvec_, tvec_, inliers = cv2.solvePnPRansac(    # perform solvePnPRansac() to identify outliers, force to obey max_solvePnP_outlier_ratio
            filtered_triangl_objp, filtered_triangl_imgp, cameraMatrix, distCoeffs, minInliersCount=int(ceil((1 - max_solvePnP_outlier_ratio) * len(triangl_idxs))), reprojectionError=max_solvePnP_reproj_error )
    
    # ... if ratio of 'inliers' vs input is too low, reject frame, ...
    if inliers is None:    # no inliers found => reject frame
        print "REJECTED: No inliers based on solvePnP()!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    inliers = inliers.reshape(-1)
    solvePnP_outlier_ratio = (len(triangl_idxs) - len(inliers)) / float(len(triangl_idxs))
    print "solvePnP_outlier_ratio:", solvePnP_outlier_ratio
    if solvePnP_outlier_ratio > max_solvePnP_outlier_ratio or len(inliers) < 8:    # reject frame
        if solvePnP_outlier_ratio > max_solvePnP_outlier_ratio:
            print "REJECTED: Not enough inliers (ratio) based on solvePnP()!\n"
        else:
            print "REJECTED: Not enough inliers (absolute) based on solvePnP() to perform (non-RANSAC) solvePnP()!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    
    # <DEBUG: visualize reprojection error>    TODO: remove
    reproj_error, imgp_reproj1 = reprojection_error(filtered_triangl_objp, filtered_triangl_imgp, rvec_, tvec_, cameraMatrix, distCoeffs)
    print "solvePnP reproj_error:", reproj_error
    i3 = np.array(new_img)
    try:
        for imgppr, imgppp in zip(filtered_triangl_imgp, imgp_reproj1): cvh.line(i3, imgppr.T, imgppp.T, rgb(255,0,0))
    except OverflowError: print "WARNING: OverflowError!"
    cv2.imshow("img", i3)
    cv2.waitKey()
    # </DEBUG>
    
    # ... then do solvePnP() on inliers, to get the current frame's pose estimation, ...
    triangl_idxs_array = triangl_idxs_array[inliers]    # select inliers among all already-triangulated point-indices
    filtered_triangl_imgp, filtered_triangl_objp = filtered_triangl_imgp[inliers], filtered_triangl_objp[inliers]
    preserve_idxs = set(triangl_idxs_array) | nontriangl_idxs
    new_imgp = idxs_get_new_imgp_by_idxs(preserve_idxs, new_imgp, all_idxs_tmp)
    triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(    # update indices to only preserve inliers
            preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
    ret, rvec, tvec = cv2.solvePnP(    # perform solvePnP() to estimate the pose
            filtered_triangl_objp, filtered_triangl_imgp, cameraMatrix, distCoeffs )
    
    # .. finally do a check on the average reprojection error, and reject frame if too high.
    reproj_error, imgp_reproj = reprojection_error(filtered_triangl_objp, filtered_triangl_imgp, rvec, tvec, cameraMatrix, distCoeffs)
    print "solvePnP refined reproj_error:", reproj_error
    if reproj_error > max_solvePnP_reproj_error:    # reject frame
        print "REJECTED: Too high reprojection error based on pose estimate of solvePnP()!\n"
        return False, base_imgp, prev_imgp, triangl_idxs_old, nontriangl_idxs_old, imgp_to_objp_idxs, all_idxs_tmp_old, objp, objp_groups, group_id, None, None, rvec_keyfr, tvec_keyfr, base_img
    
    # <DEBUG: verify poses by reprojection error>    TODO: remove
    i0 = draw_axis_system(np.array(new_img), rvec, tvec, cameraMatrix, distCoeffs)
    try:
        for imgppp in imgp_reproj1: cvh.circle(i0, imgppp.T, 2, rgb(255,0,0), thickness=-1)
    except OverflowError: print "WARNING: OverflowError!"
    for imgppp in imgp_reproj : cvh.circle(i0, imgppp.T, 2, rgb(0,255,255), thickness=-1)
    print "cur img check"
    cv2.imshow("img", i0)
    cv2.waitKey()
    # </DEBUG>
    
    # <DEBUG: verify OpticalFlow motion on preserved inliers>    TODO: remove
    cv2.imshow("img", cvh.drawKeypointsAndMotion(new_img, base_imgp[all_idxs_tmp], new_imgp, rgb(0,0,255)))
    cv2.waitKey()
    # </DEBUG>
    
    # Check whether we got a new keyframe
    is_keyframe = keyframe_test(base_imgp[all_idxs_tmp], new_imgp)
    print "is_keyframe:", is_keyframe
    if is_keyframe:
        # If some points are not yet triangulated, do it now:
        if nontriangl_idxs:
            
            # First do triangulation of not-yet triangulated points using initial pose estimation, ...
            nontriangl_idxs_array = np.array(sorted(nontriangl_idxs))    # select not-yet-triangulated point-indices
            imgp0 = base_imgp[nontriangl_idxs_array]    # collect corresponding image-points of last keyframe
            imgp1 = idxs_get_new_imgp_by_idxs(nontriangl_idxs, new_imgp, all_idxs_tmp)    # collect corresponding image-points of current frame
            # <DEBUG: check sanity of input to triangulation function>    TODO: remove
            check_triangulation_input(base_img, new_img, imgp0, imgp1, rvec_keyfr, tvec_keyfr, rvec, tvec, cameraMatrix, distCoeffs)
            # </DEBUG>
            imgpnrm0 = cv2.undistortPoints(np.array([imgp0]), cameraMatrix, distCoeffs)[0]    # undistort and normalize to homogeneous coordinates
            imgpnrm1 = cv2.undistortPoints(np.array([imgp1]), cameraMatrix, distCoeffs)[0]
            objp_done = linear_LS_triangulation(    # triangulate
                    imgpnrm0.T, trfm.P_from_R_and_t(cvh.Rodrigues(rvec_keyfr), tvec_keyfr),    # data from last keyframe
                    imgpnrm1.T, trfm.P_from_R_and_t(cvh.Rodrigues(rvec), tvec) )               # data from current frame
            objp_done = objp_done.T
            
            # <DEBUG: check reprojection error of the new freshly triangulated points, based on both pose estimates of keyframe and current cam>    TODO: remove
            print "triangl_reproj_error 0:", reprojection_error(objp_done, imgp0, rvec_keyfr, tvec_keyfr, cameraMatrix, distCoeffs)[0]
            print "triangl_reproj_error 1:", reprojection_error(objp_done, imgp1, rvec, tvec, cameraMatrix, distCoeffs)[0]
            # </DEBUG>
            
            filtered_triangl_objp = np.concatenate((filtered_triangl_objp, objp_done))    # collect all desired object-points
            filtered_triangl_imgp = np.concatenate((filtered_triangl_imgp, imgp1))    # collect corresponding image-points of current frame
            
            # ... then do solvePnP() on all preserved points ('inliers') to refine pose estimation, ...
            ret, rvec, tvec = cv2.solvePnP(    # perform solvePnP(), we start from the initial pose estimation
                    filtered_triangl_objp, filtered_triangl_imgp, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=True )
            print "total triangl_reproj_error 1 refined:", reprojection_error(filtered_triangl_objp, filtered_triangl_imgp, rvec, tvec, cameraMatrix, distCoeffs)[0]    # TODO: remove
            
            # ... then do re-triangulation of 'inliers_objp_done' using refined pose estimation.
            objp_done = linear_LS_triangulation(    # triangulate
                    imgpnrm0.T, trfm.P_from_R_and_t(cvh.Rodrigues(rvec_keyfr), tvec_keyfr),    # data from last keyframe
                    imgpnrm1.T, trfm.P_from_R_and_t(cvh.Rodrigues(rvec), tvec) )               # data from current frame
            objp_done = objp_done.T
            
            # <DEBUG: check reprojection error of the new freshly (refined) triangulated points, based on both pose estimates of keyframe and current cam>    TODO: remove
            print "triangl_reproj_error 0 refined:", reprojection_error(objp_done, imgp0, rvec_keyfr, tvec_keyfr, cameraMatrix, distCoeffs)[0]
            print "triangl_reproj_error 1 refined:", reprojection_error(objp_done, imgp1, rvec, tvec, cameraMatrix, distCoeffs)[0]
            # </DEBUG>
            
            # Update image-points and indices, and store the newly triangulated object-points and assign them the current group id
            new_imgp = filtered_triangl_imgp
            triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_update_by_idxs(
                    preserve_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
            objp_groups_done = np.empty((len(objp_done)), dtype=int); objp_groups_done.fill(group_id)    # assign to current 'group_id'
            (objp, objp_groups), imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs = idxs_add_objp(
                    (objp_done, objp_groups_done), preserve_idxs - triangl_idxs, (objp, objp_groups), imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
            
            ## <DEBUG: check intermediate outlier filtering>    TODO: remove
            #i4 = np.array(new_img)
            #objp_test = objp[imgp_to_objp_idxs[np.array(sorted(triangl_idxs))]]
            #imgp_test = idxs_get_new_imgp_by_idxs(triangl_idxs, new_imgp, all_idxs_tmp)
            ##print imgp_test - filtered_triangl_imgp
            #reproj_error, imgp_reproj4 = reprojection_error(objp_test, imgp_test, rvec, tvec, cameraMatrix, distCoeffs)
            #print "checking both 2", reproj_error
            #for imgppr, imgppp in zip(imgp_test, imgp_reproj4): cvh.line(i4, imgppr.T, imgppp.T, rgb(255,0,0))
            #cv2.imshow("img", i4)
            #cv2.waitKey()
            ## </DEBUG>
        
        # Check whether we should add new image-points
        mask_img = keypoint_mask(new_imgp)    # generate mask that covers all image-points (with a certain radius)
        print "coverage:", 1 - cv2.countNonZero(mask_img)/float(mask_img.size)    # TODO: remove: unused
        to_add = target_amount_keypoints - len(new_imgp)    # limit the amount of to-be-added image-points
        
        # Add new image-points
        if to_add > 0:
            print "to_add:", to_add
            imgp_extra = cv2.goodFeaturesToTrack(new_img_gray, to_add, corner_quality_level, corner_min_dist, None, mask_img).reshape((-1, 2))
            print "added:", len(imgp_extra)
            group_id += 1    # create a new group to assign the new batch of points to, later on
        else:
            imgp_extra = np.zeros((0, 2))
            print "adding zero new points"
        base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_rebase_and_add_imgp(    # update indices to include new image-points
                imgp_extra, base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
        
        # <DEBUG: visualize newly added points>    TODO: remove
        cv2.imshow("img", cv2.drawKeypoints(new_img, [cv2.KeyPoint(p[0],p[1], 7.) for p in imgp_extra], color=rgb(0,0,255)))
        cv2.waitKey()
        # </DEBUG>
        
        # Now this frame becomes the base (= keyframe)
        rvec_keyfr = rvec
        tvec_keyfr = tvec
        base_img = new_img
    
    # Successfully return
    return True + int(is_keyframe), base_imgp, new_imgp, triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec, tvec, rvec_keyfr, tvec_keyfr, base_img
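For reference, the core of linear_LS_triangulation() used above solves one small linear system per point pair; a minimal homogeneous-DLT sketch, assuming 3x4 projection matrices and normalized image coordinates (the project's actual implementation may differ, e.g. by using the inhomogeneous least-squares form):

import numpy as np

def triangulate_point_DLT(u0, P0, u1, P1):
    # u0, u1: normalized image coordinates (x, y); P0, P1: 3x4 camera projection matrices
    A = np.empty((4, 4))
    A[0, :] = u0[0] * P0[2, :] - P0[0, :]
    A[1, :] = u0[1] * P0[2, :] - P0[1, :]
    A[2, :] = u1[0] * P1[2, :] - P1[0, :]
    A[3, :] = u1[1] * P1[2, :] - P1[1, :]
    X = np.linalg.svd(A)[2][-1, :]    # right singular vector of the smallest singular value
    return X[0:3] / X[3]    # dehomogenize to a 3D point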
def realtime_pose_estimation(device_id, filename_base_extrinsics, cameraMatrix,
                             distCoeffs, objp, boardSize):
    """
    This interactive demo will track a chessboard in realtime using a webcam,
    and the WORLD axis-system will be drawn on it: [X Y Z] = [red green blue].
    You will also see some data in the bottom-right corner;
    it indicates both the pose of the current image w.r.t. the WORLD axis-system
    and the pose of the current image w.r.t. the previous keyframe pose.
    
    To create a new keyframe while running, press SPACE.
    Each time a new keyframe is generated,
    the corresponding image and data (in txt-format) are written using the 'filename_base_extrinsics' path prefix.
    
    All poses are defined in the WORLD axis-system;
    the rotation notation follows the axis-angle representation: '<unit vector> * <magnitude (degrees)>'.
    
    To quit, press ESC or 'q'.
    """

    cv2.namedWindow("Image (with axis-system)")
    fontFace = cv2.FONT_HERSHEY_DUPLEX
    fontScale = 0.5
    mlt = cvh.MultilineText()
    cap = cv2.VideoCapture(device_id)

    imageNr = 0  # keyframe image id
    rvec_prev = np.zeros((3, 1))
    rvec = None
    tvec_prev = np.zeros((3, 1))
    tvec = None

    # Loop until 'q' or ESC pressed
    last_key_pressed = 0
    while last_key_pressed not in (ord('q'), 27):
        ret_, img = cap.read()
        ret, corners = cvh.extractChessboardFeatures(img, boardSize)

        # If valid features found, solve for 'rvec' and 'tvec'
        if ret:
            ret, rvec, tvec = cv2.solvePnP(  # TODO: use Ransac version for other types of features
                objp, corners, cameraMatrix, distCoeffs)

            # Draw axis-system
            cvh.drawAxisSystem(img, cameraMatrix, distCoeffs, rvec, tvec)

            # OpenCV's 'rvec' and 'tvec' seem to be defined as follows:
            #   'rvec': rotation transformation: transforms points from "WORLD axis-system -> CAMERA axis-system"
            #   'tvec': translation of "CAMERA center -> WORLD center", all defined in the "CAMERA axis-system"
            rvec *= -1  # convert to: "CAMERA axis-system -> WORLD axis-system", equivalent to rotation of CAMERA axis-system w.r.t. WORLD axis-system
            tvec = cvh.Rodrigues(rvec).dot(
                tvec)  # bring to "WORLD axis-system", ...
            tvec *= -1  # ... and change direction to "WORLD center -> CAMERA center"
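            # (Net effect: solvePnP() returns (rvec, tvec) such that x_cam = R.dot(x_world) + tvec;
            # the camera center in WORLD coordinates is thus C = -R.T.dot(tvec), which is what the
            # three statements above compute, together with the inverse rotation via rvec *= -1.)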

            # Calculate pose relative to last keyframe
            rvec_rel = trfm.delta_rvec(rvec, rvec_prev)
            tvec_rel = tvec - tvec_prev

            # Extract axis and angle, to enhance representation
            rvec_axis, rvec_angle = trfm.axis_and_angle_from_rvec(rvec)
            rvec_rel_axis, rvec_rel_angle = trfm.axis_and_angle_from_rvec(
                rvec_rel)

            # Draw pose information
            texts = []
            texts.append("Current pose:")
            texts.append("    Rvec: %s * %.1fdeg" %
                         (format3DVector(rvec_axis), degrees(rvec_angle)))
            texts.append("    Tvec: %s" % format3DVector(tvec))
            texts.append("Relative to previous pose:")
            texts.append(
                "    Rvec: %s * %.1fdeg" %
                (format3DVector(rvec_rel_axis), degrees(rvec_rel_angle)))
            texts.append("    Tvec: %s" % format3DVector(tvec_rel))

            mlt.text(texts[0],
                     fontFace,
                     fontScale * 1.5,
                     rgb(150, 0, 0),
                     thickness=2)
            mlt.text(texts[1], fontFace, fontScale, rgb(255, 0, 0))
            mlt.text(texts[2], fontFace, fontScale, rgb(255, 0, 0))
            mlt.text(texts[3],
                     fontFace,
                     fontScale * 1.5,
                     rgb(150, 0, 0),
                     thickness=2)
            mlt.text(texts[4], fontFace, fontScale, rgb(255, 0, 0))
            mlt.text(texts[5], fontFace, fontScale, rgb(255, 0, 0))
            mlt.putText(img, (img.shape[1],
                              img.shape[0]))  # put text in bottom-right corner

        # Show Image
        cv2.imshow("Image (with axis-system)", img)
        mlt.clear()

        # Save keyframe image when SPACE is pressed
        last_key_pressed = cv2.waitKey(1) & 0xFF
        if last_key_pressed == ord(' ') and ret:
            filename = filename_base_extrinsics + str(imageNr)
            cv2.imwrite(filename + ".jpg", img)  # write image to jpg-file
            textTotal = '\n'.join(texts)
            open(filename + ".txt",
                 'w').write(textTotal)  # write data to txt-file

            print("Saved keyframe image+data to", filename, ":")
            print(textTotal)

            imageNr += 1
            rvec_prev = rvec
            tvec_prev = tvec
def realtime_pose_estimation(device_id, filename_base_extrinsics, cameraMatrix, distCoeffs, objp, boardSize):
    """
    This interactive demo will track a chessboard in realtime using a webcam,
    and the WORLD axis-system will be drawn on it: [X Y Z] = [red green blue].
    You will also see some data in the bottom-right corner;
    it indicates both the pose of the current image w.r.t. the WORLD axis-system
    and the pose of the current image w.r.t. the previous keyframe pose.
    
    To create a new keyframe while running, press SPACE.
    Each time a new keyframe is generated,
    the corresponding image and data (in txt-format) are written using the 'filename_base_extrinsics' path prefix.
    
    All poses are defined in the WORLD axis-system;
    the rotation notation follows the axis-angle representation: '<unit vector> * <magnitude (degrees)>'.
    
    To quit, press ESC or 'q'.
    """
    
    cv2.namedWindow("Image (with axis-system)")
    fontFace = cv2.FONT_HERSHEY_DUPLEX
    fontScale = 0.5
    mlt = cvh.MultilineText()
    cap = cv2.VideoCapture(device_id)

    imageNr = 0    # keyframe image id
    rvec_prev = np.zeros((3, 1))
    rvec = None
    tvec_prev = np.zeros((3, 1))
    tvec = None

    # Loop until 'q' or ESC pressed
    last_key_pressed = 0
    while last_key_pressed not in (ord('q'), 27):
        ret_, img = cap.read()
        ret, corners = cvh.extractChessboardFeatures(img, boardSize)

        # If valid features found, solve for 'rvec' and 'tvec'
        if ret:
            ret, rvec, tvec = cv2.solvePnP(    # TODO: use Ransac version for other types of features
                    objp, corners, cameraMatrix, distCoeffs )

            # Draw axis-system
            cvh.drawAxisSystem(img, cameraMatrix, distCoeffs, rvec, tvec)
            
            # OpenCV's 'rvec' and 'tvec' seem to be defined as follows:
            #   'rvec': rotation transformation: transforms points from "WORLD axis-system -> CAMERA axis-system"
            #   'tvec': translation of "CAMERA center -> WORLD center", all defined in the "CAMERA axis-system"
            rvec *= -1    # convert to: "CAMERA axis-system -> WORLD axis-system", equivalent to rotation of CAMERA axis-system w.r.t. WORLD axis-system
            tvec = cvh.Rodrigues(rvec).dot(tvec)    # bring to "WORLD axis-system", ...
            tvec *= -1    # ... and change direction to "WORLD center -> CAMERA center"
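            # (Net effect: solvePnP() returns (rvec, tvec) such that x_cam = R.dot(x_world) + tvec;
            # the camera center in WORLD coordinates is thus C = -R.T.dot(tvec), which is what the
            # three statements above compute, together with the inverse rotation via rvec *= -1.)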
            
            # Calculate pose relative to last keyframe
            rvec_rel = trfm.delta_rvec(rvec, rvec_prev)
            tvec_rel = tvec - tvec_prev
            
            # Extract axis and angle, to enhance representation
            rvec_axis, rvec_angle = trfm.axis_and_angle_from_rvec(rvec)
            rvec_rel_axis, rvec_rel_angle = trfm.axis_and_angle_from_rvec(rvec_rel)
            
            # Draw pose information
            texts = []
            texts.append("Current pose:")
            texts.append("    Rvec: %s * %.1fdeg" % (format3DVector(rvec_axis), degrees(rvec_angle)))
            texts.append("    Tvec: %s" % format3DVector(tvec))
            texts.append("Relative to previous pose:")
            texts.append("    Rvec: %s * %.1fdeg" % (format3DVector(rvec_rel_axis), degrees(rvec_rel_angle)))
            texts.append("    Tvec: %s" % format3DVector(tvec_rel))
            
            mlt.text(texts[0], fontFace, fontScale*1.5, rgb(150,0,0), thickness=2)
            mlt.text(texts[1], fontFace, fontScale, rgb(255,0,0))
            mlt.text(texts[2], fontFace, fontScale, rgb(255,0,0))
            mlt.text(texts[3], fontFace, fontScale*1.5, rgb(150,0,0), thickness=2)
            mlt.text(texts[4], fontFace, fontScale, rgb(255,0,0))
            mlt.text(texts[5], fontFace, fontScale, rgb(255,0,0))
            mlt.putText(img, (img.shape[1], img.shape[0]))    # put text in bottom-right corner

        # Show Image
        cv2.imshow("Image (with axis-system)", img)
        mlt.clear()
        
        # Save keyframe image when SPACE is pressed
        last_key_pressed = cv2.waitKey(1) & 0xFF
        if last_key_pressed == ord(' ') and ret:
            filename = filename_base_extrinsics + str(imageNr)
            cv2.imwrite(filename + ".jpg", img)    # write image to jpg-file
            textTotal = '\n'.join(texts)
            open(filename + ".txt", 'w').write(textTotal)    # write data to txt-file
            
            print ("Saved keyframe image+data to", filename, ":")
            print (textTotal)
            
            imageNr += 1
            rvec_prev = rvec
            tvec_prev = tvec
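A minimal driver sketch for this demo, reusing helpers that appear in main() further down (load_camera_intrinsics, prepare_object_points); the device id and output path prefix are made-up for illustration:

import os

boardSize = (8, 6)
cameraMatrix, distCoeffs, imageSize = load_camera_intrinsics(
        os.path.join("..", "..", "datasets", "webcam", "camera_intrinsics.txt"))
objp = prepare_object_points(boardSize)
realtime_pose_estimation(0, os.path.join("extrinsics", "keyframe_"),
                         cameraMatrix, distCoeffs, objp, boardSize)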
 def run(self):
     print ("Select your matches. Press SPACE when done.")
     while True:
         img = cv2.drawKeypoints(self.images[self.img_idx], [cv2.KeyPoint(p[0],p[1], 7.) for p in self.points[self.img_idx]], color=rgb(0,0,255))
         cv2.imshow(self.window_name, img)
         key = cv2.waitKey(50) & 0xFF
         if key == ord(' '): break    # finish if SPACE is pressed
     
     num_points_diff = len(self.points[0]) - len(self.points[1])
     if num_points_diff:
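         # (num_points_diff < 0) indexes the longer list (0: left, 1: right); trim its unmatched tail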
         del self.points[num_points_diff < 0][-abs(num_points_diff):]
     print ("Selected", len(self.points[0]), "pairs of matches.")
def main():
    global boardSize
    global cameraMatrix, distCoeffs, imageSize
    global max_OF_error, max_lost_tracks_ratio
    global keypoint_coverage_radius#, min_keypoint_coverage
    global target_amount_keypoints, corner_quality_level, corner_min_dist
    global homography_condition_threshold
    global max_solvePnP_reproj_error, max_2nd_solvePnP_reproj_error, max_fundMat_reproj_error
    global max_solvePnP_outlier_ratio, max_2nd_solvePnP_outlier_ratio, solvePnP_use_extrinsic_guess
    
    # Initially known data
    boardSize = (8, 6)
    color_palette, color_palette_size = prepare_color_palette(2, 3, 4)    # used to identify 3D point group ids
    
    cameraMatrix, distCoeffs, imageSize = \
            load_camera_intrinsics(os.path.join("..", "..", "datasets", "webcam", "camera_intrinsics.txt"))
    #cameraMatrix, distCoeffs, imageSize = \
            #load_camera_intrinsics(os.path.join("..", "..", "..", "ARDrone2_tests", "camera_calibration", "live_video", "camera_intrinsics_front.txt"))
    
    # Select working (or 'testing') set
    from glob import glob
    images = sorted(glob(os.path.join("..", "..", "datasets", "webcam", "captures2", "*.jpeg")))
    #images = sorted(glob(os.path.join("..", "..", "..", "ARDrone2_tests", "flying_front", "lowres", "drone0", "*.jpg")))
    
    # Setup some visualization helpers
    composite2D_painter = Composite2DPainter("composite 2D", imageSize)
    composite3D_painter = Composite3DPainter(
            "composite 3D", trfm.P_from_R_and_t(cvh.Rodrigues((pi, 0., 0.)), np.array([[0., 0., 40.]]).T), (1280, 720) )
    
    
    ### Tweaking parameters ###
    # OF calculation
    max_OF_error = 12.
    max_lost_tracks_ratio = 0.5
    # keypoint_coverage
    keypoint_coverage_radius = int(max_OF_error)
    #min_keypoint_coverage = 0.2
    # goodFeaturesToTrack
    target_amount_keypoints = int(round((imageSize[0] * imageSize[1]) / (pi * keypoint_coverage_radius**2)))    # target: cover the entire image (image area / area of one coverage disc)
    print "target_amount_keypoints:", target_amount_keypoints
    corner_quality_level = 0.01
    corner_min_dist = keypoint_coverage_radius
    # keyframe_test
    homography_condition_threshold = 500    # defined as ratio between max and min singular values
    # reprojection error
    max_solvePnP_reproj_error = 4.#0.5    # TODO: revert to a lower number
    max_2nd_solvePnP_reproj_error = max_solvePnP_reproj_error / 2    # be more strict in a 2nd iteration, used after 1st pass of triangulation
    max_fundMat_reproj_error = 2.0
    # solvePnP
    max_solvePnP_outlier_ratio = 0.3
    max_2nd_solvePnP_outlier_ratio = 1.    # used in 2nd iteration, after 1st pass of triangulation
    solvePnP_use_extrinsic_guess = False    # TODO: set to True and see whether the 3D results are better
    
    
    # Init
    
    imgs = []
    imgs_gray = []
    
    objp = []    # 3D points
    objp_groups = []    # 3D point group ids, each new batch of detected points is put in a separate group
    group_id = 0    # current 3D point group id
    
    rvecs, rvecs_keyfr = [], []
    tvecs, tvecs_keyfr = [], []
    
    
    # Start frame requires special treatment
    
    # Start frame : read image and detect 2D points
    imgs.append(cv2.imread(images[0]))
    base_img = imgs[0]
    imgs_gray.append(cv2.cvtColor(imgs[0], cv2.COLOR_BGR2GRAY))
    ret, new_imgp = cvh.extractChessboardFeatures(imgs[0], boardSize)
    if not ret:
        print "First image must contain the entire chessboard!"
        return
    
    # Start frame : define a priori 3D points
    objp = prepare_object_points(boardSize)
    objp_groups = np.zeros((objp.shape[0]), dtype=int)
    group_id += 1
    
    # Start frame : setup linking data-structures
    base_imgp = new_imgp    # 2D points
    all_idxs_tmp = np.arange(len(base_imgp))
    triangl_idxs = set(all_idxs_tmp)
    nontriangl_idxs = set()
    imgp_to_objp_idxs = np.array(sorted(triangl_idxs), dtype=int)
    
    # Start frame : get absolute pose
    ret, rvec, tvec = cv2.solvePnP(    # assume first frame is a proper frame with chessboard fully in-sight
            objp, new_imgp, cameraMatrix, distCoeffs )
    print "solvePnP reproj_error init:", reprojection_error(objp, new_imgp, rvec, tvec, cameraMatrix, distCoeffs)[0]
    rvecs.append(rvec)
    tvecs.append(tvec)
    rvec_keyfr = rvec
    tvec_keyfr = tvec
    rvecs_keyfr.append(rvec_keyfr)
    tvecs_keyfr.append(tvec_keyfr)
    
    # Start frame : add other points
    mask_img = keypoint_mask(new_imgp)
    to_add = target_amount_keypoints - len(new_imgp)
    imgp_extra = cv2.goodFeaturesToTrack(imgs_gray[0], to_add, corner_quality_level, corner_min_dist, None, mask_img).reshape((-1, 2))
    cv2.imshow("img", cv2.drawKeypoints(imgs[0], [cv2.KeyPoint(p[0],p[1], 7.) for p in imgp_extra], color=rgb(0,0,255)))
    cv2.waitKey()
    print "added:", len(imgp_extra)
    base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp = idxs_rebase_and_add_imgp(
            imgp_extra, base_imgp, new_imgp, imgp_to_objp_idxs, triangl_idxs, nontriangl_idxs, all_idxs_tmp )
    ret = 2    # indicate keyframe
        
    # Draw 3D points info of current frame
    print "Drawing composite 2D image"
    composite2D_painter.draw(imgs[0], rvec, tvec, ret,
                             cameraMatrix, distCoeffs, triangl_idxs, nontriangl_idxs, all_idxs_tmp, new_imgp, imgp_to_objp_idxs, objp, objp_groups, group_id, color_palette, color_palette_size)
    
    # Draw 3D points info of all frames
    print "Drawing composite 3D image    (keys: LEFT/RIGHT/UP/DOWN/PAGEUP/PAGEDOWN/HOME/END)"
    composite3D_painter.draw(rvec, tvec, ret,
                             triangl_idxs, imgp_to_objp_idxs, objp, objp_groups, color_palette, color_palette_size)
    
    for i in range(1, len(images)):
        # Frame[i-1] -> Frame[i]
        print "\nFrame[%s] -> Frame[%s]" % (i-1, i)
        print "    processing '", images[i], "':"
        cur_img = cv2.imread(images[i])
        imgs.append(cur_img)
        imgs_gray.append(cv2.cvtColor(imgs[-1], cv2.COLOR_BGR2GRAY))
        ret, base_imgp, new_imgp, triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec, tvec, rvec_keyfr, tvec_keyfr, base_img = \
                handle_new_frame(base_imgp, new_imgp, imgs[-2], imgs_gray[-2], imgs[-1], imgs_gray[-1], triangl_idxs, nontriangl_idxs, imgp_to_objp_idxs, all_idxs_tmp, objp, objp_groups, group_id, rvec_keyfr, tvec_keyfr, base_img)
        
        if ret:
            rvecs.append(rvec)
            tvecs.append(tvec)
            if ret == 2:    # frame is a keyframe
                rvecs_keyfr.append(rvec_keyfr)
                tvecs_keyfr.append(tvec_keyfr)
        else:    # frame rejected
            del imgs[-1]
            del imgs_gray[-1]
        
        # Draw 3D points info of current frame
        print "Drawing composite 2D image"
        composite2D_painter.draw(cur_img, rvec, tvec, ret,
                                 cameraMatrix, distCoeffs, triangl_idxs, nontriangl_idxs, all_idxs_tmp, new_imgp, imgp_to_objp_idxs, objp, objp_groups, group_id, color_palette, color_palette_size)
        
        # Draw 3D points info of all frames
        print "Drawing composite 3D image    (keys: LEFT/RIGHT/UP/DOWN/PAGEUP/PAGEDOWN)"
        composite3D_painter.draw(rvec, tvec, ret,
                                 triangl_idxs, imgp_to_objp_idxs, objp, objp_groups, color_palette, color_palette_size)
def triangl_pose_est_interactive(img_left, img_right, cameraMatrix, distCoeffs,
                                 imageSize, objp, boardSize, nonplanar_left,
                                 nonplanar_right):
    """ (TODO: remove debug-prints)
    Triangulation and relative pose estimation will be performed from LEFT to RIGHT image.
    
    Both images have to contain the whole chessboard,
    in order to make a decent estimation of the relative pose based on 'solvePnP'.
    
    Then the user can manually create matches between non-planar objects.
    If the user omits this step, only triangulation of the chessboard corners will be performed,
    and they will be compared to the real 3D points.
    Otherwise the coordinates of the triangulated manually-matched points will be printed,
    and a relative pose estimation will be performed (using the essential matrix);
    this pose estimation will be compared with the decent 'solvePnP' estimation.
    In that case you will be asked whether you want to mute the chessboard corners in all calculations;
    note that this requires at least 8 pairs of matches corresponding to non-planar geometry.
    
    During the manual matching process,
    switching between the LEFT and RIGHT image will be done in a zigzag fashion.
    To stop selecting matches, press SPACE.
    
    Additionally, you can also introduce predefined non-planar geometry;
    this is useful, e.g., if you don't want to select non-planar geometry manually.
    """

    ### Setup initial state, and calc some very accurate poses to compare against with later

    K_left = cameraMatrix
    K_right = cameraMatrix
    K_inv_left = cvh.invert(K_left)
    K_inv_right = cvh.invert(K_right)
    distCoeffs_left = distCoeffs
    distCoeffs_right = distCoeffs

    # Extract chessboard features, together they form a set of 'planar' points
    ret_left, planar_left = cvh.extractChessboardFeatures(img_left, boardSize)
    ret_right, planar_right = cvh.extractChessboardFeatures(
        img_right, boardSize)
    if not ret_left or not ret_right:
        print("Chessboard is not (entirely) in sight, aborting.")
        return

    # Save exact 2D and 3D points of the chessboard
    num_planar = planar_left.shape[0]
    planar_left_orig, planar_right_orig = planar_left, planar_right
    objp_orig = objp

    # Calculate P matrix of left pose, in reality this one is known
    ret, rvec_left, tvec_left = cv2.solvePnP(objp, planar_left, K_left,
                                             distCoeffs_left)
    P_left = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_left), tvec_left)

    # Calculate P matrix of right pose, in reality this one is unknown
    ret, rvec_right, tvec_right = cv2.solvePnP(objp, planar_right, K_right,
                                               distCoeffs_right)
    P_right = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_right), tvec_right)

    # Load previous non-planar inliers, if desired
    if nonplanar_left.size and raw_input(
            "Type 'no' if you don't want to use previous non-planar inliers: "
    ).strip().lower() != "no":
        nonplanar_left = map(tuple, nonplanar_left)
        nonplanar_right = map(tuple, nonplanar_right)
    else:
        nonplanar_left = []
        nonplanar_right = []

    ### Create predefined non-planar 3D geometry, to enable automatic selection of non-planar points

    if raw_input(
            "Type 'yes' if you want to create and select predefined non-planar geometry? "
    ).strip().lower() == "yes":

        # A cube
        cube_coords = np.array([(0., 0., 0.), (1., 0., 0.), (1., 1., 0.),
                                (0., 1., 0.), (0., 0., 1.), (1., 0., 1.),
                                (1., 1., 1.), (0., 1., 1.)])
        cube_coords *= 2  # scale
        cube_edges = np.array([(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6),
                               (6, 7), (7, 4), (0, 4), (1, 5), (2, 6), (3, 7)])

        # An 8-point circle
        s2 = 1. / np.sqrt(2)
        circle_coords = np.array([(1., 0., 0.), (s2, s2, 0.), (0., 1., 0.),
                                  (-s2, s2, 0.), (-1., 0., 0.), (-s2, -s2, 0.),
                                  (0., -1., 0.), (s2, -s2, 0.)])
        circle_edges = np.array([(i, (i + 1) % 8) for i in range(8)])

        # Position 2 cubes and 2 circles in the scene
        cube1 = np.array(cube_coords)
        cube1[:, 1] -= 1
        cube2 = np.array(cube_coords)
        cube2 = cvh.Rodrigues((0., 0., pi / 4)).dot(cube2.T).T
        cube2[:, 0] += 4
        cube2[:, 1] += 3
        cube2[:, 2] += 2
        circle1 = np.array(circle_coords)
        circle1 *= 2
        circle1[:, 1] += 5
        circle1[:, 2] += 2
        circle2 = np.array(circle_coords)
        circle2 = cvh.Rodrigues((pi / 2, 0., 0.)).dot(circle2.T).T
        circle2[:, 1] += 5
        circle2[:, 2] += 2

        # Print output to be used in Blender (see "calibrate_pose_visualization.blend")
        print()
        print("Cubes")
        print("edges_cube = \\\n", map(list, cube_edges))
        print("coords_cube1 = \\\n", map(list, cube1))
        print("coords_cube2 = \\\n", map(list, cube2))
        print()
        print("Circles")
        print("edges_circle = \\\n", map(list, circle_edges))
        print("coords_circle1 = \\\n", map(list, circle1))
        print("coords_circle2 = \\\n", map(list, circle2))
        print()

        color = rgb(0, 200, 150)
        for verts, edges in zip(
            [cube1, cube2, circle1, circle2],
            [cube_edges, cube_edges, circle_edges, circle_edges]):
            out_left = cvh.wireframe3DGeometry(img_left, verts, edges, color,
                                               rvec_left, tvec_left, K_left,
                                               distCoeffs_left)
            out_right = cvh.wireframe3DGeometry(img_right, verts, edges, color,
                                                rvec_right, tvec_right,
                                                K_right, distCoeffs_right)
            valid_match_idxs = [
                i for i, (pl, pr) in enumerate(zip(out_left, out_right))
                if 0 <= min(pl[0], pr[0]) <= max(pl[0], pr[0]) < imageSize[0]
                and 0 <= min(pl[1], pr[1]) <= max(pl[1], pr[1]) < imageSize[1]
            ]
            nonplanar_left += map(tuple,
                                  out_left[valid_match_idxs])  # concatenate
            nonplanar_right += map(tuple,
                                   out_right[valid_match_idxs])  # concatenate

        nonplanar_left = np.rint(nonplanar_left).astype(int)
        nonplanar_right = np.rint(nonplanar_right).astype(int)

    ### User can manually create matches between non-planar objects

    class ManualMatcher:
        def __init__(self, window_name, images, points):
            self.window_name = window_name
            self.images = images
            self.points = points
            self.img_idx = 0  # 0: left; 1: right
            cv2.namedWindow(window_name)
            cv2.setMouseCallback(window_name, self.onMouse)

        def onMouse(self, event, x, y, flags, userdata):
            if event == cv2.EVENT_LBUTTONDOWN:
                self.points[self.img_idx].append((x, y))

            elif event == cv2.EVENT_LBUTTONUP:
                # Switch images in a ping-pong fashion
                if len(self.points[0]) != len(self.points[1]):
                    self.img_idx = 1 - self.img_idx

        def run(self):
            print("Select your matches. Press SPACE when done.")
            while True:
                img = cv2.drawKeypoints(self.images[self.img_idx], [
                    cv2.KeyPoint(p[0], p[1], 7.)
                    for p in self.points[self.img_idx]
                ],
                                        color=rgb(0, 0, 255))
                cv2.imshow(self.window_name, img)
                key = cv2.waitKey(50) & 0xFF
                if key == ord(' '): break  # finish if SPACE is pressed

            num_points_diff = len(self.points[0]) - len(self.points[1])
            if num_points_diff:
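                # (num_points_diff < 0) indexes the longer list (0: left, 1: right); trim its unmatched tail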
                del self.points[num_points_diff < 0][-abs(num_points_diff):]
            print("Selected", len(self.points[0]), "pairs of matches.")

    # Execute the manual matching
    nonplanar_left, nonplanar_right = list(nonplanar_left), list(
        nonplanar_right)
    ManualMatcher("Select match-points of non-planar objects",
                  [img_left, img_right],
                  [nonplanar_left, nonplanar_right]).run()
    num_nonplanar = len(nonplanar_left)
    has_nonplanar = (num_nonplanar > 0)
    if 0 < num_nonplanar < 8:
        print("Warning: you've selected less than 8 pairs of matches.")
    nonplanar_left = np.array(nonplanar_left).reshape(num_nonplanar, 2)
    nonplanar_right = np.array(nonplanar_right).reshape(num_nonplanar, 2)

    mute_chessboard_corners = False
    if num_nonplanar >= 8 and raw_input(
            "Type 'yes' if you want to exclude chessboard corners from calculations? "
    ).strip().lower() == "yes":
        mute_chessboard_corners = True
        num_planar = 0
        planar_left = np.zeros((0, 2))
        planar_right = np.zeros((0, 2))
        objp = np.zeros((0, 3))
        print("Chessboard corners muted.")

    has_prev_triangl_points = not mute_chessboard_corners  # normally in this example, this should always be True, unless user forces False

    ### Undistort points => normalized coordinates

    allfeatures_left = np.concatenate((planar_left, nonplanar_left))
    allfeatures_right = np.concatenate((planar_right, nonplanar_right))

    allfeatures_nrm_left = cv2.undistortPoints(np.array([allfeatures_left]),
                                               K_left, distCoeffs_left)[0]
    allfeatures_nrm_right = cv2.undistortPoints(np.array([allfeatures_right]),
                                                K_right, distCoeffs_right)[0]

    planar_nrm_left = allfeatures_nrm_left[:planar_left.shape[0]]
    nonplanar_nrm_left = allfeatures_nrm_left[planar_left.shape[0]:]
    planar_nrm_right = allfeatures_nrm_right[:planar_right.shape[0]]
    nonplanar_nrm_right = allfeatures_nrm_right[planar_right.shape[0]:]

    ### Calculate relative pose using the essential matrix

    # Only do pose estimation if we've got 2D points of non-planar geometry
    if has_nonplanar:

        # Determine inliers by calculating the fundamental matrix on all points,
        # except when mute_chessboard_corners is True: only use nonplanar_nrm_left and nonplanar_nrm_right
        F, status = cv2.findFundamentalMat(
            allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_RANSAC,
            0.006 * np.amax(allfeatures_nrm_left),
            0.99)  # threshold from [Snavely07 4.1]
        # OpenCV BUG: "status" matrix is not initialized with zeros in some cases, reproducible with 2 sets of 8 2D points equal to (0., 0.)
        #   maybe because "_mask.create()" is not called:
        #   https://github.com/Itseez/opencv/blob/7e2bb63378dafb90063af40caff20c363c8c9eaf/modules/calib3d/src/ptsetreg.cpp#L185
        # Workaround to test on outliers: use "!= 1", instead of "== 0"
        print(status.T)
        inlier_idxs = np.where(status == 1)[0]
        print("Removed", allfeatures_nrm_left.shape[0] - inlier_idxs.shape[0],
              "outlier(s).")
        num_planar = np.where(inlier_idxs < num_planar)[0].shape[0]
        num_nonplanar = inlier_idxs.shape[0] - num_planar
        print("num chessboard inliers:", num_planar)
        allfeatures_left, allfeatures_right = allfeatures_left[
            inlier_idxs], allfeatures_right[inlier_idxs]
        allfeatures_nrm_left, allfeatures_nrm_right = allfeatures_nrm_left[
            inlier_idxs], allfeatures_nrm_right[inlier_idxs]
        if not mute_chessboard_corners:
            objp = objp[inlier_idxs[:num_planar]]
            planar_left, planar_right = \
                    allfeatures_left[:num_planar], allfeatures_right[:num_planar]
            planar_nrm_left, planar_nrm_right = \
                    allfeatures_nrm_left[:num_planar], allfeatures_nrm_right[:num_planar]
        nonplanar_nrm_left, nonplanar_nrm_right = allfeatures_nrm_left[
            num_planar:], allfeatures_nrm_right[num_planar:]

        # Calculate first solution of relative pose
        if allfeatures_nrm_left.shape[0] >= 8:
            F, status = cv2.findFundamentalMat(allfeatures_nrm_left,
                                               allfeatures_nrm_right,
                                               cv2.FM_8POINT)
            print("F:")
            print(F)
        else:
            print("Error: less than 8 pairs of inliers found, "
                  "I can't perform the 8-point algorithm.")
        #E = (K_right.T) .dot (F) .dot (K_left)    # "Multiple View Geometry in CV" by Hartley&Zisserman (9.12)
        E = F  # K = I because we already normalized the coordinates
        print("Correct determinant of essential matrix?",
              (abs(cv2.determinant(E)) <= 1e-7))
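        # A valid essential matrix E = [t]x * R has rank 2, with two equal nonzero
        # singular values and a third equal to zero [Hartley&Zisserman result 9.17];
        # the check below warns when the two leading singular values differ too much.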
        w, u, vt = cv2.SVDecomp(E, flags=cv2.SVD_MODIFY_A)
        print(w)
        if min(w[0], w[1]) / max(w[0], w[1]) < 0.7:
            print("Essential matrix' 'w' vector deviates too much from expected")
        W = np.array([
            [0., -1., 0.],  # Hartley&Zisserman (9.13)
            [1., 0., 0.],
            [0., 0., 1.]
        ])
        R = u.dot(W).dot(vt)  # Hartley&Zisserman result 9.19
        det_R = cv2.determinant(R)
        print("Coherent rotation?", (abs(abs(det_R) - 1) <= 1e-7))
        if abs(det_R + 1) < 1e-7:  # det(R) == -1, see http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
            # E *= -1:
            vt *= -1  # svd(-E) = u * w * (-v).T
            R *= -1  # => u * W * (-v).T = -R
            print("det(R) == -1, compensated.")
        t = u[:, 2:3]  # Hartley&Zisserman result 9.19
        P = trfm.P_from_R_and_t(R, t)
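        # The SVD-based decomposition yields four candidate relative poses
        # [Hartley&Zisserman result 9.19]:
        #     P = [U W V.T | +u3], [U W V.T | -u3], [U W.T V.T | +u3], [U W.T V.T | -u3]
        # Only one of them places the scene in front of both cameras (cheirality),
        # which is what the checks below select for.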

        # Select the right solution, i.e. the one where the (center of the) 3D points
        # lies in front of both cameras; when has_prev_triangl_points == True,
        # we can do this a lot faster.

        test_point = np.ones((4, 1))  # 3D point to test the solutions with

        if has_prev_triangl_points:
            print("Using advantage of already triangulated points' position")
            print(P)

            # Select the closest already triangulated cloudpoint idx to the center of the cloud
            center_of_mass = objp.sum(axis=0) / objp.shape[0]
            center_objp_idx = np.argmin(
                ((objp - center_of_mass)**2).sum(axis=1))
            print("center_objp_idx:", center_objp_idx)

            # Select the corresponding image points
            center_imgp_left = planar_nrm_left[center_objp_idx]
            center_imgp_right = planar_nrm_right[center_objp_idx]

            # Select the corresponding 3D point
            test_point[0:3, :] = objp[center_objp_idx].reshape(3, 1)
            print("test_point:")
            print(test_point)
            # Transform to the left camera's axis-system; note that
            # are_points_in_front_of_left_camera is then automatically True
            test_point = P_left.dot(test_point)

            center_objp_triangl, triangl_status = iterative_LS_triangulation(
                center_imgp_left.reshape(1, 2), np.eye(4),
                center_imgp_right.reshape(1, 2), P)
            print("triangl_status:", triangl_status)

            if center_objp_triangl.dot(test_point[0:3, 0:1]) < 0:
                P[0:3, 3:4] *= -1  # do a baseline reversal
                print(P, "fixed triangulation inversion")

            if P[0:3, :].dot(test_point)[2, 0] < 0:  # are_points_in_front_of_right_camera is False
                P[0:3, 0:3] = u.dot(W.T).dot(vt)  # use the other solution of the twisted pair ...
                P[0:3, 3:4] *= -1  # ... and also do a baseline reversal
                print(P, "fixed camera projection inversion")

        elif num_nonplanar > 0:
            print("Doing all ambiguity checks since there are no already triangulated points")
            print(P)

            for i in range(4):  # check all 4 solutions
                objp_triangl, triangl_status = iterative_LS_triangulation(
                    nonplanar_nrm_left, np.eye(4), nonplanar_nrm_right, P)
                print("triangl_status:", triangl_status)
                # select the center of the triangulated cloudpoints
                center_of_mass = objp_triangl.sum(axis=0) / len(objp_triangl)
                test_point[0:3, :] = center_of_mass.reshape(3, 1)
                print("test_point:")
                print(trfm.P_inv(P_left).dot(test_point))

                if (np.eye(3, 4).dot(test_point)[2, 0] > 0
                        and P[0:3, :].dot(test_point)[2, 0] > 0):
                    break  # are_points_in_front_of_cameras is True

                if i % 2:
                    P[0:3, 0:3] = u.dot(W.T).dot(vt)  # use the other solution of the twisted pair
                    print(P, "using the other solution of the twisted pair")
                else:
                    P[0:3, 3:4] *= -1  # do a baseline reversal
                    print(P, "doing a baseline reversal")

        are_points_in_front_of_left_camera = (np.eye(3, 4).dot(test_point)[2, 0] > 0)
        are_points_in_front_of_right_camera = (P[0:3, :].dot(test_point)[2, 0] > 0)
        print("are_points_in_front_of_cameras?",
              are_points_in_front_of_left_camera,
              are_points_in_front_of_right_camera)
        if not (are_points_in_front_of_left_camera
                and are_points_in_front_of_right_camera):
            print("No valid solution found!")

        P_left_result = trfm.P_inv(P).dot(P_right)
        P_right_result = P.dot(P_left)
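        # "P" maps left-camera coordinates to right-camera coordinates, so each
        # absolute pose follows by composition: world -> left -> right (and back)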

        print("P_left")
        print(P_left)
        print("P_rel")
        print(P)
        print("P_right")
        print(P_right)
        print(
            "=> error:",
            reprojection_error([objp_orig] * 2,
                               [planar_left_orig, planar_right_orig],
                               cameraMatrix, distCoeffs,
                               [rvec_left, rvec_right],
                               [tvec_left, tvec_right])[1])

        print("P_left_result")
        print(P_left_result)
        print("P_right_result")
        print(P_right_result)
        # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
        print(
            "=> error:",
            reprojection_error([objp_orig] * 2,
                               [planar_left_orig, planar_right_orig],
                               cameraMatrix, distCoeffs, [
                                   cvh.Rodrigues(P_left[0:3, 0:3]),
                                   cvh.Rodrigues(P_right_result[0:3, 0:3])
                               ], [P_left[0:3, 3], P_right_result[0:3, 3]])[1])

    ### Triangulate

    objp_result = np.zeros((0, 3))

    if has_prev_triangl_points:
        # Do triangulation of all points
        # NOTICE: in a real case, we should only use not yet triangulated points that are in sight
        objp_result, triangl_status = iterative_LS_triangulation(
            allfeatures_nrm_left, P_left, allfeatures_nrm_right, P_right)
        print("triangl_status:", triangl_status)

    elif num_nonplanar > 0:
        # We already did the triangulation during the pose estimation, but we still need to backtransform them from the left camera axis-system
        objp_result = trfm.P_inv(P_left).dot(
            np.concatenate((objp_triangl.T, np.ones((1, len(objp_triangl))))))
        objp_result = objp_result[0:3, :].T
        print(objp_triangl)

    print("objp:")
    print(objp)
    print(
        "=> error:",
        reprojection_error([objp_orig] * 2,
                           [planar_left_orig, planar_right_orig], cameraMatrix,
                           distCoeffs, [rvec_left, rvec_right],
                           [tvec_left, tvec_right])[1])

    print("objp_result of chessboard:")
    print(objp_result[:num_planar, :])
    if has_nonplanar:
        print("objp_result of non-planar geometry:")
        print(objp_result[num_planar:, :])
    if num_planar + num_nonplanar == 0:
        print("=> error: undefined")
    else:
        print(
            "=> error:",
            reprojection_error([objp_result] * 2,
                               [allfeatures_left, allfeatures_right],
                               cameraMatrix, distCoeffs,
                               [rvec_left, rvec_right],
                               [tvec_left, tvec_right])[1])

    ### Print total combined reprojection error

    # We only have both pose estimation and triangulation if we've got 2D points of non-planar geometry
    if has_nonplanar:
        if num_planar + num_nonplanar == 0:
            print("=> error: undefined")
        else:
            # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
            print(
                "Total combined error:",
                reprojection_error(
                    [objp_result] * 2, [allfeatures_left, allfeatures_right],
                    cameraMatrix, distCoeffs, [
                        cvh.Rodrigues(P_left[0:3, 0:3]),
                        cvh.Rodrigues(P_right_result[0:3, 0:3])
                    ], [P_left[0:3, 3], P_right_result[0:3, 3]])[1])
    """
    Further things we can do (in a real case):
        1. solvePnP() on inliers (including previously already triangulated points),
            or should we use solvePnPRansac() (in that case what to do with the outliers)?
        2. re-triangulate on new pose estimation
    """

    ### Print summary to be used in Blender to visualize (see "calibrate_pose_visualization.blend")

    print("Camera poses:")
    if has_nonplanar:
        print("Left")
        print_pose(rvec_left, tvec_left)
        print()
        print("Left_result")
        print_pose(cvh.Rodrigues(P_left_result[0:3, 0:3]), P_left_result[0:3, 3:4])
        print()
        print("Right")
        print_pose(rvec_right, tvec_right)
        print()
        print("Right_result")
        print_pose(cvh.Rodrigues(P_right_result[0:3, 0:3]),
                   P_right_result[0:3, 3:4])
        print()
    else:
        print("<skipped: no non-planar objects have been selected>")
        print()

    print("Points:")
    print("Chessboard")
    print("coords = \\\n", map(list, objp_result[:num_planar, :]))
    print()
    if has_nonplanar:
        print("Non-planar geometry")
        print("coords_nonplanar = \\\n", map(list,
                                             objp_result[num_planar:, :]))
        print()

    ### Return the last successfully (manually) matched non-planar imagepoints, so the caller can remember them
    return nonplanar_left, nonplanar_right
Example #20
0
# -*- coding: utf-8 -*-
from __future__ import print_function  # Python 3 compatibility

import os
import numpy as np
import glob
import cv2

import sys
sys.path.append(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "..",
                 "python_libs"))
import cv2_helpers as cvh
import calibration_tools

blue = cvh.rgb(0, 0, 255)

# Tweaking parameters
max_OF_error = 12.
max_radius_OF_to_FAST = {}
max_dist_ratio = {}
allow_chessboard_matcher_and_refiner = True

# optimized for chessboard features
max_radius_OF_to_FAST["chessboard"] = 4.  # FAST detects points *around* instead of *on* corners
max_dist_ratio["chessboard"] = 1.  # disable ratio test

# optimized for FAST features
max_radius_OF_to_FAST["FAST"] = 2.
max_dist_ratio["FAST"] = 0.7