Code Example #1
import cv2
import numpy as np


def calcJm(img, H):
    # Segmentation energy for a sky/ground split: H[x] gives the border row
    # for column x; pixels above it are sky samples, the rest are ground.
    h, w, c = img.shape
    skysample = np.zeros((sum(H), 3))
    gndsample = np.zeros((w * h - sum(H), 3))
    ks = 0
    kg = 0
    for x in range(w):
        for y in range(H[x]):
            skysample[ks, 0] = img[y, x, 0]
            skysample[ks, 1] = img[y, x, 1]
            skysample[ks, 2] = img[y, x, 2]
            ks = ks + 1
        for y in range(h - H[x]):
            gndsample[kg, 0] = img[y + H[x], x, 0]
            gndsample[kg, 1] = img[y + H[x], x, 1]
            gndsample[kg, 2] = img[y + H[x], x, 2]
            kg = kg + 1
    skycov, skymean = cv2.calcCovarMatrix(skysample, mean=None,
                                          flags=cv2.COVAR_ROWS | cv2.COVAR_NORMAL)
    gndcov, gndmean = cv2.calcCovarMatrix(gndsample, mean=None,
                                          flags=cv2.COVAR_ROWS | cv2.COVAR_NORMAL)
    skyret, skyeigval, skyeigvec = cv2.eigen(skycov)
    gndret, gndeigval, gndeigvec = cv2.eigen(gndcov)
    skyeigvalsum = float(np.sum(skyeigval))
    gndeigvalsum = float(np.sum(gndeigval))
    skydet = cv2.determinant(skycov)
    gnddet = cv2.determinant(gndcov)
    Jm = 1.0 / (skydet + gnddet + skyeigvalsum * skyeigvalsum +
                gndeigvalsum * gndeigvalsum)
    return Jm
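The two sampling loops above can be written with NumPy boolean indexing; a minimal sketch under the same assumptions (img is an h x w x 3 array, H holds one border row per column; split_sky_ground is an illustrative helper, not part of the original snippet):

import numpy as np

def split_sky_ground(img, H):
    # Rows above the border H[x] are sky, everything below is ground.
    h, w, _ = img.shape
    rows = np.arange(h).reshape(-1, 1)             # column vector of row indices
    sky_mask = rows < np.asarray(H).reshape(1, w)  # True where a pixel is sky
    sky = img[sky_mask].astype(np.float64)         # shape (num_sky, 3)
    gnd = img[~sky_mask].astype(np.float64)        # shape (num_ground, 3)
    return sky, gnd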
Code Example #2
def calculate_sky_energy(border, src_image):

    # Build the sky image mask and the ground image mask
    sky_mask = make_sky_mask(src_image, border, 1)
    ground_mask = make_sky_mask(src_image, border, 0)

    # Extract the sky image and the ground image
    sky_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(sky_mask, cv2.COLOR_GRAY2BGR))
    ground_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(ground_mask, cv2.COLOR_GRAY2BGR))

    # Compute the covariance matrices of the sky and ground images
    sky_image = sky_image_ma.compressed()
    ground_image = ground_image_ma.compressed()

    sky_image.shape = (sky_image.size//3, 3)
    ground_image.shape = (ground_image.size//3, 3)

    sky_covar, sky_mean = cv2.calcCovarMatrix(sky_image, mean=None, flags=cv2.COVAR_ROWS|cv2.COVAR_NORMAL|cv2.COVAR_SCALE)
    sky_retval, sky_eig_val, sky_eig_vec = cv2.eigen(sky_covar)

    ground_covar, ground_mean = cv2.calcCovarMatrix(ground_image, mean=None,flags=cv2.COVAR_ROWS|cv2.COVAR_NORMAL|cv2.COVAR_SCALE)
    ground_retval, ground_eig_val, ground_eig_vec = cv2.eigen(ground_covar)

    gamma = 2  # original parameter from the paper

    sky_det = cv2.determinant(sky_covar)
    #sky_eig_det = cv2.determinant(sky_eig_vec)
    ground_det = cv2.determinant(ground_covar)
    #ground_eig_det = cv2.determinant(ground_eig_vec)

    # Energy from the paper: 1 / (gamma*|Sigma_sky| + |Sigma_gnd| + gamma*lambda_sky + lambda_gnd)
    sky_energy = 1 / ((gamma * sky_det + ground_det) + (gamma * sky_eig_val[0,0] + ground_eig_val[0,0]))

    return sky_energy
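For reference, the covariance / eigenvalue / determinant calls shared by the two snippets above, in self-contained form (random samples stand in for real sky pixels; keyword handling can vary between OpenCV versions):

import cv2
import numpy as np

samples = np.random.rand(100, 3)  # 100 BGR pixels, one sample per row
covar, mean = cv2.calcCovarMatrix(samples, mean=None,
                                  flags=cv2.COVAR_ROWS | cv2.COVAR_NORMAL | cv2.COVAR_SCALE)
retval, eig_val, eig_vec = cv2.eigen(covar)  # eigenvalues sorted in descending order
det = cv2.determinant(covar)                 # determinant of the 3x3 covariance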
Code Example #3
def projectionMatrix(camera_parameters, homography):
    # Extrinsic parameter matrix
    M = np.dot(np.linalg.inv(camera_parameters), homography)

    _lambda1 = np.linalg.norm(M[:, 0])
    _lambda2 = np.linalg.norm(M[:, 1])
    _lambda = (_lambda1 + _lambda2) / np.float32(2)

    M[:, 0] = M[:, 0] / _lambda1
    M[:, 1] = M[:, 1] / _lambda2
    t = M[:, 2] / _lambda
    R = np.c_[M[:, 0], M[:, 1], np.cross(M[:, 0], M[:, 1])]
    #R = np.c_[np.cross(R[:, 1], R[:, 2]), R[:, 1], R[:, 2]]

    if cv2.determinant(R) < 0:
        R[:, 2] = R[:, 2] * (-1)

    # Snap the estimated matrix onto the nearest true rotation via SVD
    W, U, Vt = cv2.SVDecomp(R)
    R = U.dot(Vt)

    extr = np.float32([[R[0, 0], R[0, 1], R[0, 2], t[0]],
                       [R[1, 0], R[1, 1], R[1, 2], t[1]],
                       [R[2, 0], R[2, 1], R[2, 2], t[2]]])

    return np.dot(camera_parameters, extr)
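The SVD step above projects the estimated matrix onto the nearest true rotation; a small standalone check helper (illustrative, not from the original project) makes the invariants explicit:

import cv2
import numpy as np

def is_rotation(R, tol=1e-6):
    # A proper rotation is orthonormal and has determinant +1.
    return (np.allclose(R.T.dot(R), np.eye(3), atol=tol)
            and abs(cv2.determinant(R) - 1.0) < tol)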
Code Example #4
def H_from_E(E, RandT=False):
    '''Returns a 4x4x4 array holding the four candidate 4x4 pose matrices H.
    Or returns the two rotations and two translation vectors when RandT is True.
    '''
    S,U,V = cv2.SVDecomp(E)    # assumes: import cv2; from numpy import array, dot, zeros
    #TIP: Recover E by dot(U,dot(diag(S.flatten()),V))
    W = array([[0,-1,0],[ 1,0,0],[0,0,1]])

    R1 = dot(dot(U,W),V)
    R2 = dot(dot(U,W.T),V)
    if cv2.determinant(R1) < 0:
        R1,R2 = -R1,-R2
    t1 = U[:,2]
    t2 = -t1

    if RandT:
        return R1, R2, t1, t2

    H = zeros((4,4,4))
    H[:2,:3,:3] = R1
    H[2:,:3,:3] = R2
    H[[0,2],:3,3] = t1
    H[[1,3],:3,3] = t2
    H[:,3,3] = 1

    return H
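A hedged usage sketch: build a synthetic essential matrix E = [t]x R and feed it to the function above (values here are made up for illustration):

import cv2
import numpy as np

R_true, _ = cv2.Rodrigues(np.array([0.0, 0.1, 0.0]))  # small rotation about y
t_true = np.array([1.0, 0.0, 0.0])
t_cross = np.array([[0., -t_true[2], t_true[1]],
                    [t_true[2], 0., -t_true[0]],
                    [-t_true[1], t_true[0], 0.]])
E = t_cross.dot(R_true)

R1, R2, t1, t2 = H_from_E(E, RandT=True)
# One of R1/R2 matches R_true, and t1/t2 equal the true baseline up to sign/scale.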
Code Example #5
def kabsch(P, Q, calcTranslation=False, centP=None, centQ=None):
    '''
    Perform Kabsch's algorithm and return optimal rotation and translation
    '''
    # Pt * Q  (flatten is a project-specific helper, not shown here)
    cov_mat = np.dot(np.transpose(flatten(P)), flatten(Q))

    d, u, vt = cv2.SVDecomp(cov_mat)

    rot = np.dot(np.transpose(vt), np.transpose(u))
    v = np.transpose(vt)

    # Check and fix special reflection case
    if cv2.determinant(rot) < 0:
        # rot[:, 2] = np.multiply(rot[:, 2], -1)
        v[:, 2] = np.multiply(v[:, 2], -1)
        rot = np.dot(v, np.transpose(u))

    t = None

    if calcTranslation:
        t = np.dot(-rot, np.transpose(centP)) + np.transpose(centQ)

    # return rotation and translation matrices
    return rot, t
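Since flatten is a project-specific helper that is not shown, here is a self-contained sketch of the same Kabsch step on plain Nx3 arrays, recovering a known rotation:

import cv2
import numpy as np

rng = np.random.default_rng(0)
P = rng.random((10, 3)) - 0.5                  # centred source points
theta = np.pi / 6
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.],
                   [np.sin(theta),  np.cos(theta), 0.],
                   [0., 0., 1.]])
Q = P.dot(R_true.T)                            # rotated copy of P

d, u, vt = cv2.SVDecomp(P.T.dot(Q))            # SVD of the 3x3 covariance matrix
rot = vt.T.dot(u.T)
if cv2.determinant(rot) < 0:                   # same reflection fix as in kabsch()
    vt[2, :] *= -1
    rot = vt.T.dot(u.T)
assert np.allclose(rot, R_true, atol=1e-6)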
Code Example #6
    def harris_response(self, H):
        # check if we are in this method
        if self.flag == 1:
            print('>> harris_response')
            self.flag = 0

        # Harris corner response: det(H) - k * trace(H)^2, with k = 0.04
        trace = cv2.trace(H)
        response = cv2.determinant(H) - 0.04 * np.power(trace[0], 2)
        return response
Code Example #7
def calc_harris_value_matrix(img, alpha=0.04):
    grad_x, grad_y = grad(img)
    i_x_2 = cv2.GaussianBlur(grad_x ** 2, (5, 5), 0)
    i_y_2 = cv2.GaussianBlur(grad_y ** 2, (5, 5), 0)
    i_x_y = cv2.GaussianBlur(grad_x * grad_y, (5, 5), 0)

    r = np.empty(img.shape)
    for (i, j), _ in np.ndenumerate(r):
        moment_list = [i_x_2[i, j], i_x_y[i, j], i_x_y[i, j], i_y_2[i, j]]
        moment = np.array(moment_list).reshape(2, 2)
        r[i, j] = cv2.determinant(moment) - alpha * cv2.trace(moment)[0] ** 2
    return r
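The per-pixel loop above is easy to vectorize; a sketch of the same response map (cv2.Sobel stands in for the project's grad helper, which is an assumption):

import cv2
import numpy as np

def calc_harris_value_matrix_fast(img, alpha=0.04):
    # Same structure-tensor entries as above, computed with array arithmetic.
    grad_x = cv2.Sobel(img.astype(np.float64), cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(img.astype(np.float64), cv2.CV_64F, 0, 1, ksize=3)
    i_x_2 = cv2.GaussianBlur(grad_x * grad_x, (5, 5), 0)
    i_y_2 = cv2.GaussianBlur(grad_y * grad_y, (5, 5), 0)
    i_x_y = cv2.GaussianBlur(grad_x * grad_y, (5, 5), 0)
    # det(M) - alpha * trace(M)^2, evaluated for every pixel at once
    return i_x_2 * i_y_2 - i_x_y * i_x_y - alpha * (i_x_2 + i_y_2) ** 2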
Code Example #8
File: tobrachmann.py    Project: paroj/linemod_dataset
def lm2brach(R, t):
    """
    convert linemod to brachmann pose format
    """
    R = t_LM2cv.dot(R)
    t = t_LM2cv.dot(t)

    # result may be reconstructed behind the camera
    if cv2.determinant(R) < 0:
        R *= -1
        t *= -1

    return t_cv2brach.dot(R.T).T, t  # TODO: why don't we transform t as well?
Code Example #9
    def fixedCvInvert(self, H):
        """If the determinant is zero, OpenCV returns a wrong inverted matrix.

        Args:
            H: 3x3 matrix

        Returns:
            The inverted matrix
        """
        if (cv2.determinant(H) != 0.0):
            return cv2.invert(H)[1]
        else:
            return np.array([[1., 0., -H[0, -1]], [0., 1., -H[1, -1]],
                             [0., 0., 0.]])
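Note the exact floating-point comparison with 0.0 above; a tolerance test is usually safer. A sketch (safe_invert and eps are illustrative names, not from the original class):

import cv2

def safe_invert(H, eps=1e-12):
    # Treat near-singular matrices as singular instead of trusting an exact == 0.0 test.
    if abs(cv2.determinant(H)) > eps:
        return cv2.invert(H)[1]
    return None  # let the caller decide how to handle a singular matrix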
Code Example #10
    def __call__(self, img):
        img_pad, img_norm, pad = self.preprocess_img(img)
        
        list_sources, list_keypoints = self.detect_hand(img_norm)
        if list_sources is None:
            return None, None

        list_joints = []
        list_bbox = []
        for i in range(len(list_sources)):

            source = list_sources[i]
            keypoints = list_keypoints[i]

            # calculating transformation from img_pad coords
            # to img_landmark coords (cropped hand image)
            scale = max(img.shape) / 256
            Mtr = cv2.getAffineTransform(
                source * scale,
                self._target_triangle
            )

            img_landmark = cv2.warpAffine(
                self._im_normalize(img_pad), Mtr, (256,256)
            )
        
            joints = self.predict_joints(img_landmark)
        
            # adding the [0,0,1] row to make the matrix square
            Mtr = self._pad1(Mtr.T).T
            Mtr[2,:2] = 0
            
            Minv = np.linalg.inv(Mtr)

            # projecting 2d keypoints back into original image coordinate space
            kp_orig = (self._pad1(joints[:,:2]) @ Minv.T)[:,:2]
            box_orig = (self._target_box @ Minv.T)[:,:2]
            kp_orig -= pad[::-1]
            box_orig -= pad[::-1]

            joints[:,:2] = kp_orig
            if joints.shape[1] == 3:
                # scale Z coordinate proportionally
                joints[:,2] = joints[:,2] * np.sqrt(cv2.determinant(Minv))

            list_joints.append(joints)
            list_bbox.append(box_orig)
            
        return list_joints, list_bbox
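The Z-scaling line works because the determinant of an affine transform is its area scale factor, so its square root is the linear scale; a minimal illustration:

import cv2
import numpy as np

M = np.array([[2., 0., 5.],
              [0., 2., 7.],
              [0., 0., 1.]])        # uniform 2x scale plus a translation
area_scale = cv2.determinant(M)     # 4.0: areas are scaled by 4
linear_scale = np.sqrt(area_scale)  # 2.0: lengths (and Z above) are scaled by 2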
Code Example #11
    def runLoop(self, files, paths):
        """
            Process input frame-by-frame
        """

        logger.info(' * Analysing image sequence')

        if len(paths) == 0:
            logger.warn(' Empty sequence, nothing to be done')
            return
        
        # sequence of frames
        frames = []
        for (i, path) in enumerate(paths):

            logger.info('')
            logger.info(' * frame {} : {}'.format(i, path) )

            # set up frame object
            frame = Frame(index=i, name=files[i])
            frames.append(frame)

            # load frame image
            frame.image = util_cv.load_image(path)
            frame.image_gray = cv2.cvtColor(frame.image, cv2.COLOR_BGR2GRAY)

            # detect and compute features
            frame.keypoints, frame.descriptors = self.computeFeatures(image=frame.image_gray)

            # match feature descriptors
            frame.matches = self.matchFeatures(frame.descriptors)

            # debug features
            if self.args.debug:
                debugImage = util_cv.drawMatches(self.im_reference, self.keypoints_ref, frame.image, frame.keypoints, frame.matches)
                util_cv.display_image(debugImage, title='DEBUG')

            ## compute homography from the reference object to the scene
            frame.homography = self.computeHomography(self.keypoints_ref, frame.keypoints, frame.matches)

            # debug homography 
            if self.args.debug:
                debugImage = frame.image.copy()
                util_cv.draw_homography(self.im_reference, frame.homography.astype('float32'), debugImage)
                debugImage = util_cv.drawMatches(self.im_reference, self.keypoints_ref, debugImage, frame.keypoints, frame.matches)
                util_cv.display_image(debugImage, title='DEBUG')


            ## check if homography is bad
            bad_homo = False
            # If the determinant of the upper-left 2x2 block of the homography is negative, the mapping is not orientation-preserving.
            # src: http://answers.opencv.org/question/2588/check-if-homography-is-good/
            # This filters out some bad frames, although it's not a perfect criterion (tested with the bad results using BruteForceMatcher)
            det2x2 = cv2.determinant(frame.homography[0:2,0:2])
            if det2x2 < 0:
                bad_homo = True

            # This criterion can be found in the image stitching pipeline of OpenCV...
            det3x3 = abs(cv2.determinant(frame.homography))
            if det3x3 < 0.00001:#sys.float_info.epsilon:
                bad_homo = True

            if bad_homo:
                
                # homography is useless, just show the original picture
                frame.im_replacemt = frame.image.copy()

            else:

                # warp replacement image and merge with the frame
                h,w = frame.image.shape[:2]
                im_warped = cv2.warpPerspective(self.im_replacemt, frame.homography, dsize=(w,h))
                im_replacemt = numpy.ma.array(frame.image, mask=im_warped > 0, fill_value=0).filled()
                im_replacemt = cv2.bitwise_or(im_replacemt, im_warped)
                frame.im_replacemt = im_replacemt


            # debug result
            if self.args.debug:
                color = (255,0,0)
                if det2x2 < 0 or det3x3 < 0.00001:
                    color = (0,0,255)
                cv2.putText(frame.im_replacemt, 'Frame {}: {}'.format(i, os.path.basename(path)), (0,25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0))
                cv2.putText(frame.im_replacemt, 'det2x2 = {}'.format(det2x2), (0,50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                cv2.putText(frame.im_replacemt, 'det3x3 = {}'.format(det3x3), (0,75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                util_cv.display_images([frame.image, frame.im_replacemt], title='Processed Frame')

            # write image
            if self.args.output:
                output = path.replace(self.args.input, self.args.output)
                logger.info( ' * Write {}'.format(output) )
                cv2.imwrite( output, frame.im_replacemt )

            # clear memory
            frames.pop(0)
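The two homography checks above can be factored into one helper; a sketch using the same thresholds (homography_is_plausible is an illustrative name):

import cv2

def homography_is_plausible(H, min_det3x3=1e-5):
    # Negative determinant of the upper-left 2x2 block: not orientation-preserving.
    if cv2.determinant(H[0:2, 0:2]) < 0:
        return False
    # Near-singular homography, as checked in OpenCV's stitching pipeline.
    return abs(cv2.determinant(H)) >= min_det3x3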
Code Example #12
def triangl_pose_est_interactive(img_left, img_right, cameraMatrix, distCoeffs, imageSize, objp, boardSize, nonplanar_left, nonplanar_right):
    """ (TODO: remove debug-prints)
    Triangulation and relative pose estimation will be performed from LEFT to RIGHT image.
    
    Both images have to contain the whole chessboard,
    in order to make a decent estimation of the relative pose based on 'solvePnP'.
    
    Then the user can manually create matches between non-planar objects.
    If the user omits this step, only triangulation of the chessboard corners will be performed,
    and they will be compared to the real 3D points.
    Otherwise the coordinates of the triangulated points of the manually matched points will be printed,
    and a relative pose estimation will be performed (using the essential matrix),
    this pose estimation will be compared with the decent 'solvePnP' estimation.
    In that case you will be asked whether you want to mute the chessboard corners in all calculations,
    note that this requires at least 8 pairs of matches corresponding with non-planar geometry.
    
    During manual matching process,
    switching between LEFT and RIGHT image will be done in a zigzag fashion.
    To stop selecting matches, press SPACE.
    
    Additionally, you can also introduce predefined non-planar geometry,
    this is e.g. useful if you don't want to select non-planar geometry manually.
    """
    
    ### Setup initial state, and calc some very accurate poses to compare against with later
    
    K_left = cameraMatrix
    K_right = cameraMatrix
    K_inv_left = cvh.invert(K_left)
    K_inv_right = cvh.invert(K_right)
    distCoeffs_left = distCoeffs
    distCoeffs_right = distCoeffs
    
    # Extract chessboard features, together they form a set of 'planar' points
    ret_left, planar_left = cvh.extractChessboardFeatures(img_left, boardSize)
    ret_right, planar_right = cvh.extractChessboardFeatures(img_right, boardSize)
    if not ret_left or not ret_right:
        print ("Chessboard is not (entirely) in sight, aborting.")
        return
    
    # Save exact 2D and 3D points of the chessboard
    num_planar = planar_left.shape[0]
    planar_left_orig, planar_right_orig = planar_left, planar_right
    objp_orig = objp
    
    # Calculate P matrix of left pose, in reality this one is known
    ret, rvec_left, tvec_left = cv2.solvePnP(
            objp, planar_left, K_left, distCoeffs_left )
    P_left = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_left), tvec_left)
    
    # Calculate P matrix of right pose, in reality this one is unknown
    ret, rvec_right, tvec_right = cv2.solvePnP(
            objp, planar_right, K_right, distCoeffs_right )
    P_right = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_right), tvec_right)
    
    # Load previous non-planar inliers, if desired
    if nonplanar_left.size and not raw_input("Type 'no' if you don't want to use previous non-planar inliers? ").strip().lower() == "no":
        nonplanar_left = map(tuple, nonplanar_left)
        nonplanar_right = map(tuple, nonplanar_right)
    else:
        nonplanar_left = []
        nonplanar_right = []
    
    
    ### Create predefined non-planar 3D geometry, to enable automatic selection of non-planar points
    
    if raw_input("Type 'yes' if you want to create and select predefined non-planar geometry? ").strip().lower() == "yes":
        
        # A cube
        cube_coords = np.array([(0., 0., 0.), (1., 0., 0.), (1., 1., 0.), (0., 1., 0.),
                                (0., 0., 1.), (1., 0., 1.), (1., 1., 1.), (0., 1., 1.)])
        cube_coords *= 2    # scale
        cube_edges = np.array([(0, 1), (1, 2), (2, 3), (3, 0),
                               (4, 5), (5, 6), (6, 7), (7, 4),
                               (0, 4), (1, 5), (2, 6), (3, 7)])
        
        # An 8-point circle
        s2 = 1. / np.sqrt(2)
        circle_coords = np.array([( 1.,  0., 0.), ( s2,  s2, 0.),
                                  ( 0.,  1., 0.), (-s2,  s2, 0.),
                                  (-1.,  0., 0.), (-s2, -s2, 0.),
                                  ( 0., -1., 0.), ( s2, -s2, 0.)])
        circle_edges = np.array([(i, (i+1) % 8) for i in range(8)])
        
        # Position 2 cubes and 2 circles in the scene
        cube1 = np.array(cube_coords)
        cube1[:, 1] -= 1
        cube2 = np.array(cube_coords)
        cube2 = cvh.Rodrigues((0., 0., pi/4)).dot(cube2.T).T
        cube2[:, 0] += 4
        cube2[:, 1] += 3
        cube2[:, 2] += 2
        circle1 = np.array(circle_coords)
        circle1 *= 2
        circle1[:, 1] += 5
        circle1[:, 2] += 2
        circle2 = np.array(circle_coords)
        circle2 = cvh.Rodrigues((pi/2, 0., 0.)).dot(circle2.T).T
        circle2[:, 1] += 5
        circle2[:, 2] += 2
        
        # Print output to be used in Blender (see "calibrate_pose_visualization.blend")
        print ()
        print ("Cubes")
        print ("edges_cube = \\\n", map(list, cube_edges))
        print ("coords_cube1 = \\\n", map(list, cube1))
        print ("coords_cube2 = \\\n", map(list, cube2))
        print ()
        print ("Circles")
        print ("edges_circle = \\\n", map(list, circle_edges))
        print ("coords_circle1 = \\\n", map(list, circle1))
        print ("coords_circle2 = \\\n", map(list, circle2))
        print ()
        
        color = rgb(0, 200, 150)
        for verts, edges in zip([cube1, cube2, circle1, circle2],
                                [cube_edges, cube_edges, circle_edges, circle_edges]):
            out_left = cvh.wireframe3DGeometry(
                    img_left, verts, edges, color, rvec_left, tvec_left, K_left, distCoeffs_left )
            out_right = cvh.wireframe3DGeometry(
                    img_right, verts, edges, color, rvec_right, tvec_right, K_right, distCoeffs_right )
            valid_match_idxs = [i for i, (pl, pr) in enumerate(zip(out_left, out_right))
                                    if 0 <= min(pl[0], pr[0]) <= max(pl[0], pr[0]) < imageSize[0] and 
                                        0 <= min(pl[1], pr[1]) <= max(pl[1], pr[1]) < imageSize[1]
                                ]
            nonplanar_left += map(tuple, out_left[valid_match_idxs])    # concatenate
            nonplanar_right += map(tuple, out_right[valid_match_idxs])    # concatenate
        
        nonplanar_left = np.rint(nonplanar_left).astype(int)
        nonplanar_right = np.rint(nonplanar_right).astype(int)
    
    
    ### User can manually create matches between non-planar objects
    
    class ManualMatcher:
        def __init__(self, window_name, images, points):
            self.window_name = window_name
            self.images = images
            self.points = points
            self.img_idx = 0    # 0: left; 1: right
            cv2.namedWindow(window_name)
            cv2.setMouseCallback(window_name, self.onMouse)
        
        def onMouse(self, event, x, y, flags, userdata):
            if event == cv2.EVENT_LBUTTONDOWN:
                self.points[self.img_idx].append((x, y))
            
            elif event == cv2.EVENT_LBUTTONUP:
                # Switch images in a ping-pong fashion
                if len(self.points[0]) != len(self.points[1]):
                    self.img_idx = 1 - self.img_idx
        
        def run(self):
            print ("Select your matches. Press SPACE when done.")
            while True:
                img = cv2.drawKeypoints(self.images[self.img_idx], [cv2.KeyPoint(p[0],p[1], 7.) for p in self.points[self.img_idx]], color=rgb(0,0,255))
                cv2.imshow(self.window_name, img)
                key = cv2.waitKey(50) & 0xFF
                if key == ord(' '): break    # finish if SPACE is pressed
            
            num_points_diff = len(self.points[0]) - len(self.points[1])
            if num_points_diff:
                del self.points[num_points_diff < 0][-abs(num_points_diff):]
            print ("Selected", len(self.points[0]), "pairs of matches.")
    
    # Execute the manual matching
    nonplanar_left, nonplanar_right = list(nonplanar_left), list(nonplanar_right)
    ManualMatcher("Select match-points of non-planar objects", [img_left, img_right], [nonplanar_left, nonplanar_right]).run()
    num_nonplanar = len(nonplanar_left)
    has_nonplanar = (num_nonplanar > 0)
    if 0 < num_nonplanar < 8:
        print ("Warning: you've selected less than 8 pairs of matches.")
    nonplanar_left = np.array(nonplanar_left).reshape(num_nonplanar, 2)
    nonplanar_right = np.array(nonplanar_right).reshape(num_nonplanar, 2)
    
    mute_chessboard_corners = False
    if num_nonplanar >= 8 and raw_input("Type 'yes' if you want to exclude chessboard corners from calculations? ").strip().lower() == "yes":
        mute_chessboard_corners = True
        num_planar = 0
        planar_left = np.zeros((0, 2))
        planar_right = np.zeros((0, 2))
        objp = np.zeros((0, 3))
        print ("Chessboard corners muted.")
    
    has_prev_triangl_points = not mute_chessboard_corners    # normally in this example, this should always be True, unless user forces False
    
    
    ### Undistort points => normalized coordinates
    
    allfeatures_left = np.concatenate((planar_left, nonplanar_left))
    allfeatures_right = np.concatenate((planar_right, nonplanar_right))
    
    allfeatures_nrm_left = cv2.undistortPoints(np.array([allfeatures_left]), K_left, distCoeffs_left)[0]
    allfeatures_nrm_right = cv2.undistortPoints(np.array([allfeatures_right]), K_right, distCoeffs_right)[0]
    
    planar_nrm_left, nonplanar_nrm_left = allfeatures_nrm_left[:planar_left.shape[0]], allfeatures_nrm_left[planar_left.shape[0]:]
    planar_nrm_right, nonplanar_nrm_right = allfeatures_nrm_right[:planar_right.shape[0]], allfeatures_nrm_right[planar_right.shape[0]:]
    
    
    ### Calculate relative pose using the essential matrix
    
    # Only do pose estimation if we've got 2D points of non-planar geometry
    if has_nonplanar:
    
        # Determine inliers by calculating the fundamental matrix on all points,
        # except when mute_chessboard_corners is True: only use nonplanar_nrm_left and nonplanar_nrm_right
        F, status = cv2.findFundamentalMat(allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_RANSAC, 0.006 * np.amax(allfeatures_nrm_left), 0.99)    # threshold from [Snavely07 4.1]
        # OpenCV BUG: "status" matrix is not initialized with zeros in some cases, reproducible with 2 sets of 8 2D points equal to (0., 0.)
        #   maybe because "_mask.create()" is not called:
        #   https://github.com/Itseez/opencv/blob/7e2bb63378dafb90063af40caff20c363c8c9eaf/modules/calib3d/src/ptsetreg.cpp#L185
        # Workaround to test on outliers: use "!= 1", instead of "== 0"
        print (status.T)
        inlier_idxs = np.where(status == 1)[0]
        print ("Removed", allfeatures_nrm_left.shape[0] - inlier_idxs.shape[0], "outlier(s).")
        num_planar = np.where(inlier_idxs < num_planar)[0].shape[0]
        num_nonplanar = inlier_idxs.shape[0] - num_planar
        print ("num chessboard inliers:", num_planar)
        allfeatures_left, allfeatures_right = allfeatures_left[inlier_idxs], allfeatures_right[inlier_idxs]
        allfeatures_nrm_left, allfeatures_nrm_right = allfeatures_nrm_left[inlier_idxs], allfeatures_nrm_right[inlier_idxs]
        if not mute_chessboard_corners:
            objp = objp[inlier_idxs[:num_planar]]
            planar_left, planar_right = allfeatures_left[:num_planar], allfeatures_right[:num_planar]
            planar_nrm_left, planar_nrm_right = allfeatures_nrm_left[:num_planar], allfeatures_nrm_right[:num_planar]
        nonplanar_nrm_left, nonplanar_nrm_right = allfeatures_nrm_left[num_planar:], allfeatures_nrm_right[num_planar:]
        
        # Calculate first solution of relative pose
        if allfeatures_nrm_left.shape[0] >= 8:
            F, status = cv2.findFundamentalMat(allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_8POINT)
            print ("F:")
            print (F)
        else:
            print ("Error: less than 8 pairs of inliers found, I can't perform the 8-point algorithm.")
        #E = (K_right.T) .dot (F) .dot (K_left)    # "Multiple View Geometry in CV" by Hartley&Zisserman (9.12)
        E = F    # K = I because we already normalized the coordinates
        print ("Correct determinant of essential matrix?", (abs(cv2.determinant(E)) <= 1e-7))
        w, u, vt = cv2.SVDecomp(E, flags=cv2.SVD_MODIFY_A)
        print (w)
        # The two non-zero singular values of a valid E should be nearly equal;
        # warn if their ratio drops below 0.7.
        if ((w[0] < w[1] and w[0]/w[1]) or (w[1] < w[0] and w[1]/w[0]) or 0) < 0.7:
            print ("Essential matrix' 'w' vector deviates too much from expected")
        W = np.array([[0., -1., 0.],    # Hartley&Zisserman (9.13)
                      [1.,  0., 0.],
                      [0.,  0., 1.]])
        R = (u) .dot (W) .dot (vt)    # Hartley&Zisserman result 9.19
        det_R = cv2.determinant(R)
        print ("Coherent rotation?", (abs(abs(det_R) - 1) <= 1e-7))
        if det_R - (-1) < 1e-7:    # http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
            # E *= -1:
            vt *= -1    # svd(-E) = u * w * (-v).T
            R *= -1     # => u * W * (-v).T = -R
            print ("det(R) == -1, compensated.")
        t = u[:, 2:3]    # Hartley&Zisserman result 9.19
        P = trfm.P_from_R_and_t(R, t)
        
        # Select right solution where the (center of the) 3d points are/is in front of both cameras,
        # when has_prev_triangl_points == True, we can do it a lot faster.
        
        test_point = np.ones((4, 1))    # 3D point to test the solutions with
        
        if has_prev_triangl_points:
            print ("Using advantage of already triangulated points' position")
            print (P)
            
            # Select the closest already triangulated cloudpoint idx to the center of the cloud
            center_of_mass = objp.sum(axis=0) / objp.shape[0]
            center_objp_idx = np.argmin(((objp - center_of_mass)**2).sum(axis=1))
            print ("center_objp_idx:", center_objp_idx)
            
            # Select the corresponding image points
            center_imgp_left = planar_nrm_left[center_objp_idx]
            center_imgp_right = planar_nrm_right[center_objp_idx]
            
            # Select the corresponding 3D point
            test_point[0:3, :] = objp[center_objp_idx].reshape(3, 1)
            print ("test_point:")
            print (test_point)
            test_point = P_left.dot(test_point)    # set the reference axis-system to the one of the left camera, note that are_points_in_front_of_left_camera is automatically True
            
            center_objp_triangl, triangl_status = iterative_LS_triangulation(
                    center_imgp_left.reshape(1, 2), np.eye(4),
                    center_imgp_right.reshape(1, 2), P )
            print ("triangl_status:", triangl_status)
            
            if (center_objp_triangl) .dot (test_point[0:3, 0:1]) < 0:
                P[0:3, 3:4] *= -1    # do a baseline reversal
                print (P, "fixed triangulation inversion")
            
            if P[0:3, :].dot(test_point)[2, 0] < 0:    # are_points_in_front_of_right_camera is False
                P[0:3, 0:3] = (u) .dot (W.T) .dot (vt)    # use the other solution of the twisted pair ...
                P[0:3, 3:4] *= -1    # ... and also do a baseline reversal
                print (P, "fixed camera projection inversion")
        
        elif num_nonplanar > 0:
            print ("Doing all ambiguity checks since there are no already triangulated points")
            print (P)
            
            for i in range(4):    # check all 4 solutions
                objp_triangl, triangl_status = iterative_LS_triangulation(
                        nonplanar_nrm_left, np.eye(4),
                        nonplanar_nrm_right, P )
                print ("triangl_status:", triangl_status)
                center_of_mass = objp_triangl.sum(axis=0) / len(objp_triangl)    # select the center of the triangulated cloudpoints
                test_point[0:3, :] = center_of_mass.reshape(3, 1)
                print ("test_point:")
                print (trfm.P_inv(P_left) .dot (test_point))
                
                if np.eye(3, 4).dot(test_point)[2, 0] > 0 and P[0:3, :].dot(test_point)[2, 0] > 0:    # are_points_in_front_of_cameras is True
                    break
                
                if i % 2:
                    P[0:3, 0:3] = (u) .dot (W.T) .dot (vt)   # use the other solution of the twisted pair
                    print (P, "using the other solution of the twisted pair")
                
                else:
                    P[0:3, 3:4] *= -1    # do a baseline reversal
                    print (P, "doing a baseline reversal")
        
        
        are_points_in_front_of_left_camera = (np.eye(3, 4).dot(test_point)[2, 0] > 0)
        are_points_in_front_of_right_camera = (P[0:3, :].dot(test_point)[2, 0] > 0)
        print ("are_points_in_front_of_cameras?", are_points_in_front_of_left_camera, are_points_in_front_of_right_camera)
        if not (are_points_in_front_of_left_camera and are_points_in_front_of_right_camera):
            print ("No valid solution found!")
        
        P_left_result = trfm.P_inv(P).dot(P_right)
        P_right_result = P.dot(P_left)
        
        print ("P_left")
        print (P_left)
        print ("P_rel")
        print (P)
        print ("P_right")
        print (P_right)
        print ("=> error:", reprojection_error(
                [objp_orig] * 2, [planar_left_orig, planar_right_orig],
                cameraMatrix, distCoeffs,
                [rvec_left, rvec_right],
                [tvec_left, tvec_right] )[1])
        
        print ("P_left_result")
        print (P_left_result)
        print ("P_right_result")
        print (P_right_result)
        # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
        print ("=> error:", reprojection_error(
                [objp_orig] * 2, [planar_left_orig, planar_right_orig],
                cameraMatrix, distCoeffs,
                [cvh.Rodrigues(P_left[0:3, 0:3]), cvh.Rodrigues(P_right_result[0:3, 0:3])],
                [P_left[0:3, 3], P_right_result[0:3, 3]] )[1])
    
    
    ### Triangulate
    
    objp_result = np.zeros((0, 3))
    
    if has_prev_triangl_points:
        # Do triangulation of all points
        # NOTICE: in a real case, we should only use not yet triangulated points that are in sight
        objp_result, triangl_status = iterative_LS_triangulation(
                allfeatures_nrm_left, P_left,
                allfeatures_nrm_right, P_right )
        print ("triangl_status:", triangl_status)
    
    elif num_nonplanar > 0:
        # We already did the triangulation during the pose estimation, but we still need to backtransform them from the left camera axis-system
        objp_result = trfm.P_inv(P_left) .dot (np.concatenate((objp_triangl.T, np.ones((1, len(objp_triangl))))))
        objp_result = objp_result[0:3, :].T
        print (objp_triangl)
    
    print ("objp:")
    print (objp)
    print ("=> error:", reprojection_error(
            [objp_orig] * 2,
            [planar_left_orig, planar_right_orig],
            cameraMatrix, distCoeffs, [rvec_left, rvec_right], [tvec_left, tvec_right] )[1])
    
    print ("objp_result of chessboard:")
    print (objp_result[:num_planar, :])
    if has_nonplanar:
        print ("objp_result of non-planar geometry:")
        print (objp_result[num_planar:, :])
    if num_planar + num_nonplanar == 0:
        print ("=> error: undefined")
    else:
        print ("=> error:", reprojection_error(
                [objp_result] * 2,
                [allfeatures_left, allfeatures_right],
                cameraMatrix, distCoeffs, [rvec_left, rvec_right], [tvec_left, tvec_right] )[1])
    
    
    ### Print total combined reprojection error
    
    # We only have both pose estimation and triangulation if we've got 2D points of non-planar geometry
    if has_nonplanar:
        if num_planar + num_nonplanar == 0:
            print ("=> error: undefined")
        else:
            # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
            print ("Total combined error:", reprojection_error(
                    [objp_result] * 2,
                    [allfeatures_left, allfeatures_right],
                    cameraMatrix, distCoeffs,
                    [cvh.Rodrigues(P_left[0:3, 0:3]), cvh.Rodrigues(P_right_result[0:3, 0:3])],
                    [P_left[0:3, 3], P_right_result[0:3, 3]] )[1])
    
    
    """
    Further things we can do (in a real case):
        1. solvePnP() on inliers (including previously already triangulated points),
            or should we use solvePnPRansac() (in that case what to do with the outliers)?
        2. re-triangulate on new pose estimation
    """
    
    
    ### Print summary to be used in Blender to visualize (see "calibrate_pose_visualization.blend")
    
    print ("Camera poses:")
    if has_nonplanar:
        print ("Left")
        print_pose(rvec_left, tvec_left)
        print ()
        print ("Left_result")
        print_pose(cvh.Rodrigues(P_left_result[0:3, 0:3]), P_left_result[0:3, 3:4])
        print ()
        print ("Right")
        print_pose(rvec_right, tvec_right)
        print ()
        print ("Right_result")
        print_pose(cvh.Rodrigues(P_right_result[0:3, 0:3]), P_right_result[0:3, 3:4])
        print ()
    else:
        print ("<skipped: no non-planar objects have been selected>")
        print ()
    
    print ("Points:")
    print ("Chessboard")
    print ("coords = \\\n", map(list, objp_result[:num_planar, :]))
    print ()
    if has_nonplanar:
        print ("Non-planar geometry")
        print ("coords_nonplanar = \\\n", map(list, objp_result[num_planar:, :]))
        print ()
    
    ### Return to remember last manually matched successful non-planar imagepoints
    return nonplanar_left, nonplanar_right
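The solution-selection logic above reduces to a cheirality test; a minimal sketch (X is a homogeneous 3D point already expressed in the left camera frame, P_rel the 4x4 relative pose used above):

import numpy as np

def in_front_of_both_cameras(P_rel, X):
    # Depth in the left camera is the z component of X itself;
    # depth in the right camera comes from transforming X by the relative pose.
    depth_left = np.eye(3, 4).dot(X)[2, 0]
    depth_right = P_rel[0:3, :].dot(X)[2, 0]
    return depth_left > 0 and depth_right > 0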
Code Example #13
import cv2
import numpy as np


def calc():
    # Micro-benchmark: one million 3x3 float64 determinant evaluations
    array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float64)
    for i in range(0, int(1e6)):
        cv2.determinant(array)
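For cross-checking or timing, cv2.determinant agrees with np.linalg.det on float matrices; a hedged comparison sketch:

import time
import cv2
import numpy as np

array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
assert abs(cv2.determinant(array) - np.linalg.det(array)) < 1e-9

start = time.perf_counter()
for _ in range(int(1e5)):
    cv2.determinant(array)
print('1e5 calls to cv2.determinant:', time.perf_counter() - start, 's')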
コード例 #15
0
    def runLoop(self, files, paths):
        """
            Process input frame-by-frame
        """

        logger.info(' * Analysing image sequence')

        if len(paths) == 0:
            logger.warn(' Empty sequence, nothing to be done')
            return

        # sequence of frames
        frames = []
        for (i, path) in enumerate(paths):

            logger.info('')
            logger.info(' * frame {} : {}'.format(i, path))

            # set up frame object
            frame = Frame(index=i, name=files[i])
            frames.append(frame)

            # load frame image
            frame.image = util_cv.load_image(path)
            frame.image_gray = cv2.cvtColor(frame.image, cv2.COLOR_BGR2GRAY)

            # detect and compute features
            frame.keypoints, frame.descriptors = self.computeFeatures(
                image=frame.image_gray)

            # match feature descriptors
            frame.matches = self.matchFeatures(frame.descriptors)

            # debug features
            if self.args.debug:
                debugImage = util_cv.drawMatches(self.im_reference,
                                                 self.keypoints_ref,
                                                 frame.image, frame.keypoints,
                                                 frame.matches)
                util_cv.display_image(debugImage, title='DEBUG')

            ## compute homography from the reference object to the scene
            frame.homography = self.computeHomography(self.keypoints_ref,
                                                      frame.keypoints,
                                                      frame.matches)

            # debug homography
            if self.args.debug:
                debugImage = frame.image.copy()
                util_cv.draw_homography(self.im_reference,
                                        frame.homography.astype('float32'),
                                        debugImage)
                debugImage = util_cv.drawMatches(self.im_reference,
                                                 self.keypoints_ref,
                                                 debugImage, frame.keypoints,
                                                 frame.matches)
                util_cv.display_image(debugImage, title='DEBUG')

            ## check if homography is bad
            bad_homo = False
            # If the det of the upper 2x2 matrix of the homography is smaller than 0, it is not orientation-preserving.
            # src: http://answers.opencv.org/question/2588/check-if-homography-is-good/
            # This filters out some bad frames, although it's not a perfect criteria (tested with the bad results using BruteForceMatcher)
            det2x2 = cv2.determinant(frame.homography[0:2, 0:2])
            if det2x2 < 0:
                bad_homo = True

            # This criteria can be found in the image stitching pipeline of opencv...
            det3x3 = abs(cv2.determinant(frame.homography))
            if det3x3 < 0.00001:  #sys.float_info.epsilon:
                bad_homo = True

            if bad_homo:

                # homography is useless, just show the original picture
                frame.im_replacemt = frame.image.copy()

            else:

                # warp replacement image and merge with the frame
                h, w = frame.image.shape[:2]
                im_warped = cv2.warpPerspective(self.im_replacemt,
                                                frame.homography,
                                                dsize=(w, h))
                im_replacemt = numpy.ma.array(frame.image,
                                              mask=im_warped > 0,
                                              fill_value=0).filled()
                im_replacemt = cv2.bitwise_or(im_replacemt, im_warped)
                frame.im_replacemt = im_replacemt

            # debug result
            if self.args.debug:
                color = (255, 0, 0)
                if det2x2 < 0 or det3x3 < 0.00001:
                    color = (0, 0, 255)
                cv2.putText(frame.im_replacemt,
                            'Frame {}: {}'.format(i, os.path.basename(path)),
                            (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
                cv2.putText(frame.im_replacemt, 'det2x2 = {}'.format(det2x2),
                            (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                cv2.putText(frame.im_replacemt, 'det3x3 = {}'.format(det3x3),
                            (0, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                util_cv.display_images([frame.image, frame.im_replacemt],
                                       title='Processed Frame')

            # write image
            if self.args.output:
                output = path.replace(self.args.input, self.args.output)
                logger.info(' * Write {}'.format(output))
                cv2.imwrite(output, frame.im_replacemt)

            # clear memory
            frames.pop(0)
コード例 #16
0
def triangl_pose_est_interactive(img_left, img_right, cameraMatrix, distCoeffs,
                                 imageSize, objp, boardSize, nonplanar_left,
                                 nonplanar_right):
    """ (TODO: remove debug-prints)
    Triangulation and relative pose estimation will be performed from LEFT to RIGHT image.
    
    Both images have to contain the whole chessboard,
    in order to make a decent estimation of the relative pose based on 'solvePnP'.
    
    Then the user can manually create matches between non-planar objects.
    If the user omits this step, only triangulation of the chessboard corners will be performed,
    and they will be compared to the real 3D points.
    Otherwise the coordinates of the triangulated points of the manually matched points will be printed,
    and a relative pose estimation will be performed (using the essential matrix),
    this pose estimation will be compared with the decent 'solvePnP' estimation.
    In that case you will be asked whether you want to mute the chessboard corners in all calculations,
    note that this requires at least 8 pairs of matches corresponding with non-planar geometry.
    
    During manual matching process,
    switching between LEFT and RIGHT image will be done in a zigzag fashion.
    To stop selecting matches, press SPACE.
    
    Additionally, you can also introduce predefined non-planar geometry,
    this is e.g. useful if you don't want to select non-planar geometry manually.
    """

    ### Setup initial state, and calc some very accurate poses to compare against with later

    K_left = cameraMatrix
    K_right = cameraMatrix
    K_inv_left = cvh.invert(K_left)
    K_inv_right = cvh.invert(K_right)
    distCoeffs_left = distCoeffs
    distCoeffs_right = distCoeffs

    # Extract chessboard features, together they form a set of 'planar' points
    ret_left, planar_left = cvh.extractChessboardFeatures(img_left, boardSize)
    ret_right, planar_right = cvh.extractChessboardFeatures(
        img_right, boardSize)
    if not ret_left or not ret_right:
        print("Chessboard is not (entirely) in sight, aborting.")
        return

    # Save exact 2D and 3D points of the chessboard
    num_planar = planar_left.shape[0]
    planar_left_orig, planar_right_orig = planar_left, planar_right
    objp_orig = objp

    # Calculate P matrix of left pose, in reality this one is known
    ret, rvec_left, tvec_left = cv2.solvePnP(objp, planar_left, K_left,
                                             distCoeffs_left)
    P_left = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_left), tvec_left)

    # Calculate P matrix of right pose, in reality this one is unknown
    ret, rvec_right, tvec_right = cv2.solvePnP(objp, planar_right, K_right,
                                               distCoeffs_right)
    P_right = trfm.P_from_R_and_t(cvh.Rodrigues(rvec_right), tvec_right)

    # Load previous non-planar inliers, if desired
    if nonplanar_left.size and not raw_input(
            "Type 'no' if you don't want to use previous non-planar inliers? "
    ).strip().lower() == "no":
        nonplanar_left = map(tuple, nonplanar_left)
        nonplanar_right = map(tuple, nonplanar_left)
    else:
        nonplanar_left = []
        nonplanar_right = []

    ### Create predefined non-planar 3D geometry, to enable automatic selection of non-planar points

    if raw_input(
            "Type 'yes' if you want to create and select predefined non-planar geometry? "
    ).strip().lower() == "yes":

        # A cube
        cube_coords = np.array([(0., 0., 0.), (1., 0., 0.), (1., 1., 0.),
                                (0., 1., 0.), (0., 0., 1.), (1., 0., 1.),
                                (1., 1., 1.), (0., 1., 1.)])
        cube_coords *= 2  # scale
        cube_edges = np.array([(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6),
                               (6, 7), (7, 4), (0, 4), (1, 5), (2, 6), (3, 7)])

        # An 8-point circle
        s2 = 1. / np.sqrt(2)
        circle_coords = np.array([(1., 0., 0.), (s2, s2, 0.), (0., 1., 0.),
                                  (-s2, s2, 0.), (-1., 0., 0.), (-s2, -s2, 0.),
                                  (0., -1., 0.), (s2, -s2, 0.)])
        circle_edges = np.array([(i, (i + 1) % 8) for i in range(8)])

        # Position 2 cubes and 2 circles in the scene
        cube1 = np.array(cube_coords)
        cube1[:, 1] -= 1
        cube2 = np.array(cube_coords)
        cube2 = cvh.Rodrigues((0., 0., pi / 4)).dot(cube2.T).T
        cube2[:, 0] += 4
        cube2[:, 1] += 3
        cube2[:, 2] += 2
        circle1 = np.array(circle_coords)
        circle1 *= 2
        circle1[:, 1] += 5
        circle1[:, 2] += 2
        circle2 = np.array(circle_coords)
        circle2 = cvh.Rodrigues((pi / 2, 0., 0.)).dot(circle2.T).T
        circle2[:, 1] += 5
        circle2[:, 2] += 2

        # Print output to be used in Blender (see "calibrate_pose_visualization.blend")
        print()
        print("Cubes")
        print("edges_cube = \\\n", map(list, cube_edges))
        print("coords_cube1 = \\\n", map(list, cube1))
        print("coords_cube2 = \\\n", map(list, cube2))
        print()
        print("Circles")
        print("edges_circle = \\\n", map(list, circle_edges))
        print("coords_circle1 = \\\n", map(list, circle1))
        print("coords_circle2 = \\\n", map(list, circle2))
        print()

        color = rgb(0, 200, 150)
        for verts, edges in zip(
            [cube1, cube2, circle1, circle2],
            [cube_edges, cube_edges, circle_edges, circle_edges]):
            out_left = cvh.wireframe3DGeometry(img_left, verts, edges, color,
                                               rvec_left, tvec_left, K_left,
                                               distCoeffs_left)
            out_right = cvh.wireframe3DGeometry(img_right, verts, edges, color,
                                                rvec_right, tvec_right,
                                                K_right, distCoeffs_right)
            valid_match_idxs = [
                i for i, (pl, pr) in enumerate(zip(out_left, out_right))
                if 0 <= min(pl[0], pr[0]) <= max(pl[0], pr[0]) < imageSize[0]
                and 0 <= min(pl[1], pr[1]) <= max(pl[1], pr[1]) < imageSize[1]
            ]
            nonplanar_left += map(tuple,
                                  out_left[valid_match_idxs])  # concatenate
            nonplanar_right += map(tuple,
                                   out_right[valid_match_idxs])  # concatenate

        nonplanar_left = np.rint(nonplanar_left).astype(int)
        nonplanar_right = np.rint(nonplanar_right).astype(int)

    ### User can manually create matches between non-planar objects

    class ManualMatcher:
        def __init__(self, window_name, images, points):
            self.window_name = window_name
            self.images = images
            self.points = points
            self.img_idx = 0  # 0: left; 1: right
            cv2.namedWindow(window_name)
            cv2.setMouseCallback(window_name, self.onMouse)

        def onMouse(self, event, x, y, flags, userdata):
            if event == cv2.EVENT_LBUTTONDOWN:
                self.points[self.img_idx].append((x, y))

            elif event == cv2.EVENT_LBUTTONUP:
                # Switch images in a ping-pong fashion
                if len(self.points[0]) != len(self.points[1]):
                    self.img_idx = 1 - self.img_idx

        def run(self):
            print("Select your matches. Press SPACE when done.")
            while True:
                img = cv2.drawKeypoints(self.images[self.img_idx], [
                    cv2.KeyPoint(p[0], p[1], 7.)
                    for p in self.points[self.img_idx]
                ],
                                        color=rgb(0, 0, 255))
                cv2.imshow(self.window_name, img)
                key = cv2.waitKey(50) & 0xFF
                if key == ord(' '): break  # finish if SPACE is pressed

            num_points_diff = len(self.points[0]) - len(self.points[1])
            if num_points_diff:
                del self.points[num_points_diff < 0][-abs(num_points_diff):]
            print("Selected", len(self.points[0]), "pairs of matches.")

    # Execute the manual matching
    nonplanar_left, nonplanar_right = list(nonplanar_left), list(
        nonplanar_right)
    ManualMatcher("Select match-points of non-planar objects",
                  [img_left, img_right],
                  [nonplanar_left, nonplanar_right]).run()
    num_nonplanar = len(nonplanar_left)
    has_nonplanar = (num_nonplanar > 0)
    if 0 < num_nonplanar < 8:
        print("Warning: you've selected less than 8 pairs of matches.")
    nonplanar_left = np.array(nonplanar_left).reshape(num_nonplanar, 2)
    nonplanar_right = np.array(nonplanar_right).reshape(num_nonplanar, 2)

    mute_chessboard_corners = False
    if num_nonplanar >= 8 and raw_input(
            "Type 'yes' if you want to exclude chessboard corners from calculations? "
    ).strip().lower() == "yes":
        mute_chessboard_corners = True
        num_planar = 0
        planar_left = np.zeros((0, 2))
        planar_right = np.zeros((0, 2))
        objp = np.zeros((0, 3))
        print("Chessboard corners muted.")

    has_prev_triangl_points = not mute_chessboard_corners  # normally in this example, this should always be True, unless user forces False

    ### Undistort points => normalized coordinates

    allfeatures_left = np.concatenate((planar_left, nonplanar_left))
    allfeatures_right = np.concatenate((planar_right, nonplanar_right))

    allfeatures_nrm_left = cv2.undistortPoints(np.array([allfeatures_left]),
                                               K_left, distCoeffs_left)[0]
    allfeatures_nrm_right = cv2.undistortPoints(np.array([allfeatures_right]),
                                                K_right, distCoeffs_right)[0]

    planar_nrm_left, nonplanar_nrm_left = allfeatures_nrm_left[:planar_left.shape[
        0]], allfeatures_nrm_left[planar_left.shape[0]:]
    planar_nrm_right, nonplanar_nrm_right = allfeatures_nrm_right[:planar_right.shape[
        0]], allfeatures_nrm_right[planar_right.shape[0]:]

    ### Calculate relative pose using the essential matrix

    # Only do pose estimation if we've got 2D points of non-planar geometry
    if has_nonplanar:

        # Determine inliers by calculating the fundamental matrix on all points,
        # except when mute_chessboard_corners is True: only use nonplanar_nrm_left and nonplanar_nrm_right
        F, status = cv2.findFundamentalMat(
            allfeatures_nrm_left, allfeatures_nrm_right, cv2.FM_RANSAC,
            0.006 * np.amax(allfeatures_nrm_left),
            0.99)  # threshold from [Snavely07 4.1]
        # OpenCV BUG: "status" matrix is not initialized with zeros in some cases, reproducable with 2 sets of 8 2D points equal to (0., 0.)
        #   maybe because "_mask.create()" is not called:
        #   https://github.com/Itseez/opencv/blob/7e2bb63378dafb90063af40caff20c363c8c9eaf/modules/calib3d/src/ptsetreg.cpp#L185
        # Workaround to test on outliers: use "!= 1", instead of "== 0"
        print(status.T)
        inlier_idxs = np.where(status == 1)[0]
        print("Removed", allfeatures_nrm_left.shape[0] - inlier_idxs.shape[0],
              "outlier(s).")
        num_planar = np.where(inlier_idxs < num_planar)[0].shape[0]
        num_nonplanar = inlier_idxs.shape[0] - num_planar
        print("num chessboard inliers:", num_planar)
        allfeatures_left, allfeatures_right = allfeatures_left[
            inlier_idxs], allfeatures_right[inlier_idxs]
        allfeatures_nrm_left, allfeatures_nrm_right = allfeatures_nrm_left[
            inlier_idxs], allfeatures_nrm_right[inlier_idxs]
        if not mute_chessboard_corners:
            objp = objp[inlier_idxs[:num_planar]]
            planar_left, planar_right = allfeatures_left[:
                                                         num_planar], allfeatures_right[:
                                                                                        num_planar]
            planar_nrm_left, planar_nrm_right = allfeatures_nrm_left[:
                                                                     num_planar], allfeatures_nrm_right[:
                                                                                                        num_planar]
        nonplanar_nrm_left, nonplanar_nrm_right = allfeatures_nrm_left[
            num_planar:], allfeatures_nrm_right[num_planar:]

        # Calculate first solution of relative pose
        if allfeatures_nrm_left.shape[0] >= 8:
            F, status = cv2.findFundamentalMat(allfeatures_nrm_left,
                                               allfeatures_nrm_right,
                                               cv2.FM_8POINT)
            print("F:")
            print(F)
        else:
            print(
                "Error: less than 8 pairs of inliers found, I can't perform the 8-point algorithm."
            )
        #E = (K_right.T) .dot (F) .dot (K_left)    # "Multiple View Geometry in CV" by Hartley&Zisserman (9.12)
        E = F  # K = I because we already normalized the coordinates
        print("Correct determinant of essential matrix?",
              (abs(cv2.determinant(E)) <= 1e-7))
        w, u, vt = cv2.SVDecomp(E, flags=cv2.SVD_MODIFY_A)
        print(w)
        if ((w[0] < w[1] and w[0] / w[1]) or
            (w[1] < w[0] and w[1] / w[0]) or 0) < 0.7:
            print(
                "Essential matrix' 'w' vector deviates too much from expected")
        W = np.array([
            [0., -1., 0.],  # Hartley&Zisserman (9.13)
            [1., 0., 0.],
            [0., 0., 1.]
        ])
        R = (u).dot(W).dot(vt)  # Hartley&Zisserman result 9.19
        det_R = cv2.determinant(R)
        print("Coherent rotation?", (abs(det_R) - 1 <= 1e-7))
        if det_R - (
                -1
        ) < 1e-7:  # http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
            # E *= -1:
            vt *= -1  # svd(-E) = u * w * (-v).T
            R *= -1  # => u * W * (-v).T = -R
            print("det(R) == -1, compensated.")
        t = u[:, 2:3]  # Hartley&Zisserman result 9.19
        P = trfm.P_from_R_and_t(R, t)

        # Select right solution where the (center of the) 3d points are/is in front of both cameras,
        # when has_prev_triangl_points == True, we can do it a lot faster.

        test_point = np.ones((4, 1))  # 3D point to test the solutions with

        if has_prev_triangl_points:
            print("Using advantage of already triangulated points' position")
            print(P)

            # Select the closest already triangulated cloudpoint idx to the center of the cloud
            center_of_mass = objp.sum(axis=0) / objp.shape[0]
            center_objp_idx = np.argmin(
                ((objp - center_of_mass)**2).sum(axis=1))
            print("center_objp_idx:", center_objp_idx)

            # Select the corresponding image points
            center_imgp_left = planar_nrm_left[center_objp_idx]
            center_imgp_right = planar_nrm_right[center_objp_idx]

            # Select the corresponding 3D point
            test_point[0:3, :] = objp[center_objp_idx].reshape(3, 1)
            print("test_point:")
            print(test_point)
            test_point = P_left.dot(
                test_point
            )  # set the reference axis-system to the one of the left camera, note that are_points_in_front_of_left_camera is automatically True

            center_objp_triangl, triangl_status = iterative_LS_triangulation(
                center_imgp_left.reshape(1, 2), np.eye(4),
                center_imgp_right.reshape(1, 2), P)
            print("triangl_status:", triangl_status)

            if (center_objp_triangl).dot(test_point[0:3, 0:1]) < 0:
                P[0:3, 3:4] *= -1  # do a baseline reversal
                print(P, "fixed triangulation inversion")

            if P[0:3, :].dot(test_point)[
                    2, 0] < 0:  # are_points_in_front_of_right_camera is False
                P[0:3, 0:3] = (u).dot(W.T).dot(
                    vt)  # use the other solution of the twisted pair ...
                P[0:3, 3:4] *= -1  # ... and also do a baseline reversal
                print(P, "fixed camera projection inversion")

        elif num_nonplanar > 0:
            print(
                "Doing all ambiguity checks since there are no already triangulated points"
            )
            print(P)
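            # The essential matrix yields 4 candidate poses (Hartley&Zisserman
            # result 9.19): [R|t], [R|-t], [R'|t] and [R'|-t], where R' is the
            # other solution of the twisted pair. The loop below alternates
            # baseline reversals and twisted-pair swaps to visit all 4, and
            # stops at the one that puts the cloud in front of both cameras.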

            for i in range(4):  # check all 4 solutions
                objp_triangl, triangl_status = iterative_LS_triangulation(
                    nonplanar_nrm_left, np.eye(4), nonplanar_nrm_right, P)
                print("triangl_status:", triangl_status)
                # Select the center of mass of the triangulated cloud points
                center_of_mass = objp_triangl.sum(axis=0) / len(objp_triangl)
                test_point[0:3, :] = center_of_mass.reshape(3, 1)
                print("test_point:")
                print(trfm.P_inv(P_left).dot(test_point))

                if np.eye(3, 4).dot(test_point)[2, 0] > 0 and P[0:3, :].dot(
                        test_point
                )[2, 0] > 0:  # are_points_in_front_of_cameras is True
                    break

                if i % 2:
                    P[0:3, 0:3] = (u).dot(W.T).dot(
                        vt)  # use the other solution of the twisted pair
                    print(P, "using the other solution of the twisted pair")

                else:
                    P[0:3, 3:4] *= -1  # do a baseline reversal
                    print(P, "doing a baseline reversal")

        are_points_in_front_of_left_camera = (
            np.eye(3, 4).dot(test_point)[2, 0] > 0)
        are_points_in_front_of_right_camera = (
            P[0:3, :].dot(test_point)[2, 0] > 0)
        print("are_points_in_front_of_cameras?",
              are_points_in_front_of_left_camera,
              are_points_in_front_of_right_camera)
        if not (are_points_in_front_of_left_camera
                and are_points_in_front_of_right_camera):
            print("No valid solution found!")

        P_left_result = trfm.P_inv(P).dot(P_right)
        P_right_result = P.dot(P_left)
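        # P is the relative pose from the left to the right camera, i.e.
        # P_right == P . P_left, so each "_result" pose is recovered from the
        # other camera's ground-truth pose.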

        print("P_left")
        print(P_left)
        print("P_rel")
        print(P)
        print("P_right")
        print(P_right)
        print(
            "=> error:",
            reprojection_error([objp_orig] * 2,
                               [planar_left_orig, planar_right_orig],
                               cameraMatrix, distCoeffs,
                               [rvec_left, rvec_right],
                               [tvec_left, tvec_right])[1])

        print("P_left_result")
        print(P_left_result)
        print("P_right_result")
        print(P_right_result)
        # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
        print(
            "=> error:",
            reprojection_error([objp_orig] * 2,
                               [planar_left_orig, planar_right_orig],
                               cameraMatrix, distCoeffs, [
                                   cvh.Rodrigues(P_left[0:3, 0:3]),
                                   cvh.Rodrigues(P_right_result[0:3, 0:3])
                               ], [P_left[0:3, 3], P_right_result[0:3, 3]])[1])

    ### Triangulate

    objp_result = np.zeros((0, 3))

    if has_prev_triangl_points:
        # Do triangulation of all points
        # NOTICE: in a real case, we should only use points that are in sight
        # and haven't been triangulated yet
        objp_result, triangl_status = iterative_LS_triangulation(
            allfeatures_nrm_left, P_left, allfeatures_nrm_right, P_right)
        print("triangl_status:", triangl_status)

    elif num_nonplanar > 0:
        # We already did the triangulation during pose estimation, but we still
        # need to transform the points back from the left camera's axis-system
        objp_result = trfm.P_inv(P_left).dot(
            np.concatenate((objp_triangl.T, np.ones((1, len(objp_triangl))))))
        objp_result = objp_result[0:3, :].T
        print(objp_triangl)

    print("objp:")
    print(objp)
    print(
        "=> error:",
        reprojection_error([objp_orig] * 2,
                           [planar_left_orig, planar_right_orig], cameraMatrix,
                           distCoeffs, [rvec_left, rvec_right],
                           [tvec_left, tvec_right])[1])

    print("objp_result of chessboard:")
    print(objp_result[:num_planar, :])
    if has_nonplanar:
        print("objp_result of non-planar geometry:")
        print(objp_result[num_planar:, :])
    if num_planar + num_nonplanar == 0:
        print("=> error: undefined")
    else:
        print(
            "=> error:",
            reprojection_error([objp_result] * 2,
                               [allfeatures_left, allfeatures_right],
                               cameraMatrix, distCoeffs,
                               [rvec_left, rvec_right],
                               [tvec_left, tvec_right])[1])

    ### Print total combined reprojection error

    # We only have both pose estimation and triangulation if we've got 2D points of non-planar geometry
    if has_nonplanar:
        if num_planar + num_nonplanar == 0:
            print("=> error: undefined")
        else:
            # We use "P_left" instead of "P_left_result" because the latter depends on the unknown "P_right"
            print(
                "Total combined error:",
                reprojection_error(
                    [objp_result] * 2, [allfeatures_left, allfeatures_right],
                    cameraMatrix, distCoeffs, [
                        cvh.Rodrigues(P_left[0:3, 0:3]),
                        cvh.Rodrigues(P_right_result[0:3, 0:3])
                    ], [P_left[0:3, 3], P_right_result[0:3, 3]])[1])
    """
    Further things we can do (in a real case):
        1. solvePnP() on inliers (including previously already triangulated points),
            or should we use solvePnPRansac() (in that case what to do with the outliers)?
        2. re-triangulate on new pose estimation
    """

    ### Print summary to be used in Blender to visualize (see "calibrate_pose_visualization.blend")

    print("Camera poses:")
    if has_nonplanar:
        print("Left")
        print_pose(rvec_left, tvec_left)
        print()
        print("Left_result")
        print_pose(cvh.Rodrigues(P_left_result[0:3, 0:3]),
                   P_left_result[0:3, 3:4])
        print()
        print("Right")
        print_pose(rvec_right, tvec_right)
        print()
        print("Right_result")
        print_pose(cvh.Rodrigues(P_right_result[0:3, 0:3]),
                   P_right_result[0:3, 3:4])
        print()
    else:
        print("<skipped: no non-planar objects have been selected>")
        print()

    print("Points:")
    print("Chessboard")
    print("coords = \\\n", map(list, objp_result[:num_planar, :]))
    print()
    if has_nonplanar:
        print("Non-planar geometry")
        print("coords_nonplanar = \\\n", map(list,
                                             objp_result[num_planar:, :]))
        print()

    ### Return to remember last manually matched successful non-planar imagepoints
    return nonplanar_left, nonplanar_right
Code example #17
0
src_pts = np.float32([ kps1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kps2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

# Estimate the homography with LMedS (the reprojection-threshold argument
# is only used by the RANSAC and RHO methods; LMEDS ignores it)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.LMEDS, 3)
matchesMask = mask.ravel().tolist()

# Apply the perspective transformation to the source image corners
h, w = img1.shape
corners = np.float32([ [0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0] ]).reshape(-1, 1, 2)
transformedCorners = cv2.perspectiveTransform(corners, M)

# Draw a polygon on the second image joining the transformed corners
img2 = cv2.polylines(img2, [np.int32(transformedCorners)], True, (255, 255, 255), 2, cv2.LINE_AA)

coeffs = []
det = cv2.determinant(M)
n1 = math.sqrt(M[0, 0] * M[0, 0] + M[1, 0] * M[1, 0])
n2 = math.sqrt(M[0, 1] * M[0, 1] + M[1, 1] * M[1, 1])
n3 = math.sqrt(M[2, 0] * M[2, 0] + M[2, 1] * M[2, 1])

coeffs.append(det)
coeffs.append(n1)
coeffs.append(n2)
coeffs.append(n3)
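# A rough sanity heuristic (a common rule of thumb, not from a specific
# reference): det < 0 means the homography flips orientation, a near-zero
# |det| means it is nearly degenerate, and n1/n2 measure how strongly the
# x/y axes are scaled, so extreme values indicate an implausible match.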

# Display the results
coincide = len(good)
if det < 0 or math.fabs(det) < 2e-05:
    coincide = 0
if n1 > 4 or n1 < 0.1:
    coincide = 0