def world_to_rgb(self, pt, use_distortion=True):
        projMatrix = np.asarray(self.rgb_camera_info.P).reshape(3, 4)
        distCoeffs = np.asarray(self.rgb_camera_info.D)
        cameraMatrix, rotMatrix, camCenter = cv2.decomposeProjectionMatrix(projMatrix)[:3]
        rvec, _ = cv2.Rodrigues(rotMatrix)
        # decomposeProjectionMatrix returns the homogeneous camera centre C,
        # not the translation; projectPoints expects t = -R @ (C[:3] / C[3]).
        tvec = -rotMatrix @ (camCenter[:3] / camCenter[3])

        if not use_distortion:
            distCoeffs = np.array([])
        imgpoints2, _ = cv2.projectPoints(np.array([pt], dtype=np.float64),
                                          rvec, tvec, cameraMatrix, distCoeffs)

        return imgpoints2[0][0]
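
The third output of cv2.decomposeProjectionMatrix is the homogeneous camera centre C, not a translation vector. A minimal standalone sketch (with made-up intrinsics and pose, not part of the snippet above) that checks the t = -R·C relation used here:

import cv2
import numpy as np

K = np.array([[525., 0., 319.5],
              [0., 525., 239.5],
              [0., 0., 1.]])
R, _ = cv2.Rodrigues(np.array([0.05, -0.10, 0.02]))  # a small rotation
t = np.array([[0.1], [-0.2], [1.5]])                 # world-to-camera translation
P = K @ np.hstack([R, t])                            # 3x4 projection matrix

K2, R2, C_h = cv2.decomposeProjectionMatrix(P)[:3]
C = C_h[:3] / C_h[3]   # camera centre in world coordinates
t2 = -R2 @ C           # recover the translation from the centre

print(np.allclose(K2, K), np.allclose(R2, R), np.allclose(t2, t))  # True True True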
Example #2
	def estimatePose(self, img, corners):
		# Corners start from the top right corner, move downwards to the bottom
		# row, then start at the top of the next column to the left.
		# Points are in the form [column, row]
		
		# Form the world points with X pointing forward, and Y pointing left
		pts_3d = np.array([]).reshape(0, 3)
		for n in range(self.size[1]):
			y = n*self.square
			for m in range(self.size[0]):
				x = -m*self.square
				pts_3d = np.vstack([pts_3d, [x, y, 0]])
		
		# Get checkerboard pose (unsubscribe once there is a valid estimate)
		valid, r_vec, t_vec = cv2.solvePnP(pts_3d, corners, self.K, self.D)
		
		if valid:
			self.image_sub.unregister()
		else:
			return
		
		# Form the inverse of the extrinsic matrix (i.e. transform of camera wrt world)
		R_c = cv2.Rodrigues(r_vec)[0].T
		t_c = -np.matmul(R_c, t_vec)
		T_c = np.hstack([R_c, t_c])
		alpha, beta, gamma = cv2.decomposeProjectionMatrix(T_c)[-1].flatten()
		
		# Convert euler angles to roll-pitch-yaw of a camera with X forward and Y left
		roll = beta
		pitch = -alpha - 90
		yaw = gamma + 90
		
		height = float(t_c[2])
				
		# Output the results
		print("\n--- Pose Estimation Results ---")
		print("     Height: %9.5f m" % height)
		print("     Roll:   %+9.5f deg" % roll)
		print("     Pitch:  %+9.5f deg" % pitch)
		print("     Yaw:    %+9.5f deg\n" % yaw)
		
		# Exit when done
		cv2.destroyAllWindows()
		rospy.signal_shutdown("Done.")		
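
The inversion commented above (camera pose from the solvePnP extrinsics) can be spot-checked on its own; a small sketch with an arbitrary pose, not part of the original node:

import cv2
import numpy as np

r_vec = np.array([0.3, -0.1, 0.2])
t_vec = np.array([[0.4], [0.1], [2.0]])

R = cv2.Rodrigues(r_vec)[0]
R_c = R.T           # rotation of the camera w.r.t. the world
t_c = -R_c @ t_vec  # camera position in world coordinates

# Mapping a world point into the camera frame and back is the identity.
p_world = np.array([[1.0], [2.0], [3.0]])
p_cam = R @ p_world + t_vec
print(np.allclose(R_c @ p_cam + t_c, p_world))  # True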
Example #3
def fill_polygon(blankimage, lines, indent_right, indent_left, p, factor):
    K, R, t_, _, _, _, _ = cv2.decomposeProjectionMatrix(p)
    t = np.asmatrix(-R) * np.asmatrix(t_[:3] / t_[3])

    for l in lines:
        L = np.hstack((R, t)) * l

        if np.linalg.norm(L[:, 0]) < 25:

            if np.any(L[2, :] < 0):
                x0, y0 = get_intersection(L[:, 0].T, L[:, 1].T, 0.051)
                L[:, 0] = np.array([[x0], [y0], [0.051]])
            if np.linalg.norm(L[:, 0]) > 15:
                W = indent(L, indent_right * factor * 0.75,
                           indent_left * factor * 0.75, 0)

            else:
                W = indent(L, indent_right, indent_left, 0.0)
            x = np.dot(K, W)  # project the indented segment into the image
            mask = x[2, :] > 0
            x = x[:2, mask] / x[2, mask]
            x = x.T
            if x.size == 8:
                cv2.fillPoly(blankimage, np.int32([x]), (255, 255, 255))

    return blankimage
Example #4
def solve(tag, imagePoints):
    print(tag)
    print("imagepoints")
    rprint(imagePoints)

    ret, rvec, tvec = cv2.solvePnP(quad_3d, imagePoints , K, dist_coef)

    print("R:")
    #print(rvec)
    rprint(cv2.Rodrigues(rvec)[0])
    #print("tvec")
    print("T:")
    rprint(tvec)

    projectionMatrix = np.concatenate((cv2.Rodrigues(rvec)[0], tvec),axis=1)
    #print(projectionMatrix)

    #print("decomposed")
    (cam,rot,tr,rx,ry,rz,eu)=cv2.decomposeProjectionMatrix(projectionMatrix)
    #print('cam:')
    #rprint(cam)
    #print("T:")
    #rprint(tr)
    print("R-decomp:")
    rprint(rot)
    #print(ry)
    print('Euler')
    print('degs')
    rprint(eu)
    print('rads')
    rprint(eu*pi/180.0)

    print('heading')
    rprint(atan2(float(tvec[0]), float(tvec[2])) * 180.0 / pi)
Example #5
 def from_P(cls, P, width, height):
     K, R, T, Rx, Ry, Rz, angles = cv2.decomposeProjectionMatrix(P)  # pylint: disable=unused-variable
     return cls(K=K,
                R=R,
                T=Point3D(-R @ Point3D(T)),
                width=width,
                height=height)
Example #6
 def getAngles(self, rvec, tvec):
     rmat = cv2.Rodrigues(rvec)[0]
     P = np.hstack((rmat, tvec)) # projection matrix [R | t]
     degrees = -cv2.decomposeProjectionMatrix(P)[6]
     print("\ndegrees:\n {0}".format(degrees))
     rx, ry, rz = degrees[:, 0]
     return [rx, ry, rz]
Example #7
def callback(msg):
    points = msg.data
    points_2d = np.array([[points[0], points[1]], [points[2], points[3]],
                          [points[4], points[5]], [points[6], points[7]],
                          [points[8], points[9]], [points[10], points[11]]])
    #points_2d = np.array([[1060.0,340.0], [660.0,340.0], [660.0,740.0], [1060.0,740.0]])
    ret, rvec, tvec = cv2.solvePnP(points_3d, points_2d, camera_mat, dist)

    R = cv2.Rodrigues(rvec)[0]
    tvec = tvec.flatten()
    # Build [R | 0]; only the rotation block matters for the Euler angles.
    projMat = np.hstack((R, np.zeros((3, 1), dtype=np.float32)))
    eulerAngles = cv2.decomposeProjectionMatrix(projMat)[6].flatten()
    print(eulerAngles)
    br.sendTransform(
        (-tvec[0], -tvec[1], tvec[2]),
        tf.transformations.quaternion_from_euler(math.radians(-eulerAngles[0]),
                                                 0,
                                                 math.radians(eulerAngles[2])),
        rospy.Time.now(), "bebop2", "map")

    print("\n")
Example #8
def readKittiCalib(path):

    calib = Calib()
    with open(path) as f:

        for line in f:
            lines = line.split()
            if len(lines) == 0:
                continue
            vector = np.asarray(lines[1:], dtype=np.float32)

            if lines[0] == "P2:":
                calib.p_left = vector.reshape(3, 4)
            elif lines[0] == "P3:":
                calib.p_right = vector.reshape(3, 4)
            elif lines[0] == "R0_rect:":
                calib.r0 = vector.reshape(3, 3)

    calib.tx = float(calib.p_right[0, 3])
    # decomposeProjectionMatrix returns its outputs; the first element of
    # the returned tuple is the camera (intrinsic) matrix.
    calib.k_left = cv2.decomposeProjectionMatrix(calib.p_left)[0]
    return calib
Example #9
def get_head_pose(flandmarks):
    ''' Estimates the 3d orientation of the head.

        Estimates the head pose by utilizing cv2.solvePnP().

    :param flandmarks: 2D facial landmarks
    :return: the Euler angles of rotation of the person's head,
             a rotation matrix,
             translation vector,
             pose matrix
    '''

    # head model from 2d facial landmarks
    head_model_2d = np.float32([
        flandmarks[0], flandmarks[4], flandmarks[6], flandmarks[12],
        flandmarks[16], flandmarks[17], flandmarks[26], flandmarks[27],
        flandmarks[30], flandmarks[33], flandmarks[36], flandmarks[39],
        flandmarks[42], flandmarks[45], flandmarks[62]
    ])

    # in camera coords
    _, rotation_vec, translation_vec = cv2.solvePnP(HEAD_MODEL_3D[0:15],
                                                    head_model_2d,
                                                    cp.cam_intrinsic_mat,
                                                    cp.dist_coeffs)

    # transform
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))

    # transform to euler
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)

    return euler_angles, rotation_mat, translation_vec, pose_mat
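
A self-contained way to sanity-check this solvePnP → Rodrigues → decomposeProjectionMatrix chain, independent of HEAD_MODEL_3D and cp (synthetic model and pose of my choosing): project known 3D points with a known pose and confirm the pose comes back.

import cv2
import numpy as np

model_3d = np.random.RandomState(0).uniform(-5, 5, (15, 3)).astype(np.float32)
K = np.array([[600., 0., 320.], [0., 600., 240.], [0., 0., 1.]])
rvec_true = np.array([0.2, -0.1, 0.05])
tvec_true = np.array([0.5, -0.3, 40.0])

pts_2d, _ = cv2.projectPoints(model_3d, rvec_true, tvec_true, K, None)
_, rvec, tvec = cv2.solvePnP(model_3d, pts_2d, K, None)

pose = cv2.hconcat((cv2.Rodrigues(rvec)[0], tvec.reshape(3, 1)))
euler = cv2.decomposeProjectionMatrix(pose)[6]
print(np.allclose(rvec.ravel(), rvec_true, atol=1e-4))  # True
print(euler.ravel())  # rotations about x, y, z in degrees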
Example #10
def get_cam_param(filename):
    fs = cv2.FileStorage(filename, cv2.FILE_STORAGE_READ)
    if not fs.isOpened():
        print("File not open")
    # intrinsic matrix
    all_cam_param = []
    for i in range(num_cams):
        s_index = str(i).zfill(3)
        # projection matrix
        P = fs.getNode("viff" + s_index + "_matrix").mat()
        K, R, t = cv2.decomposeProjectionMatrix(P)[0:3]
        t = np.squeeze(t)
        np.divide(t, t[3], out=t)
        # print(T.shape)
        t = t[0:-1]
        cam_params = CameraParams(R, t, K, P)
        all_cam_param.append(cam_params)
    fs.release()
    return all_cam_param
Example #11
 def aruco(self, frame):
     #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     corners, ids, rejectedImgPoints = aruco.detectMarkers(
         frame, self.aruco_dict, parameters=self.parameters)
     rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
         corners, self.size_of_marker, self.mtx, self.dist)
     imaxis = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
     if tvecs is not None:
         for i in range(len(tvecs)):
             imaxis = aruco.drawAxis(imaxis, self.mtx, self.dist, rvecs[i],
                                     tvecs[i], self.length_of_axis)
             # Use marker i's pose (the original always indexed marker 0).
             rvec = np.squeeze(rvecs[i], axis=None)
             tvec = np.squeeze(tvecs[i], axis=None)
             tvec = np.expand_dims(tvec, axis=1)
             rvec_matrix = cv2.Rodrigues(rvec)[0]
             proj_matrix = np.hstack((rvec_matrix, tvec))
             euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
             cv2.putText(imaxis, 'X: ' + str(int(euler_angles[0])),
                         (10, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (0, 0, 255))
             cv2.putText(imaxis, 'Y: ' + str(int(euler_angles[1])),
                         (115, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (0, 255, 0))
             cv2.putText(imaxis, 'Z: ' + str(int(euler_angles[2])),
                         (200, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                         (255, 0, 0))
     cv2.imshow('Aruco', imaxis)
Example #12
def head_pose(model_points, sorted_image_points, camera_matrix):
    dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, sorted_image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    rotation_mat, _ = cv2.Rodrigues(rotation_vector)
    pose_mat = cv2.hconcat((rotation_mat, translation_vector))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
    return euler_angle
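
The Rodrigues → hconcat → decomposeProjectionMatrix chain above recurs in almost every snippet on this page; a minimal reusable form (my own condensation, not taken from any single source here) is:

import cv2
import numpy as np

def euler_from_pose(rotation_vec, translation_vec):
    """Return the Euler angles in degrees from a solvePnP pose."""
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    t = np.asarray(translation_vec, dtype=np.float64).reshape(3, 1)
    pose_mat = cv2.hconcat((rotation_mat, t))
    # OpenCV reports rotations about the x-, y- and z-axes, read in these
    # snippets as (pitch, yaw, roll).
    return cv2.decomposeProjectionMatrix(pose_mat)[6].ravel()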
Example #13
    def cvtToBoxPnp(self, box_info):
        """数据格式转换,转换至目标位姿信息"""
        w, h = self.armour_size
        w, h = w / 2, h / 2
        world_coordinate = np.array([
            [w, h, 0],
            [-w, h, 0],
            [-w, -h, 0],
            [w, -h, 0],
        ],
                                    dtype=np.float64)
        # Pixel coordinates of the box corners
        pnts = np.array(box_info, dtype=np.float64)
        # rvec: rotation vector, tvec: translation vector
        success, rvec, tvec = cv2.solvePnP(world_coordinate, pnts,
                                           Camera_intrinsic["mtx"],
                                           Camera_intrinsic["dist"])
        distance = np.linalg.norm(tvec)

        yaw_angle = np.arctan(tvec[0] / tvec[2])
        pitch_angle = np.arctan(tvec[1] / tvec[2])
        # np.arctan returns radians; converted to degrees below.
        # These angles follow the image frame: x points right, y points down.
        yaw_angle = float(np.rad2deg(yaw_angle))
        pitch_angle = -float(np.rad2deg(pitch_angle))

        rvec_matrix = cv2.Rodrigues(rvec)[0]
        proj_matrix = np.hstack((rvec_matrix, tvec))  # [R | t], not [R | rvec]
        eulerAngles = -cv2.decomposeProjectionMatrix(proj_matrix)[6]  # Euler angles
        pitch, roll, yaw = eulerAngles[0], eulerAngles[1], eulerAngles[2]
        rect_pnp_info = [distance, int(yaw_angle), int(pitch_angle)]
        return rect_pnp_info
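
For intuition about the yaw-from-translation step above (hypothetical numbers): a target 0.5 m to the right and 2 m ahead of the camera sits at roughly 14 degrees of yaw.

import numpy as np

tvec = np.array([[0.5], [0.0], [2.0]])
yaw = np.rad2deg(np.arctan(tvec[0] / tvec[2])).item()
print(yaw)  # ~14.04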
Example #14
def get_head_pose(shape, im_shape):

    K = [
        im_shape[1], 0.0, im_shape[1] / 2, 0.0, im_shape[1], im_shape[0] / 2,
        0.0, 0.0, 1.0
    ]
    D = [0.0, 0.0, 0.0, 0.0]
    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(4, 1).astype(np.float32)

    # Select 14 of the 68 facial landmarks
    image_pts = np.float32([
        shape[17], shape[21], shape[22], shape[26], shape[36], shape[39],
        shape[42], shape[45], shape[31], shape[35], shape[48], shape[54],
        shape[57], shape[8]
    ])

    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts,
                                                    cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec,
                                        translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
Example #15
    def face_orientation(self, landmarks):
        v_cx, v_cy = landmarks[
            2, 0] - (landmarks[0, 0] + landmarks[1, 0]) / 2, landmarks[
                2, 1] - (landmarks[0, 1] + landmarks[1, 1]) / 2
        image_points = np.array(
            [
                (landmarks[2, 0], landmarks[2, 1]),  # Nose tip
                (landmarks[2, 0] + 1.2 * v_cx,
                 landmarks[2, 1] + 1.2 * v_cy),  # Chin
                (landmarks[0, 0], landmarks[0, 1]),  # Left eye left corner
                (landmarks[1, 0], landmarks[1, 1]),  # Right eye right corner
                (landmarks[3, 0], landmarks[3, 1]),  # Left Mouth corner
                (landmarks[4, 0], landmarks[4, 1])  # Right mouth corner
            ],
            dtype="double")

        (success, rotation_vector,
         translation_vector) = cv2.solvePnP(self.model_points, image_points,
                                            self.camera_matrix,
                                            self.dist_coeffs)

        rvec_matrix = cv2.Rodrigues(rotation_vector)[0]

        proj_matrix = np.hstack((rvec_matrix, translation_vector))
        eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]

        pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]

        pitch = math.degrees(math.asin(math.sin(pitch)))
        roll = -math.degrees(math.asin(math.sin(roll)))
        yaw = math.degrees(math.asin(math.sin(yaw)))

        return int(roll), int(pitch), int(yaw)
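
The degrees(asin(sin(x))) idiom used above folds any angle into [-90, 90] degrees, removing the 180-degree ambiguity decomposeProjectionMatrix can produce for near-frontal faces. A quick standalone demonstration with arbitrary sample angles:

import math

for deg in (10.0, 170.0, -170.0, 95.0):
    wrapped = math.degrees(math.asin(math.sin(math.radians(deg))))
    print(deg, "->", round(wrapped, 1))
# 10.0 -> 10.0, 170.0 -> 10.0, -170.0 -> -10.0, 95.0 -> 85.0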
Example #16
    def get_euler_angle(self, shape, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # get the shape of the image
        size = gray.shape

        # find the landmark image points
        image_points = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                                   shape[39], shape[42], shape[45], shape[31], shape[33],
                                   shape[35], shape[48], shape[54], shape[57], shape[8]])

        # calculate the camera matrix and declare coefficients (we assume no lens distortion)
        focal_length = size[1]
        center = (size[1]/2, size[0]/2)
        camera_matrix = np.array([
            [focal_length, 0, center[0]],
            [0, focal_length, center[1]],
            [0, 0, 1]], dtype="double")
        dist_coeffs = np.zeros((4, 1))

        # calculate the Rigid Body Transform vectors
        _, rotation_vec, translation_vec = cv2.solvePnP(self.model_points,
                                                        image_points,
                                                        camera_matrix,
                                                        dist_coeffs)

        # calc euler angle
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

        return euler_angle
Example #17
def calculate_pitch_yaw_roll(landmarks_2D, cam_w=256, cam_h=256,
                             radians=False):
    """ Return the the pitch  yaw and roll angles associated with the input image.
    @param radians When True it returns the angle in radians, otherwise in degrees.
    """

    assert landmarks_2D is not None, 'landmarks_2D is None'

    # Estimated camera matrix values.
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / np.tan(60 / 2 * np.pi / 180)
    f_y = f_x
    camera_matrix = np.float32([[f_x, 0.0, c_x], [0.0, f_y, c_y],
                                [0.0, 0.0, 1.0]])
    camera_distortion = np.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    # dlib (68 landmark) tracked points
    # TRACKED_POINTS = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35, 48, 54, 57, 8]
    # wflw (98 landmark) tracked points
    # TRACKED_POINTS = [33, 38, 50, 46, 60, 64, 68, 72, 55, 59, 76, 82, 85, 16]
    # X-Y-Z with X pointing forward and Y on the left and Z up.
    # The X-Y-Z coordinates used are like the standard coordinates of ROS (robotic operative system)
    # OpenCV uses the reference usually used in computer vision:
    # X points to the right, Y down, Z to the front
    landmarks_3D = np.float32([
        [6.825897, 6.760612, 4.402142],  # LEFT_EYEBROW_LEFT, 
        [1.330353, 7.122144, 6.903745],  # LEFT_EYEBROW_RIGHT, 
        [-1.330353, 7.122144, 6.903745],  # RIGHT_EYEBROW_LEFT,
        [-6.825897, 6.760612, 4.402142],  # RIGHT_EYEBROW_RIGHT,
        [5.311432, 5.485328, 3.987654],  # LEFT_EYE_LEFT,
        [1.789930, 5.393625, 4.413414],  # LEFT_EYE_RIGHT,
        [-1.789930, 5.393625, 4.413414],  # RIGHT_EYE_LEFT,
        [-5.311432, 5.485328, 3.987654],  # RIGHT_EYE_RIGHT,
        [2.005628, 1.409845, 6.165652],  # NOSE_LEFT,
        [-2.005628, 1.409845, 6.165652],  # NOSE_RIGHT,
        [2.774015, -2.080775, 5.048531],  # MOUTH_LEFT,
        [-2.774015, -2.080775, 5.048531],  # MOUTH_RIGHT,
        [0.000000, -3.116408, 6.097667],  # LOWER_LIP,
        [0.000000, -7.415691, 4.070434],  # CHIN
    ])
    landmarks_2D = np.asarray(landmarks_2D, dtype=np.float32).reshape(-1, 2)

    # Applying the PnP solver to find the 3D pose of the head from the 2D position of the landmarks.
    # retval - bool
    # rvec - Output rotation vector that, together with tvec, brings points from the world coordinate system to the camera coordinate system.
    # tvec - Output translation vector. It is the position of the world origin (SELLION) in camera co-ords
    _, rvec, tvec = cv2.solvePnP(landmarks_3D, landmarks_2D,
                                 camera_matrix, camera_distortion)

    # Rodrigues converts the rotation vector into a rotation matrix; the
    # Euler angles then come from decomposing the [R | t] pose matrix.
    rmat, _ = cv2.Rodrigues(rvec)
    pose_mat = cv2.hconcat((rmat, tvec))
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
    # euler_angles contains (pitch, yaw, roll); return plain floats.
    return [k[0] for k in euler_angles]
Example #18
    def solve_pose_by_68_points(self, image_points):
        """
        Solve pose from all the 68 image points
        Return (rotation_vector, translation_vector) as pose.
        """

        if self.r_vec is None:
            (_, rotation_vector, translation_vector) = cv2.solvePnP(
                self.model_points_68, image_points, self.camera_matrix, self.dist_coeefs)
            self.r_vec = rotation_vector
            self.t_vec = translation_vector

        (_, rotation_vector, translation_vector) = cv2.solvePnP(
            self.model_points_68,
            image_points,
            self.camera_matrix,
            self.dist_coeefs,
            rvec=self.r_vec,
            tvec=self.t_vec,
            useExtrinsicGuess=True)
        axis = np.float32([[500, 0, 0],
                           [0, 500, 0],
                           [0, 0, 500]])
        imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector, self.camera_matrix, self.dist_coeefs)
        modelpts, jac2 = cv2.projectPoints(self.model_points, rotation_vector, translation_vector, self.camera_matrix, self.dist_coeefs)
        rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
        proj_matrix = np.hstack((rvec_matrix, translation_vector))
        eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
        pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]
        pitch = math.degrees(math.asin(math.sin(pitch)))
        roll = -math.degrees(math.asin(math.sin(roll)))
        yaw = math.degrees(math.asin(math.sin(yaw)))
    
        return (rotation_vector, translation_vector),(round(yaw,2), round(pitch,2), round(roll,2))
Example #19
def face_orientation(frame, landmarks):
    size = frame.shape
    image_points = landmarks.astype('double')
    model_points = np.array([
                            (0.0, 0.0, 0.0),             # Nose tip
                            (0.0, -330.0, -65.0),        # Chin
                            (-165.0, 170.0, -135.0),     # Left eye left corner
                            (165.0, 170.0, -135.0),      # Right eye right corner
                            (-150.0, -150.0, -125.0),    # Left Mouth corner
                            (150.0, -150.0, -125.0)      # Right mouth corner
                        ])
    center = (size[1]/2, size[0]/2)
    focal_length = center[0] / np.tan(60/2 * np.pi / 180)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                         [0, focal_length, center[1]],
                         [0, 0, 1]], dtype = "double")
    dist_coeffs = np.zeros((4,1))
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points,
                                                                  camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    axis = np.float32([[500,0,0],
                          [0,500,0],
                          [0,0,500]])
    imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
    proj_matrix = np.hstack((rvec_matrix, translation_vector))
    eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
    pitch, yaw, roll = [np.radians(_) for _ in eulerAngles]
    pitch = np.degrees(np.arcsin(np.sin(pitch)))
    roll = -np.degrees(np.arcsin(np.sin(roll)))
    yaw = np.degrees(np.arcsin(np.sin(yaw)))
    return ("%.2f" % roll[0], "%.2f" % pitch[0], "%.2f" % yaw[0])
Example #20
def get_head_pose(shape):

    image_pts = np.float32([
        shape[17], shape[21], shape[22], shape[26], shape[36], shape[39],
        shape[42], shape[45], shape[31], shape[35], shape[48], shape[54],
        shape[57], shape[8]
    ])

    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts,
                                                    cam_matrix, dist_coeffs)

    print(rotation_vec.shape, translation_vec.shape)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec,
                                        translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle

    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    print(rotation_mat.shape)

    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    print(pose_mat.shape)

    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
Example #21
def get_head_pose(shape):  # head pose estimation
    # 2D reference points (pixel coordinates); annotation follows
    # https://ibug.doc.ic.ac.uk/resources/300-W/
    # 17 left brow left / 21 left brow right / 22 right brow left / 26 right brow right /
    # 36 left eye left / 39 left eye right / 42 right eye left / 45 right eye right /
    # 31 nose left / 35 nose right / 48 mouth left / 54 mouth right / 57 lower lip / 8 chin
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35],
                            shape[48], shape[54], shape[57], shape[8]])
    # solvePnP recovers the pose: rotation_vec and translation_vec;
    # cam_matrix corresponds to the K matrix and dist_coeffs to the D matrix.
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    # projectPoints reprojection: 3D points, intrinsics, distortion, r and t
    # in; reprojected 2D points out (their distance to the original 2D points
    # is the reprojection error).
    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec,
                                        cam_matrix, dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))  # 8 rows x 2 cols

    # Compute the Euler angles; see
    # https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#decomposeprojectionmatrix
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)  # rotation vector -> rotation matrix
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))  # horizontal concat (vconcat is vertical)
    # decomposeProjectionMatrix splits the projection matrix into the camera
    # matrix, rotation matrix and more; the last output is the Euler angles.
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
    
    pitch, yaw, roll = [math.radians(_) for _ in euler_angle]

    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))
    print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))

    return reprojectdst, euler_angle  # reprojected points, Euler angles
Example #22
def face_orientation(frame, landmarks):
    size = frame.shape  # (height, width, color_channel)

    image_points = np.array(
        [
            (landmarks[4], landmarks[5]),  # Nose tip
            # (landmarks[10], landmarks[11]),  # Chin
            (landmarks[0], landmarks[1]),  # Left eye left corner
            (landmarks[2], landmarks[3]),  # Right eye right corner
            (landmarks[6], landmarks[7]),  # Left Mouth corner
            (landmarks[8], landmarks[9])  # Right mouth corner
        ],
        dtype="double")

    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        # (0.0, -330.0, -65.0),  # Chin
        (-165.0, 170.0, -135.0),  # Left eye left corner
        (165.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    # Camera internals

    center = (size[1] / 2, size[0] / 2)
    focal_length = center[0] / np.tan(60 / 2 * np.pi / 180)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(
        model_points,
        image_points,
        camera_matrix,
        dist_coeffs,
        # flags=cv2.SOLVEPNP_ITERATIVE
    )

    axis = np.float32([[500, 0, 0], [0, 500, 0], [0, 0, 500]])

    imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector,
                                    camera_matrix, dist_coeffs)
    modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector,
                                       translation_vector, camera_matrix,
                                       dist_coeffs)
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]

    proj_matrix = np.hstack((rvec_matrix, translation_vector))
    eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]

    pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]

    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))

    return imgpts, modelpts, (str(int(roll)), str(int(pitch)),
                              str(int(yaw))), (landmarks[4], landmarks[5])
Example #23
 def _load_rt(self, path):
     p = self._load_projection_matrix(path)
     res = cv2.decomposeProjectionMatrix(p)
     cameraMatrix, rotMatrix, transVect = res[:3]
     R = rotMatrix.astype('float64')
     # transVect is the homogeneous camera centre; dehomogenise it.
     tv = transVect.astype('float64')
     T = (tv / tv[3])[:3]
     return R, T
Example #24
    def GetDistanceAndAngle(self, im, rvecs, tvecs, QRsize=13):
        """Use the rotation and translation vectors and the size of a QR code
        to determine the Euler angles and distance."""
        #get distance from matrix
        dis = (tvecs.ravel()[2] * QRsize).item()

        # calc euler angle
        rotation_mat, _ = cv2.Rodrigues(rvecs)
        pose_mat = cv2.hconcat((rotation_mat, tvecs))
        _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
        if (self.visualizeResult):
            cv2.putText(im, ("Distance: " + str(round(dis, 2)) + " cm"),
                        (5, 25), font, 1, (255, 50, 50), 2)
            cv2.putText(
                im,
                ("angle X: " + str(round(euler_angle[0][0], 2)) + " degrees"),
                (5, 65), font, 1, (255, 50, 50), 2)
            cv2.putText(
                im,
                ("angle Y: " + str(round(euler_angle[1][0], 2)) + " degrees"),
                (5, 105), font, 1, (255, 50, 50), 2)
            cv2.putText(
                im,
                ("angle Z: " + str(round(euler_angle[2][0], 2)) + " degrees"),
                (5, 145), font, 1, (255, 50, 50), 2)
        if (self.debug):
            print("Distance: " + str(dis) + "|AngX: " +
                  str(euler_angle[0][0]) + "|AngY: " + str(euler_angle[1][0]) +
                  "|AngZ: " + str(euler_angle[2][0]))

        return dis, euler_angle
Example #25
def face_orientation(frame, landmarks):
    size = frame.shape  #(height, width, color_channel)

    image_points = np.array(
        [
            landmarks["30"],  # Nose
            landmarks["8"],  # Chin
            landmarks["45"],  # Left eye left corner
            landmarks["36"],  # Right eye right cornee
            landmarks["54"],  # Left Mouth corner
            landmarks["48"]  # Right mouth corner
        ],
        dtype="double")

    # Example image points for reference:
    # (441,649),   # Nose tip
    # (415,924),   # Chin
    # (574,534),   # Left eye left corner
    # (278,513),   # Right eye right corner
    # (528,729),   # Left Mouth corner
    # (305,714)    # Right mouth corner
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-165.0, 170.0, -135.0),  # Left eye left corner
        (165.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    center = (size[1] / 2, size[0] / 2)
    focal_length = center[0] / np.tan(60 / 2 * np.pi / 180)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    (success, rotation_vector,
     translation_vector) = cv2.solvePnP(model_points, image_points,
                                        camera_matrix, dist_coeffs)

    axis = np.float32([[500, 0, 0], [0, 500, 0], [0, 0, 500]])

    imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector,
                                    camera_matrix, dist_coeffs)
    modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector,
                                       translation_vector, camera_matrix,
                                       dist_coeffs)
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]

    proj_matrix = np.hstack((rvec_matrix, translation_vector))
    eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]

    pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]

    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))
    return imgpts, modelpts, ((int(roll)), (int(pitch)),
                              (int(yaw))), (landmarks["4"], landmarks["5"])
Example #26
def get_head_pose(shape, frame_counter, frames, pitch_angles, roll_angles,
                  yaw_angles):
    image_pts = np.float32([
        shape[17], shape[21], shape[22], shape[26], shape[36], shape[39],
        shape[42], shape[45], shape[31], shape[35], shape[48], shape[54],
        shape[57], shape[8]
    ])

    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts,
                                                    cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec,
                                        translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    # decomposeProjectionMatrix reports rotations about x, y, z:
    # (pitch, yaw, roll).
    pitch = euler_angle[0]
    yaw = euler_angle[1]
    roll = euler_angle[2]
    frames.append(frame_counter)
    pitch_angles.append(pitch)
    roll_angles.append(roll)
    yaw_angles.append(yaw)
    frame_counter += 1
    #    print(pitch)
    return reprojectdst, euler_angle, frame_counter, frames, pitch_angles, roll_angles, yaw_angles
Example #27
def load_K_Rt_from_P(filename, P=None):
    if P is None:
        lines = open(filename).read().splitlines()
        if len(lines) == 4:
            lines = lines[1:]
        lines = [[x[0], x[1], x[2], x[3]]
                 for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()

    out = cv2.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]

    K = K / K[2, 2]
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    to_gl = np.eye(3, dtype=np.float32)
    to_gl[0, 0] = -1.
    to_gl[1, 1] = -1.
    pose[:3, :3] = np.dot(R.transpose(), to_gl)
    pose[:3, 3] = (t[:3] / t[3])[:, 0]

    return intrinsics, pose
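
A small consistency check for the function above (my sketch, with synthetic values): build P = K [R | t] and confirm that the returned pose carries the camera centre -R^T t in its translation column.

import cv2
import numpy as np

K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
R, _ = cv2.Rodrigues(np.array([0.1, -0.2, 0.3]))
t = np.array([[0.5], [-1.0], [2.0]])
P = (K @ np.hstack([R, t])).astype(np.float32)

intrinsics, pose = load_K_Rt_from_P(None, P=P)
print(np.allclose(pose[:3, 3], (-R.T @ t).ravel(), atol=1e-4))  # True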
Example #28
def get_head_pose(shape, img):
    h, w, _ = img.shape
    K = [w, 0.0, w // 2, 0.0, w, h // 2, 0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)

    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([
        shape[17], shape[21], shape[22], shape[26], shape[36], shape[39],
        shape[42], shape[45], shape[31], shape[35]
    ])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts,
                                                    cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec,
                                        translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
Example #29
 def _get_pitch_yaw(self):
     """ Obtain the yaw and pitch from the :attr:`_rotation` in eular angles. """
     proj_matrix = np.zeros((3, 4), dtype="float32")
     proj_matrix[:3, :3] = cv2.Rodrigues(self._rotation)[0]
     euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1]
     self._pitch_yaw = (euler[0][0], euler[1][0])
     logger.trace("yaw_pitch: %s", self._pitch_yaw)
Example #30
 def getFaceDirection(self, rotateMatrix, tVec):
     rotateMatrix = np.array(rotateMatrix)
     tVec = np.asarray(tVec).reshape(3, 1)  # ensure a column vector
     projectionMatrix = np.concatenate((rotateMatrix, tVec), axis=1)
     _, _, _, _, _, _, eulerAngles = cv2.decomposeProjectionMatrix(
         projectionMatrix)
     print(eulerAngles)
Example #31
def get_head_pose(shape):
    # Dlib shape_predictor can get 68 points of face
    image_pts = np.float32([
        shape[17], shape[21], shape[22], shape[26], shape[36], shape[39],
        shape[42], shape[45], shape[31], shape[35], shape[48], shape[54],
        shape[57], shape[8]
    ])

    _, rotation_vec, translation_vec = cv2.solvePnP(pose.object_pts, image_pts,
                                                    pose.cam_matrix,
                                                    pose.dist_coeffs)

    # Project 3D points to image plane , output image points
    reprojectdst, _ = cv2.projectPoints(pose.reprojectsrc, rotation_vec,
                                        translation_vec, pose.cam_matrix,
                                        pose.dist_coeffs)
    # Reprojected 2D points
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # Calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
Example #32
def get_head_pose(shape, cam_matrix, dist_coeffs):
    #image points are the landmark fiducial points on the image plane corresponding to the 3D model points (model_pts)
    image_pts = np.float32(
        [shape[30], shape[8], shape[36], shape[45], shape[48], shape[54]])
    #OpenCV solvepnp function to compute head pose
    (success, rotation_vec, translation_vec) = cv2.solvePnP(
        model_pts,
        image_pts,
        cam_matrix,
        dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec,
                                        translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    # Cross-check cv2.Rodrigues against scipy's Rotation (imported as R).
    R_rvec = R.from_rotvec(rotation_vec.transpose())
    R_rotmat = R_rvec.as_matrix()
    print(rotation_mat, R_rotmat)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, rotation_vec, rotation_mat, translation_vec, euler_angle, image_pts
Example #33
def get_head_pose(shape):
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35],
                            shape[48], shape[54], shape[57], shape[8]])

    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle
"""
Decomposition of the projection matrix
"""

import cv2
import numpy as np

projMatrix1 = np.array([[1.008625769, -.1231000000, -2.726496908, 100.7731396],
                        [.8361681672, 4.182806666, .09730552466, 157.7821517],
                        [.001940031162, -.0002500000000, .0004294973553, .5091827970]],
                       np.float64)

# In Python the decomposed matrices are returned by the call; pre-allocated
# output arguments are not filled in reliably, so capture the return values.
cameraMatrix1, rotMatrix1, transVect1 = cv2.decomposeProjectionMatrix(projMatrix1)[:3]
print("cameraMatrix: ", cameraMatrix1)
print("rotMatrix", rotMatrix1)
print("transVect", transVect1)

projMatrix2 = np.array([[1.426624453, -.1471000000, -2.539954861, 78.3047630],
                        [.7976452886, 4.150586666, .1303686090, 143.3641217],
                        [.001903601260, -.0001466666667, .0006532670189, .5012476834]],
                       np.float64)

cameraMatrix2, rotMatrix2, transVect2 = cv2.decomposeProjectionMatrix(projMatrix2)[:3]
print("cameraMatrix: ", cameraMatrix2)
print("rotMatrix", rotMatrix2)
print("transVect", transVect2)
Example #35
    def calibration_opencv(self, rgb, h, w, sq_size, use_pnp=True, use_ransac=True):
        self.opencv_th.set()
        #cameraMatrix = self.kinect.rgb_camera_info.K.reshape(3,3)
        projMatrix = np.matrix(self.kinect.rgb_camera_info.P).reshape(3,4)
        cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ, eulerAngles = cv2.decomposeProjectionMatrix(projMatrix)
            
        distCoeffs = np.array(self.kinect.rgb_camera_info.D)
        
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.1)
        gray = cv2.cvtColor(rgb,cv2.COLOR_BGR2GRAY)
        
        objp = np.zeros((h*w,3), np.float32)
        objp[:,:2] = np.mgrid[0:h,0:w].T.reshape(-1,2)
        objp = objp*sq_size
        objp[:,[0, 1]] = objp[:,[1, 0]]
        
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (h,w),None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)

            cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
            imgpoints.append(corners)
            
            # Draw and display the corners
            cv2.drawChessboardCorners(rgb, (h,w), corners,ret)

            if use_pnp:
                if use_ransac:
                    # Modern OpenCV returns (retval, rvec, tvec, inliers).
                    if not self.useExtrinsicGuess:
                        _, rvec, tvec, _ = cv2.solvePnPRansac(objp, corners, cameraMatrix, distCoeffs)
                        self.rvec = rvec
                        self.tvec = tvec
                        self.useExtrinsicGuess = True
                    else:
                        _, r, t, _ = cv2.solvePnPRansac(objp, corners, cameraMatrix, distCoeffs,
                                                        self.rvec, self.tvec, self.useExtrinsicGuess)
                        rvec = self.rvec
                        tvec = self.tvec

            else:
                ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],cameraMatrix,None,flags=cv2.CALIB_USE_INTRINSIC_GUESS)#flags=cv2.CALIB_FIX_ASPECT_RATIO)
                rvec = rvecs[0]
                tvec = tvecs[0]
                
            tvect = np.matrix(tvec).reshape(3,1)
            imgpoints2, _ = cv2.projectPoints(objp, rvec, tvec, cameraMatrix, distCoeffs)
            
            for p in imgpoints2:
                cv2.circle(rgb,(int(p[0][0]),int(p[0][1])),2,(0,12,235),1)
                
            
            ret_R,_ = cv2.Rodrigues(rvec)
            
            #print tvec,rvec

            ret_Rt = np.matrix(ret_R)
            
            tmp = np.append(ret_Rt, np.array([0,0,0]).reshape(3,1), axis=1)
            aug=np.array([[0.0,0.0,0.0,1.0]])
            T = np.append(tmp,aug,axis=0)

            quaternion = quaternion_from_matrix(T)

            self.tfcv_thread.set_transformation(tvect,quaternion)
        cv2.imshow('findChessboardCorners - OpenCV',rgb)
        self.opencv_th.clear()
        return 
Example #36
def encode6b(Tr):
    # Encode a 3x4 pose as 6 numbers: a Rodrigues vector plus the
    # dehomogenised camera centre (np.r_ concatenates the two).
    r, t = cv2.decomposeProjectionMatrix(Tr[:3])[1:3]
    return r_[cv2.Rodrigues(r)[0].flatten(), t.flat[:3] / t[3]]
Example #37
 def get_theta(self):
     _, _, _, rotX, rotY, rotZ, euler = cv2.decomposeProjectionMatrix(self.Proj)
     # OpenCV returns the Euler angles as rotations about the x, y and z
     # axes; this class maps them onto its own yaw/pitch/roll convention.
     self.yaw = euler[0]
     self.pitch = euler[1]
     self.roll = euler[2]
Example #38
def calibrateImages(obj_pts, ptsL, ptsR, imsize):
    '''Calibrate the Stereo camera rig, given:
    @obj_pts: points that specify the calibration checkerboard pattern
    @ptsL: detected image points in the left calibration images
    @ptsR: detected image points in the right calibration images
    @imsize: the stipulated size of all calibration images
    return: updated StereoCam object
    '''
    # Perform Stereo Calibration.
    # Note: this is the OpenCV 2.4 signature; OpenCV 3+ expects initial
    # camera matrices and distortion coefficients before the image size.
    retval, cam1, dist1, cam2, dist2, R, T, E, F = \
        cv2.stereoCalibrate(
            obj_pts, ptsL, ptsR, imsize)
    dist1 = dist1.ravel()
    dist2 = dist2.ravel()

    cameraObj = StereoCam()
    cameraObj.cam1, cameraObj.dist1 = cam1, dist1
    cameraObj.cam2, cameraObj.dist2 = cam2, dist2

    cameraObj.R, cameraObj.T, cameraObj.E, cameraObj.F = R, T, E, F

    # Perform Stereo Rectification
    (R1, R2, P1, P2, Q, roi1, roi2) = \
        cv2.stereoRectify(cam1, dist1, cam2, dist2, imsize, R, T)

    # update the left and right rotation and projection matrices
    cameraObj.R1, cameraObj.R2 = R1, R2
    cameraObj.P1, cameraObj.P2 = P1, P2

    # update the image projection (aka disparity-to-depth mapping) matrix
    cameraObj.Q = Q

    # Get the new camera matrices from the rectified projection matrices;
    # the first output of decomposeProjectionMatrix is the intrinsic matrix.
    K1 = cv2.decomposeProjectionMatrix(P1)[0]
    K2 = cv2.decomposeProjectionMatrix(P2)[0]

    cameraObj.K1, cameraObj.K2 = K1, K2

    return cameraObj
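
For rectified projection matrices of the form K [I | t], decomposeProjectionMatrix simply returns that K, which is all this function uses it for. A standalone illustration with a made-up rectified pair (hypothetical focal length and baseline):

import cv2
import numpy as np

K = np.array([[700., 0., 320.], [0., 700., 240.], [0., 0., 1.]])
baseline = 0.1  # metres
P1 = np.hstack([K, np.zeros((3, 1))])
P2 = np.hstack([K, np.array([[-K[0, 0] * baseline], [0.], [0.]])])

print(np.allclose(cv2.decomposeProjectionMatrix(P1)[0], K))  # True
print(np.allclose(cv2.decomposeProjectionMatrix(P2)[0], K))  # True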