Code example #1
import math

import cv2


def angles_from_rvec(rvecs):
    # Convert the Rodrigues rotation vector into a 3x3 rotation matrix.
    r_mat, _jacobian = cv2.Rodrigues(rvecs)
    # Decompose the matrix into Euler angles (roll, pitch, yaw),
    # assuming the R = Rz(c) @ Ry(b) @ Rx(a) convention.
    a = math.atan2(r_mat[2][1], r_mat[2][2])
    b = math.atan2(
        -r_mat[2][0],
        math.sqrt(math.pow(r_mat[2][1], 2) + math.pow(r_mat[2][2], 2)))
    c = math.atan2(r_mat[1][0], r_mat[0][0])
    return [a, b, c]
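
A quick round-trip check of the decomposition above (a minimal sketch; the expected values assume the Rz @ Ry @ Rx convention noted in the comments):

import numpy as np

# A rotation of 0.3 rad about the z-axis should come back as yaw ~= 0.3.
rvec = np.float32([0.0, 0.0, 0.3])
roll, pitch, yaw = angles_from_rvec(rvec)
print(roll, pitch, yaw)  # expected: ~0.0, ~0.0, ~0.3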
Code example #2
    def find_extrinsic(self):
        # Assumes module-level: import numpy as np; import cv2 as cv
        # Load the 2D/3D point correspondences saved by the calibration step.
        with np.load('img_obj_points.npz') as X:
            imgpoints, objpoints = [X[i] for i in ('arr_0', 'arr_1')]
        # Load the camera intrinsics and distortion coefficients.
        with np.load('output.npz') as X:
            mtx, dist, _, _ = [
                X[i] for i in ('arr_0', 'arr_1', 'arr_2', 'arr_3')
            ]

        # The spin box selects which calibration image to use (1-based).
        num = self.spinBox.value()

        # Solve for the pose of that image, then convert the rotation
        # vector to a matrix and concatenate [R | t] into the 3x4 extrinsic.
        retval, rvecs, tvecs = cv.solvePnP(objpoints[num - 1],
                                           imgpoints[num - 1], mtx, dist)
        dst, _ = cv.Rodrigues(rvecs)
        extrinsic_mtx = cv.hconcat([dst, tvecs])
        print(extrinsic_mtx)
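
The 3x4 extrinsic maps homogeneous world points into camera coordinates. A minimal standalone sketch with made-up values (not taken from the snippet above):

import numpy as np

# Hypothetical extrinsic [R | t]: identity rotation, 0.5 m forward offset.
extrinsic_mtx = np.hstack([np.eye(3), np.array([[0.0], [0.0], [0.5]])])
X_world = np.array([0.1, 0.2, 1.0, 1.0])  # homogeneous world point
X_cam = extrinsic_mtx @ X_world           # 3-vector in the camera frame
print(X_cam)  # [0.1 0.2 1.5]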
Code example #3
import cv2 as cv
import numpy as np
from scipy.optimize import least_squares


def refine_pose(p0, X, uv, K, weights=None):
    # `project` and `pose` are helpers assumed to exist elsewhere in this
    # codebase: `pose(p, R0)` builds a 3x4 [R | t] from the 6-vector p on
    # top of R0, and `project(K, Xc)` maps camera-frame points to pixels.

    # Factor the initial rotation out as R0 so the rotation part of p is
    # optimized as a small correction around zero (better conditioning).
    R0, _ = cv.Rodrigues(p0[:3])
    p0[:3] = np.zeros(3)  # note: mutates the caller's array in place

    if weights is None:
        res_fun = lambda p: np.ravel(project(K, pose(p, R0) @ X) - uv)
    else:
        # Weighted residuals, e.g. to downweight unreliable observations.
        res_fun = lambda p: weights @ np.ravel(
            project(K, pose(p, R0) @ X) - uv)

    res = least_squares(res_fun, p0, verbose=2)
    p_opt = res.x
    J = res.jac  # Jacobian at the optimum, useful for covariance estimates

    return p_opt, J, R0
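
The two helpers are not shown in the snippet. A minimal sketch of what they might look like under the assumptions stated in the comments above (hypothetical implementations, not the original author's):

def pose(p, R0):
    # Hypothetical helper: compose the small rotation correction p[:3]
    # with the base rotation R0 and append the translation p[3:].
    R_delta, _ = cv.Rodrigues(p[:3])
    return np.hstack([R_delta @ R0, p[3:].reshape(3, 1)])


def project(K, Xc):
    # Hypothetical helper: pinhole projection of 3xN camera-frame points.
    uv_h = K @ Xc[:3]
    return uv_h[:2] / uv_h[2]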
Code example #4
        # Column header for the output CSV.
        recordWriter.writerow([
            'Distance r(in m)', 'Azimuth(in deg)', 'Elevation(in deg)',
            'x-axis', 'y-axis', 'z-axis', 'Rvec1', 'Rvec2', 'Rvec3',
            'TimeStamp(UTC)', 'TimeStamp(Datetime)'
        ])
        for i, row in enumerate(readings):
            # Translation vector; x and y are sign-flipped to match the
            # target coordinate convention.
            tvec1 = -float(row[3])
            tvec2 = -float(row[4])
            tvec3 = float(row[5])

            translation = np.array([tvec1, tvec2, tvec3])
            translation = translation.reshape(3, 1)
            rotation = np.array([float(row[6]), float(row[7]), float(row[8])])

            # Obtain the 3x3 rotation matrix from the rotation vector.
            rotMatrix, _jacobian = cv2.Rodrigues(rotation)

            # Build the 4x4 homogeneous [R | t] matrix; `line` is the
            # bottom row [0, 0, 0, 1] defined earlier in the script.
            rotMatrix = np.hstack([rotMatrix, translation])
            rotMatrix = np.vstack([rotMatrix, line])

            # Relative position of the tag w.r.t. the camera; `tagLoc` is
            # the tag's homogeneous position, also defined elsewhere.
            relPosition = np.matmul(rotMatrix, tagLoc)
            relPosition = np.delete(relPosition, -1)  # drop homogeneous 1

            relX = -relPosition[0]
            relY = -relPosition[1]
            relZ = relPosition[2]
            az, el, r = cart2sph(relZ, relX, relY)
            # The trailing columns follow the header row above.
            recordWriter.writerow([str(r), str(az), str(el), str(relX),
                                   str(relY), str(relZ), str(row[6]),
                                   str(row[7]), str(row[8]),
                                   str(row[9]), str(row[10])])
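
cart2sph is not defined in these snippets. A minimal sketch of a conventional implementation (an assumption: this follows MATLAB's radian-based convention, while the CSV header above claims degrees, so the actual helper may convert units or order arguments differently):

import numpy as np

def cart2sph(x, y, z):
    # Hypothetical helper: Cartesian to spherical coordinates,
    # returning azimuth and elevation in radians plus the radius.
    hxy = np.hypot(x, y)
    az = np.arctan2(y, x)
    el = np.arctan2(z, hxy)
    r = np.hypot(hxy, z)
    return az, el, r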
Code example #5
    def return_roll_pitch_yaw(self, image, radians=False):
        """
        Return the roll, pitch and yaw angles associated with the input image.

          @param image A colour image; it must be at least 64 pixels.
          @param radians When True the angles are returned in radians, otherwise in degrees.
        """
        # The dlib shape predictor returns 68 points,
        # we are interested only in a few of those
        TRACKED_POINTS = (0, 4, 8, 12, 16, 17, 26, 27, 30, 33, 36, 39, 42, 45,
                          62)

        # Anthropometric constant values of the human head.
        # Check the wikipedia EN page and:
        # "Head-and-Face Anthropometric Survey of U.S. Respirator Users"
        #
        # X-Y-Z with X pointing forward and Y on the left and Z up.
        # The X-Y-Z coordinates used are like the standard
        # coordinates of ROS (robotic operative system)
        # OpenCV uses the reference usually used in computer vision:
        #    X points to the right, Y down, Z to the front #
        # The Male mean interpupillary distance is 64.7 mm
        #   (https://en.wikipedia.org/wiki/Interpupillary_distance)
        #
        P3D_RIGHT_SIDE = np.float32([-100.0, -77.5, -5.0])  #0
        P3D_GONION_RIGHT = np.float32([-110.0, -77.5, -85.0])  #4
        P3D_MENTON = np.float32([0.0, 0.0, -122.7])  #8
        P3D_GONION_LEFT = np.float32([-110.0, 77.5, -85.0])  #12
        P3D_LEFT_SIDE = np.float32([-100.0, 77.5, -5.0])  #16
        P3D_FRONTAL_BREADTH_RIGHT = np.float32([-20.0, -56.1, 10.0])  #17
        P3D_FRONTAL_BREADTH_LEFT = np.float32([-20.0, 56.1, 10.0])  #26
        P3D_SELLION = np.float32([0.0, 0.0, 0.0])  #27 This is the world origin
        P3D_NOSE = np.float32([21.1, 0.0, -48.0])  #30
        P3D_SUB_NOSE = np.float32([5.0, 0.0, -52.0])  #33
        P3D_RIGHT_EYE = np.float32([-20.0, -32.35, -5.0])  #36
        P3D_RIGHT_TEAR = np.float32([-10.0, -20.25, -5.0])  #39
        P3D_LEFT_TEAR = np.float32([-10.0, 20.25, -5.0])  #42
        P3D_LEFT_EYE = np.float32([-20.0, 32.35, -5.0])  #45
        #P3D_LIP_RIGHT = np.float32([-20.0, 65.5,-5.0]) #48
        #P3D_LIP_LEFT = np.float32([-20.0, 65.5,-5.0]) #54
        P3D_STOMION = np.float32([10.0, 0.0, -75.0])  #62

        # This matrix contains the 3D points of the
        # 15 landmarks we want to find. It has been
        # obtained from anthropometric measurement
        # of the human head.
        landmarks_3D = np.float32([
            P3D_RIGHT_SIDE, P3D_GONION_RIGHT, P3D_MENTON, P3D_GONION_LEFT,
            P3D_LEFT_SIDE, P3D_FRONTAL_BREADTH_RIGHT, P3D_FRONTAL_BREADTH_LEFT,
            P3D_SELLION, P3D_NOSE, P3D_SUB_NOSE, P3D_RIGHT_EYE, P3D_RIGHT_TEAR,
            P3D_LEFT_TEAR, P3D_LEFT_EYE, P3D_STOMION
        ])

        # Get the 2D position of our landmarks over the full image.
        img_h, img_w, img_d = image.shape
        landmarks_2D = self._return_landmarks(inputImg=image,
                                              roiX=0,
                                              roiY=0,
                                              roiW=img_w,
                                              roiH=img_h,
                                              points_to_return=TRACKED_POINTS)

        # Draw some red dots on the image:
        # for point in landmarks_2D:
        #     cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)

        # Applying the PnP solver to find the 3D pose
        # of the head from the 2D position of the landmarks.
        # retval - bool
        # rvec - Output rotation vector that, together with tvec, brings
        # points from the world coordinate system to the camera coordinate system.
        # tvec - Output translation vector. It is the position of the world origin (SELLION) in camera co-ords
        retval, rvec, tvec = cv2.solvePnP(landmarks_3D, landmarks_2D,
                                          self.camera_matrix,
                                          self.camera_distortion)

        # Convert the rotation vector into a 3x3 rotation matrix.
        rmat, _ = cv2.Rodrigues(rvec)

        # An alternative is cv2.decomposeProjectionMatrix on the 3x4
        # [rmat | tvec] matrix, whose seventh return value holds the
        # Euler angles (pitch, yaw, roll); see code example #8.

        # Row-major 4x4 homogeneous pose [R | t; 0 0 0 1] (currently unused).
        head_pose = [
            rmat[0, 0], rmat[0, 1], rmat[0, 2], tvec[0], rmat[1, 0],
            rmat[1, 1], rmat[1, 2], tvec[1], rmat[2, 0], rmat[2, 1],
            rmat[2, 2], tvec[2], 0.0, 0.0, 0.0, 1.0
        ]

        return self.rotationMatrixToEulerAngles(rmat)
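
rotationMatrixToEulerAngles is not shown in the snippet. A minimal standalone sketch of the common implementation it presumably follows (an assumption; it mirrors the decomposition in code example #1 and assumes import math and import numpy as np):

def rotationMatrixToEulerAngles(R):
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:  # gimbal lock: pitch is near +/-90 degrees
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])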
Code example #6
    def render(self, index, points, colors, size=4, view_distance=50):
        """
        Project given points into the image with given index. Returns rendered image

        :param index: index of the image / camera pose. See self.poses to select an image / pose.
        :param points: 3d points as n x 3 numpy array in image coordinates
        :param colors: colors of each point as n x 3 numpy array. Defined between 0 and 255
        :param size: size of each point in. points further away are drawn more small
        :param view_distance: only points that are within this distance in meter to the camera origin are drawn.
        :return: rendered color image
        """
        assert 0 <= index < len(self.poses)
        img_path = self.folder / self.poses[index]['filename']
        img = cv2.imread(str(img_path))

        # The stored pose maps camera to world; invert it to get the
        # world-to-camera transform: Rodrigues(-r) = R^T and t' = -R^T t.
        pose = self.poses[index]['full-pose']
        rot_vec = -np.array([pose['rx'], pose['ry'], pose['rz']])
        t_vec = -np.array([pose['tx'], pose['ty'], pose['tz']
                           ]) @ cv2.Rodrigues(rot_vec)[0].T

        # Select points which are close to the camera origin (world coords).
        cam_pos = -np.matmul(cv2.Rodrigues(rot_vec)[0].T, t_vec)
        distances = np.linalg.norm(points - cam_pos, axis=1)
        view_mask = distances < view_distance

        # select points which are in front of camera
        cam_points3d = points @ cv2.Rodrigues(rot_vec)[0].T + t_vec
        view_mask = view_mask & (cam_points3d[:, 2] > 0)

        view_points3d = points[view_mask]
        view_distances = distances[view_mask]
        view_colors = colors[view_mask]
        if len(view_points3d) == 0:
            return img  # nothing to draw: return the image unchanged
        view_points2d = cv2.projectPoints(view_points3d, rot_vec, t_vec,
                                          self.K,
                                          self.distortion)[0].reshape(-1, 2)

        p = view_points2d
        selection = np.all((p[:, 0] >= 0, p[:, 0] < img.shape[1], p[:, 1] >= 0,
                            p[:, 1] < img.shape[0]),
                           axis=0)
        p = p[selection]

        # Normalize distances so that points 4 m away get the nominal size.
        norm_distances = view_distances[selection] / 4.0
        # cv2.circle accepts sub-pixel coordinates as fixed-point integers:
        # shift=3 means 3 fractional bits, i.e. a scale factor of 8.
        shift = 3
        factor = (1 << shift)

        def I(x_):
            # Convert a float coordinate to a rounded fixed-point int.
            return int(x_ * factor + 0.5)

        for i in range(0, len(p)):
            cv2.circle(img, (I(p[i][0]), I(p[i][1])),
                       I(size / norm_distances[i]),
                       view_colors[i],
                       -1,
                       shift=shift)

        return img
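
A quick check of the inversion identity used above (a minimal sketch): negating a rotation vector reverses the rotation about the same axis, so cv2.Rodrigues(-r) yields the transpose of cv2.Rodrigues(r).

import cv2
import numpy as np

r = np.array([0.2, -0.1, 0.4])       # arbitrary rotation vector
R = cv2.Rodrigues(r)[0]
R_inv = cv2.Rodrigues(-r)[0]
print(np.allclose(R_inv, R.T))       # True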
Code example #7
            az, ele, r = cart2sph(z, x, y)
            recordWriter.writerow([str(r), str(az), str(ele), str(x),
                                   str(y), str(z), str(rvec[0][0][0]),
                                   str(rvec[0][0][1]), str(rvec[0][0][2]),
                                   str(dateTimeObj.timestamp()),
                                   str(dateTimeObj)])
            matArray = [
                r, az, ele, x, y, z, rvec[0][0][0], rvec[0][0][1],
                rvec[0][0][2],
                dateTimeObj.timestamp()
            ]
            matArray = np.array(matArray)
            # Also save the reading as a MATLAB .mat file.
            scipy.io.savemat(fileName + str(time) + '.mat',
                             {'csvmatrix': matArray})
            # Apply the Rodrigues method to the 3x1 rotation vector to
            # obtain the 3x3 rotation matrix.
            rotation, _jacobian = cv2.Rodrigues(rvec)
            print("Rotation: ", rotation)
            # Round-trip check: converting the matrix back recovers rvec.
            print("Re-test: ", cv2.Rodrigues(rotation))
            print("Translation:", x, y, z)
            print("rotational:", rvec)
            tvec1 = tvec.reshape(3, 1)
            print(tvec.shape)
            print(rotation.shape)
            print("Rotation about z axis:", np.degrees(rvec[0][0][2]))
            # Bottom row used to build a 4x4 homogeneous matrix.
            line = np.array([0, 0, 0, 1])

            # Relative position of the tag w.r.t. the marker (test marker position).
            testMarker = tagLoc

            # Build the 3x4 [R | t] matrix.
            matrixRot = np.hstack([rotation, tvec1])
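
To complete the transform, code example #4 stacks the [0, 0, 0, 1] row underneath the 3x4 matrix. A minimal sketch of that step (`tagLocHom` is a hypothetical homogeneous tag point):

matrixFull = np.vstack([matrixRot, line])   # 4x4 [R | t; 0 0 0 1]
tagLocHom = np.array([0.0, 0.0, 0.0, 1.0])  # hypothetical tag position
relPosition = matrixFull @ tagLocHom        # tag position in the camera frame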
Code example #8
    def check_drowsiness_yawn(self, img, rect, dtype="int"):
        self.frame = img

        img = img[:, :, [2, 1, 0]]  # BGR => RGB
        yawn = False
        drowsiness = False

        landmarks = self.predictor(img, rect)

        # get the left and right eye coordinates
        left_eye = []
        for i in range(36, 42):
            left_eye.append([landmarks.part(i).x, landmarks.part(i).y])
        right_eye = []
        for i in range(42, 48):
            right_eye.append([landmarks.part(i).x, landmarks.part(i).y])

        # calculate the eye aspect ratio for both eyes
        left_ear = self.eye_aspect_ratio(left_eye)
        right_ear = self.eye_aspect_ratio(right_eye)

        # average the eye aspect ratio together for both eyes
        ear = (left_ear + right_ear) / 2.0

        # print('ear:', ear, 'eye_threshold', self.eye_threshold)
        # check to see if the eye aspect ratio is below the eye threshold
        if self.t_start and ear < self.eye_threshold:
            self.t_end = time.time()
            t = self.t_end - self.t_start
            if t >= 1.3:
                drowsiness = True
        else:
            self.t_start = time.time()

        # check yawn: measure the gap between the upper and lower lip
        top_lips = []
        bottom_lips = []
        for i in range(0, 68):
            if 50 <= i <= 53 or 61 <= i <= 64:
                top_lips.append((landmarks.part(i).x, landmarks.part(i).y))
            elif 65 <= i <= 67 or 56 <= i <= 59:
                bottom_lips.append((landmarks.part(i).x, landmarks.part(i).y))

        top_lips = np.squeeze(np.asarray(top_lips))
        bottom_lips = np.squeeze(np.asarray(bottom_lips))
        top_lips_mean = np.array(np.mean(top_lips, axis=0), dtype=dtype)
        bottom_lips_mean = np.array(np.mean(bottom_lips, axis=0), dtype=dtype)
        top_lips_mean = top_lips_mean.reshape(-1)
        bottom_lips_mean = bottom_lips_mean.reshape(-1)

        #distance=math.sqrt((bottom_lips_mean[0] - top_lips_mean[0])**2 + (bottom_lips_mean[-1] - top_lips_mean[-1])**2)
        distance = bottom_lips_mean[-1] - top_lips_mean[-1]

        threshold = (rect.bottom() - rect.top()) * self.mouth_threshold
        if distance > threshold:
            yawn = True

        # gaze detection
        left_gaze_ratio = self.get_gaze_ratio(
            [landmarks.part(i) for i in range(36, 42)])
        right_gaze_ratio = self.get_gaze_ratio(
            [landmarks.part(i) for i in range(42, 48)])
        gaze_ratio = (right_gaze_ratio + left_gaze_ratio) / 2

        if gaze_ratio <= 0.75:
            gaze = 'RIGHT'
        elif 0.75 < gaze_ratio < 1.3:
            gaze = 'CENTER'
        else:
            gaze = 'LEFT'

        # head pose
        shape0 = np.array(face_utils.shape_to_np(landmarks))
        image_points = np.array(
            [
                (shape0[33, :]),  # Nose tip
                (shape0[8, :]),  # Chin
                (shape0[36, :]),  # Left eye left corner
                (shape0[45, :]),  # Right eye right corner
                (shape0[48, :]),  # Left Mouth corner
                (shape0[54, :])  # Right mouth corner
            ],
            dtype="double")

        model_points = np.array([
            (0.0, 0.0, 0.0),  # Nose tip
            (0.0, -330.0, -65.0),  # Chin
            (-225.0, 170.0, -135.0),  # Left eye left corner
            (225.0, 170.0, -135.0),  # Right eye right corner
            (-150.0, -150.0, -125.0),  # Left Mouth corner
            (150.0, -150.0, -125.0)  # Right mouth corner
        ])

        # Approximate intrinsics, assuming a 640x360 frame.
        focal_length = 640
        center = (320, 180)
        camera_matrix = np.array([[focal_length, 0, center[0]],
                                  [0, focal_length, center[1]], [0, 0, 1]],
                                 dtype="double")

        # print ("Camera Matrix :\n {0}".format(camera_matrix))
        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector,
         translation_vector) = cv.solvePnP(model_points,
                                           image_points,
                                           camera_matrix,
                                           dist_coeffs,
                                           flags=cv.SOLVEPNP_ITERATIVE)
        # print ("Rotation Vector:\n {0}".format(rotation_vector))
        # print ("Translation Vector:\n {0}".format(translation_vector))

        (nose_end_point2D,
         jacobian) = cv.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                      rotation_vector, translation_vector,
                                      camera_matrix, dist_coeffs)
        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        # print(str(p2[0] - p1[0]), str(p2[1] - p1[1]))
        # axis = np.float32([[500,0,0],
        #                   [0,500,0],
        #                   [0,0,500]])
        # imgpts, jac = cv.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
        # modelpts, jac2 = cv.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
        # Recover Euler angles by decomposing the 3x4 projection matrix;
        # the seventh return value holds (pitch, yaw, roll) in degrees.
        rvec_matrix = cv.Rodrigues(rotation_vector)[0]

        proj_matrix = np.hstack((rvec_matrix, translation_vector))
        eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]

        # Convert to radians, then wrap each angle into [-90, 90] degrees
        # via asin(sin(x)).
        pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]

        pitch = math.degrees(math.asin(math.sin(pitch)))
        roll = -math.degrees(math.asin(math.sin(roll)))
        yaw = math.degrees(math.asin(math.sin(yaw)))

        # print(pitch, roll, yaw)

        # ''' x & y rotation '''
        # v_x = p2[0] - p1[0]
        # v_y = p2[1] - p1[1]

        # area = (shape0[45, 0] - shape0[36, 0]) * (shape0[8, 1] - (shape0[45, 1] + shape0[36, 1]) / 2)
        # area = math.sqrt(area) if area > 0 else 0
        # length = 1000 * area / 200

        # if length**2 - v_x**2 - v_y**2 <= 0:
        #     v_x = 0
        #     v_y = 0
        # h = math.sqrt(length**2 - v_x**2 - v_y**2)
        # theta_x = math.degrees(math.atan2(h, v_x)) - 90.0
        # theta_y = math.degrees(math.atan2(h, v_y)) - 90.0

        # theta_x = (theta_x / 3.5) ** 2 * 3.5 if theta_x > 0 else -(theta_x / 3.5) ** 2 * 3.5
        # theta_y = (theta_y / 3) ** 2 * 3 if theta_y > 0 else -(theta_y / 3) ** 2 * 3

        # print('angle:', theta_x, theta_y)

        return drowsiness, yawn, gaze, [p1, p2], [yaw, pitch * 4]
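
A minimal usage sketch for the return values (hypothetical names: `monitor` is an instance of this class, `frame` a BGR image, `rect` a dlib face rectangle). The [p1, p2] pair traces the nose direction:

drowsy, yawning, gaze, (p1, p2), angles = monitor.check_drowsiness_yawn(frame, rect)
cv.line(frame, p1, p2, (255, 0, 0), 2)   # nose-direction line
if drowsy:
    cv.putText(frame, 'DROWSY', (30, 30),
               cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)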