def visualize_headpose_result(img,
                              facebox,
                              est_headpose,
                              people,
                              GTlabel=None):
    """Overlay the estimated head pose on a copy of the input image.

    Args:
        img: BGR image (numpy array) to draw on; left unmodified.
        facebox: (x1, y1, x2, y2) face bounding box.
        est_headpose: (phi, theta) estimated head-pose angles.
        people: unused here; kept for call-site compatibility
            (smoothing via people.getEndpointAverage was disabled).
        GTlabel: optional ground-truth label; indices 1 and 2 hold the
            ground-truth angles. When falsy, no error is computed.
            (Default changed from the mutable ``[]`` to ``None``.)

    Returns:
        Tuple of (annotated image copy, head-pose error; 0 when no GT).
    """
    output_image = np.copy(img)
    box_point = np.copy(facebox)

    # The pose arrow starts at the centre of the face box.
    center_x = (box_point[2] + box_point[0]) / 2
    center_y = (box_point[3] + box_point[1]) / 2
    endpoint_x, endpoint_y = gaze_tools.get_endpoint(
        est_headpose[1], est_headpose[0], center_x, center_y, 100)

    headpose_error = 0
    if GTlabel:  # falsy covers both None and the legacy [] default
        GT_endpoint_x, GT_endpoint_y = gaze_tools.get_endpoint(
            GTlabel[2], GTlabel[1], center_x, center_y, 100)
        headpose_error = gaze_tools.get_error(
            [GT_endpoint_x, GT_endpoint_y], [endpoint_x, endpoint_y],
            [center_x, center_y])

    cv2.arrowedLine(output_image, (int(center_x), int(center_y)),
                    (int(endpoint_x), int(endpoint_y)), (0, 0, 255), 2)

    return output_image, headpose_error
    def visualize_eye_result(eye_image, est_gaze):
        """Draw the estimated gaze vector onto a copy of *eye_image*."""
        canvas = np.copy(eye_image)

        # The gaze ray is anchored at the image centre.
        origin_x = canvas.shape[1] / 2
        origin_y = canvas.shape[0] / 2

        tip_x, tip_y = get_endpoint(est_gaze[0], est_gaze[1],
                                    origin_x, origin_y, 50)

        cv2.line(canvas,
                 (int(origin_x), int(origin_y)),
                 (int(tip_x), int(tip_y)),
                 (255, 0, 0))
        return canvas
    def visualize_headpose_result(face_image, est_headpose):
        """Draw the estimated head pose as a line on a copy of *face_image*."""
        canvas = np.copy(face_image)

        cx = canvas.shape[1] / 2
        cy = canvas.shape[0] / 2

        # Note the swapped (est_headpose[1], est_headpose[0]) argument order.
        ex, ey = gaze_tools.get_endpoint(est_headpose[1], est_headpose[0],
                                         cx, cy, 100)

        cv2.line(canvas, (int(cx), int(cy)), (int(ex), int(ey)),
                 (0, 0, 255), 3)
        return canvas
# Example #4
    def visualize_eye_result(self, eye_image, est_gaze):
        """Overlay the estimated gaze direction on a copy of *eye_image*."""
        canvas = np.copy(eye_image)

        # Ray origin is the centre of the configured image dimensions,
        # not of the (possibly differently sized) eye patch itself.
        cx = self.image_width / 2
        cy = self.image_height / 2

        ex, ey = gaze_tools.get_endpoint(est_gaze[0], est_gaze[1], cx, cy, 50)

        cv2.line(canvas, (int(cx), int(cy)), (int(ex), int(ey)), (255, 0, 0))
        return canvas
# Example #5
    def visualize_eye_result(color_img,
                             est_gaze,
                             center_coor,
                             facebox,
                             people,
                             selection,
                             GTlabel=None):
        """Overlay the estimated gaze vector on a copy of *color_img*.

        Args:
            color_img: BGR image to annotate; left unmodified.
            est_gaze: (theta, phi) estimated gaze angles.
            center_coor: eye centre relative to the face-box origin.
            facebox: face box whose (x1, y1) offsets center_coor to
                absolute image coordinates.
            people: unused here; kept for call-site compatibility
                (smoothing via people.getEndpointAverage was disabled).
            selection: unused here; kept for call-site compatibility.
            GTlabel: optional ground-truth label; indices 3 and 4 hold
                the ground-truth angles. When falsy, error stays 0.
                (Default changed from the mutable ``[]`` to ``None``.)

        Returns:
            Tuple of (annotated image copy, gaze error; 0 when no GT).
        """
        output_image = np.copy(color_img)

        # Absolute position of the eye centre in the full image.
        center_x = facebox[0] + center_coor[0]
        center_y = facebox[1] + center_coor[1]

        endpoint_x, endpoint_y = get_endpoint(est_gaze[0], est_gaze[1],
                                              center_x, center_y, 150)

        gaze_error = 0
        if GTlabel:  # falsy covers both None and the legacy [] default
            GT_endpoint_x, GT_endpoint_y = get_endpoint(
                GTlabel[4], GTlabel[3], center_x, center_y, 150)
            gaze_error = get_error([GT_endpoint_x, GT_endpoint_y],
                                   [endpoint_x, endpoint_y],
                                   [center_x, center_y])

        cv2.arrowedLine(output_image, (int(center_x), int(center_y)),
                        (int(endpoint_x), int(endpoint_y)), (255, 0, 0), 2)

        return output_image, gaze_error
# Example #6
    def visualize_eye_result(eye_image, est_gaze):
        """Overlay the estimated gaze on a copy of *eye_image*.

        The arrow starts at the image centre; its length is extended by
        50% beyond the raw endpoint for visibility.
        """
        output_image = np.copy(eye_image)

        center_x = output_image.shape[1] / 2
        center_y = output_image.shape[0] / 2

        endpoint_x, endpoint_y = get_endpoint(est_gaze[0], est_gaze[1],
                                              center_x, center_y, 50)

        # Lengthen the arrow by scaling the *offset* from the centre.
        # (The previous code scaled the absolute endpoint coordinates,
        # which also translates the tip and distorts the drawn direction.)
        tip_x = center_x + 1.5 * (endpoint_x - center_x)
        tip_y = center_y + 1.5 * (endpoint_y - center_y)

        cv2.arrowedLine(output_image, (int(center_x), int(center_y)),
                        (int(tip_x), int(tip_y)),
                        (0, 255, 0),
                        thickness=2,
                        tipLength=0.4)
        return output_image