Example #1
def image(input_image_path, display, gradcam, output_csv_file, screen_size, device, branch, face_detection):
    """
    Receives the full path to a image file and recognizes
    facial expressions of the closets face in a frame-based approach.

    TODO: Write docstring

    :param input_image_path:
    :param display:
    :param gradcam:
    :param output_csv_file:
    :param screen_size:
    :param device:
    :param branch:
    :return:
    """

    img = uimage.read(input_image_path)

    # Call FER method
    fer = cvision.recognize_facial_expression(img, device, face_detection, gradcam)

    # TODO: Implement
    if output_csv_file:
        pass

    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=False)
        fer_demo.update(fer)
        while fer_demo.is_running():
            fer_demo.show()
        fer_demo.quit()
    return fer
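A minimal usage sketch for the variant above, assuming the function and its module-level imports (uimage, cvision, FERDemo) are in scope; every literal value below is an illustrative placeholder, not a value required by the project:

# Hypothetical call of the result-returning variant; all literals are placeholders.
fer = image(input_image_path="face.jpg",   # any local image file
            display=False,                 # skip the FERDemo window
            gradcam=False,                 # no Grad-CAM visualization
            output_csv_file=None,          # CSV writing is still a TODO here
            screen_size=1,
            device="cpu",
            branch=False,
            face_detection=1)
print(fer)                                 # inspect the returned FER result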
Example #2
def image(input_image_path, display, gradcam, output_csv_file, screen_size,
          device, branch, face_detection):
    """
    Receives the full path to an image file and recognizes
    facial expressions of the closest face in a frame-based approach.
    """

    write_to_file = output_csv_file is not None
    img = uimage.read(input_image_path)

    # Call FER method
    fer = cvision.recognize_facial_expression(img, device, face_detection,
                                              gradcam)

    if write_to_file:
        ufile.create_file(output_csv_file, input_image_path)
        ufile.write_to_file(fer, 0.0)
        ufile.close_file()

    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=False)
        fer_demo.update(fer)
        while fer_demo.is_running():
            fer_demo.show()
        fer_demo.quit()
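In this variant the result is written through ufile instead of being returned, so a caller mainly supplies the CSV path; a hedged sketch with placeholder values:

# Hypothetical call of the CSV-writing variant; all literals are placeholders.
image(input_image_path="face.jpg",
      display=False,
      gradcam=False,
      output_csv_file="face_fer.csv",   # one row is written with timestamp 0.0
      screen_size=1,
      device="cpu",
      branch=False,
      face_detection=1)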
Example #3
def webcam(camera_id, display, gradcam, output_csv_file, screen_size, device,
           frames, branch, no_plot, face_detection):
    """
    Receives images from a camera and recognizes
    facial expressions of the closets face in a frame-based approach.

    TODO: Write docstring
    :param no_plot:
    :param camera_id:
    :param display:
    :param gradcam:
    :param output_csv_file:
    :param screen_size:
    :param device:
    :param frames:
    :param branch:
    :return:
    """
    fer_demo = None

    if not uimage.initialize_video_capture(camera_id):
        raise RuntimeError("Error on initializing video capture." +
                           "\nCheck whether a webcam is working or not." +
                           "In linux, you can use Cheese for testing.")

    uimage.set_fps(frames)

    # Initialize screen
    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=(not no_plot))

    try:
        # Loop to process each frame from a VideoCapture object.
        while uimage.is_video_capture_open() and (
            (not display) or (display and fer_demo.is_running())):
            # Get a frame
            image = uimage.get_frame()

            fer = None if (
                image is None) else cvision.recognize_facial_expression(
                    image, device, face_detection, gradcam)

            # Display blank screen if no face is detected, otherwise,
            # display detected faces and perceived facial expression labels
            if display:
                fer_demo.update(fer)
                fer_demo.show()

            # TODO: Implement
            if output_csv_file:
                pass

    except Exception as e:
        print("Error raised during video mode.")
        raise e
    finally:
        uimage.release_video_capture()

        # fer_demo exists only when display is enabled
        if fer_demo is not None:
            fer_demo.quit()
Example #4
def video(input_video_path, display, gradcam, output_csv_file, screen_size,
          device, frames, branch, no_plot, face_detection):
    """
    Receives the full path to a video file and recognizes
    facial expressions of the closets face in a frame-based approach.

    TODO: Write docstring

    :param input_video_path:
    :param display:
    :param gradcam:
    :param output_csv_file:
    :param screen_size:
    :param device:
    :param frames:
    :param branch:
    :return:
    """
    fer_demo = None
    candidate = []
    if not uimage.initialize_video_capture(input_video_path):
        raise RuntimeError(
            "Error on initializing video capture." +
            "\nCheck whether working versions of ffmpeg or gstreamer are installed." +
            "\nSupported file format: MPEG-4 (*.mp4).")

    uimage.set_fps(frames)

    # Initialize screen
    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=(not no_plot))

    try:
        # Loop to process each frame from a VideoCapture object.
        while uimage.is_video_capture_open() and (
            (not display) or (display and fer_demo.is_running())):
            # Get a frame
            image = uimage.get_frame()

            fer = None if (
                image is None) else cvision.recognize_facial_expression(
                    image, device, face_detection, gradcam)
            candidate.append(fer)
            # Display blank screen if no face is detected, otherwise,
            # display detected faces and perceived facial expression labels
            if display:
                fer_demo.update(fer)
                fer_demo.show()

            # TODO: Implement
            if output_csv_file:
                pass
        return candidate
    except Exception:
        # Return whatever has been processed so far if an error interrupts the video
        return candidate
    finally:
        uimage.release_video_capture()

        if fer_demo is not None:
            fer_demo.quit()
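Because this variant returns one recognition result per processed frame (even when an error cuts the video short), a caller can iterate the returned list; a sketch with placeholder argument values:

# Hypothetical usage of the list-returning variant; literals are placeholders.
results = video(input_video_path="clip.mp4",
                display=False,
                gradcam=False,
                output_csv_file=None,     # CSV writing is still a TODO here
                screen_size=1,
                device="cpu",
                frames=5,
                branch=False,
                no_plot=True,
                face_detection=1)
for frame_index, fer in enumerate(results):
    print(frame_index, fer)               # fer is None for frames that could not be read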
Example #5
def webcam(camera_id, display, gradcam, output_csv_file, screen_size, device,
           frames, branch, no_plot, face_detection):
    """
    Receives images from a camera and recognizes
    facial expressions of the closest face in a frame-based approach.
    """

    fer_demo = None
    write_to_file = output_csv_file is not None
    starting_time = time.time()

    if not uimage.initialize_video_capture(camera_id):
        raise RuntimeError("Error on initializing video capture." +
                           "\nCheck whether a webcam is working or not." +
                           "In linux, you can use Cheese for testing.")

    uimage.set_fps(frames)

    # Initialize screen
    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=(not no_plot))
    else:
        print("Press 'Ctrl + C' to quit.")

    try:
        if write_to_file:
            ufile.create_file(output_csv_file, str(time.time()))

        # Loop to process each frame from a VideoCapture object.
        while uimage.is_video_capture_open() and (
            (not display) or (display and fer_demo.is_running())):
            # Get a frame
            img, _ = uimage.get_frame()

            fer = None if (
                img is None) else cvision.recognize_facial_expression(
                    img, device, face_detection, gradcam)

            # Display blank screen if no face is detected, otherwise,
            # display detected faces and perceived facial expression labels
            if display:
                fer_demo.update(fer)
                fer_demo.show()

            if write_to_file:
                ufile.write_to_file(fer, time.time() - starting_time)

    except Exception as e:
        print("Error raised during video mode.")
        raise e
    except KeyboardInterrupt:
        print("Keyboard interrupt event raised.")
    finally:
        uimage.release_video_capture()

        if display:
            fer_demo.quit()

        if write_to_file:
            ufile.close_file()
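A sketch of how this webcam variant might be launched; camera id 0 is the usual default capture device, and the remaining values are illustrative placeholders:

# Hypothetical webcam session; with display=False, stop it with Ctrl+C.
webcam(camera_id=0,
       display=False,
       gradcam=False,
       output_csv_file="webcam_fer.csv",  # rows carry seconds since the session started
       screen_size=1,
       device="cpu",
       frames=5,
       branch=False,
       no_plot=True,
       face_detection=1)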
Example #6
def video(input_video_path, display, gradcam, output_csv_file, screen_size,
          device, frames, branch, no_plot, face_detection):
    """
    Receives the full path to a video file and recognizes
    facial expressions of the closest face in a frame-based approach.
    """

    fer_demo = None
    write_to_file = output_csv_file is not None

    if not uimage.initialize_video_capture(input_video_path):
        raise RuntimeError(
            "Error on initializing video capture." +
            "\nCheck whether working versions of ffmpeg or gstreamer are installed." +
            "\nSupported file format: MPEG-4 (*.mp4).")

    uimage.set_fps(frames)

    # Initialize screen
    if display:
        fer_demo = FERDemo(screen_size=screen_size,
                           display_individual_classification=branch,
                           display_graph_ensemble=(not no_plot))

    try:
        if write_to_file:
            ufile.create_file(output_csv_file, input_video_path)

        # Loop to process each frame from a VideoCapture object.
        while uimage.is_video_capture_open() and (
            (not display) or (display and fer_demo.is_running())):
            # Get a frame
            img, timestamp = uimage.get_frame()

            # Video has been fully processed
            if img is None:
                break

            # Process frame (img is guaranteed to be valid here)
            fer = cvision.recognize_facial_expression(img, device,
                                                      face_detection, gradcam)

            # Display blank screen if no face is detected, otherwise,
            # display detected faces and perceived facial expression labels
            if display:
                fer_demo.update(fer)
                fer_demo.show()

            if write_to_file:
                ufile.write_to_file(fer, timestamp)

    except Exception as e:
        print("Error raised during video mode.")
        raise e
    finally:
        uimage.release_video_capture()

        if display:
            fer_demo.quit()

        if write_to_file:
            ufile.close_file()
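Finally, a sketch for the CSV-writing video variant above, assuming an MPEG-4 input as the error message suggests; all literal values are placeholders:

# Hypothetical offline run: no window, one CSV row per processed frame.
video(input_video_path="clip.mp4",        # MPEG-4 input, per the error message
      display=False,
      gradcam=False,
      output_csv_file="clip_fer.csv",     # rows carry the frame timestamp
      screen_size=1,
      device="cpu",
      frames=5,
      branch=False,
      no_plot=True,
      face_detection=1)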