Example #1
def test_facial_landmarks_detection():
    model = Facial_Landmarks_Detection("models/intel/landmarks-regression-retail-0009/FP16-INT8/landmarks-regression-retail-0009.xml")
    model.load_model()
    image = cv2.imread("media/face1.jpg")
    height, width, _ = image.shape
    eye_landmarks = model.predict(image)
    landmarks_pos = [(int(l[0] * width), int(l[1] * height)) for l in eye_landmarks]
    left_eye_coord = landmarks_pos[0]
    right_eye_coord = landmarks_pos[1]
    
    x_offset = 50
    y_offset = 25
    left_eye = image[left_eye_coord[1] - y_offset:left_eye_coord[1] + y_offset, left_eye_coord[0] - x_offset:left_eye_coord[0] + x_offset]
    right_eye = image[right_eye_coord[1] - y_offset:right_eye_coord[1] + y_offset, right_eye_coord[0] - x_offset:right_eye_coord[0] + x_offset]
    
    cv2.imwrite("media/left_eye.jpg", left_eye)
    cv2.imwrite("media/right_eye.jpg", right_eye)

    radius = 5
    color = (255, 0, 0)
    thickness = 5
    cv2.circle(image, left_eye_coord, radius, color, thickness)
    cv2.circle(image, right_eye_coord, radius, color, thickness)
    cv2.imshow("Result", image)
    cv2.waitKey()
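All of the examples in this listing lean on small OpenVINO wrapper classes (Facial_Landmarks_Detection, Face_Detection, and so on) plus cv2, numpy, os, time, and logging imports that the snippets do not show. As a rough orientation only, here is a minimal sketch of what such a wrapper might look like against the pre-2022 openvino.inference_engine API; the constructor signature and the shape returned by predict() differ between examples, so treat every name below as an assumption.

from openvino.inference_engine import IECore
import cv2


class Facial_Landmarks_Detection:
    """Sketch of the wrapper the snippets assume; not any author's actual code."""

    def __init__(self, model_xml, device="CPU", extensions=None):
        self.model_xml = model_xml
        self.model_bin = model_xml.replace(".xml", ".bin")
        self.device = device
        self.extensions = extensions

    def load_model(self):
        ie = IECore()
        if self.extensions and self.device == "CPU":
            ie.add_extension(self.extensions, self.device)
        net = ie.read_network(model=self.model_xml, weights=self.model_bin)
        self.input_name = next(iter(net.input_info))
        self.output_name = next(iter(net.outputs))
        # landmarks-regression-retail-0009 takes a 1x3x48x48 BGR blob
        self.input_shape = net.input_info[self.input_name].input_data.shape
        self.exec_net = ie.load_network(network=net, device_name=self.device)

    def predict(self, image):
        n, c, h, w = self.input_shape
        blob = cv2.resize(image, (w, h)).transpose((2, 0, 1)).reshape(n, c, h, w)
        result = self.exec_net.infer({self.input_name: blob})
        # the raw output holds ten normalized values: five (x, y) landmark pairs
        return result[self.output_name].reshape(-1, 2)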
Example #2
def init_models(device="CPU"):
    # Bind module-level globals rather than defining new local variables
    global face_detection
    global facial_landmarks_detection
    global head_pose_estimation
    global gaze_estimation

    log.info("Loading Face Detection model...")
    face_detection = Face_Detection(path_face_detection, device)
    face_detection.load_model()
    log.info("DONE\n")

    log.info("Loading Face Landmarks Detection model...")
    facial_landmarks_detection = Facial_Landmarks_Detection(
        path_facial_landmarks_detection, device)
    facial_landmarks_detection.load_model()
    log.info("DONE\n")

    log.info("Loading Head Pose Estimation model...")
    head_pose_estimation = Head_Pose_Estimation(path_head_pose_estimation,
                                                device)
    head_pose_estimation.load_model()
    log.info("DONE\n")

    log.info("Loading Gaze Estimation model...")
    gaze_estimation = Gaze_Estimation(path_gaze_estimation, device)
    gaze_estimation.load_model()
    log.info("DONE\n")
Example #3
def init_models(device="CPU"):
    # Bind module-level globals rather than defining new local variables
    global face_detection
    global facial_landmarks_detection
    global head_pose_estimation
    global gaze_estimation

    start = time.time()
    face_detection = Face_Detection(path_face_detection, device)
    face_detection.load_model()
    fd_load_time = (time.time() - start)

    start = time.time()
    facial_landmarks_detection = Facial_Landmarks_Detection(
        path_facial_landmarks_detection, device)
    facial_landmarks_detection.load_model()
    fld_load_time = (time.time() - start)

    start = time.time()
    head_pose_estimation = Head_Pose_Estimation(path_head_pose_estimation,
                                                device)
    head_pose_estimation.load_model()
    hpe_load_time = (time.time() - start)

    start = time.time()
    gaze_estimation = Gaze_Estimation(path_gaze_estimation, device)
    gaze_estimation.load_model()
    ge_load_time = (time.time() - start)

    return (fd_load_time, fld_load_time, hpe_load_time, ge_load_time)
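A possible way to consume the tuple this variant returns (names here are illustrative, not from the original repo):

fd_t, fld_t, hpe_t, ge_t = init_models("CPU")
for name, seconds in zip(
        ("face detection", "facial landmarks", "head pose", "gaze"),
        (fd_t, fld_t, hpe_t, ge_t)):
    log.info("%s model load time: %.3f s", name, seconds)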
Example #4
    def __init__(self, args):

        # load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        logger = logging.getLogger()
        input_T = args.input_type
        input_F = args.input_file

        if input_T.lower() == 'cam':
            # open the video feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                logger.error(
                    "Unsupported file extension. Allowed: ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']"
                )
                exit(1)

        print("Models total loading time :",
              time.time() - start_models_load_time)

        # init mouse controller
        self.mouse_controller = MouseController('low', 'fast')
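This constructor reads quite a few attributes off args. For reference, a hypothetical build_argparser() that would satisfy it could look like the following; the original repo's flag names and defaults may well differ.

import argparse


def build_argparser():
    # flag names mirror the attributes __init__ reads; defaults are guesses
    p = argparse.ArgumentParser()
    p.add_argument("--face_detection_model", required=True)
    p.add_argument("--facial_landmarks_detection_model", required=True)
    p.add_argument("--head_pose_estimation_model", required=True)
    p.add_argument("--gaze_estimation_model", required=True)
    p.add_argument("--device", default="CPU")
    p.add_argument("--extensions", default=None)
    p.add_argument("--perf_counts", action="store_true")
    p.add_argument("--input_type", default="video")
    p.add_argument("--input_file", default=None)
    return p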
Example #5
def main():
    args = build_argparser().parse_args()

    frame_num = 0
    inference_time = 0
    counter = 0

    # Initialize the Inference Engine
    fd = FaceDetection()
    fld = Facial_Landmarks_Detection()
    ge = Gaze_Estimation()
    hp = Head_Pose_Estimation()

    # Load Models
    fd.load_model(args.face_detection_model, args.device, args.cpu_extension)
    fld.load_model(args.facial_landmark_model, args.device, args.cpu_extension)
    ge.load_model(args.gaze_estimation_model, args.device, args.cpu_extension)
    hp.load_model(args.head_pose_model, args.device, args.cpu_extension)

    # Mouse Controller precision and speed
    mc = MouseController('medium', 'fast')

    # feed input from an image, webcam, or video to model
    if args.input == "cam":
        feed = InputFeeder("cam")
    else:
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        feed = InputFeeder("video", args.input)
    feed.load_data()
    frame_count = 0
    for frame in feed.next_batch():
        frame_count += 1
        inf_start = time.time()
        if frame is not None:
            try:
                key = cv2.waitKey(60)
                # restart the timer after the UI wait so only inference is measured
                inf_start = time.time()

                # make predictions
                detected_face, face_coords = fd.predict(
                    frame.copy(), args.prob_threshold)
                hp_output = hp.predict(detected_face.copy())
                left_eye, right_eye, eye_coords = fld.predict(
                    detected_face.copy())
                new_mouse_coord, gaze_vector = ge.predict(
                    left_eye, right_eye, hp_output)

                # per-frame inference time, accumulated across frames
                det_time = time.time() - inf_start
                inference_time = inference_time + det_time
                counter = counter + 1

                # Visualization
                preview = args.visualization
                if preview:
                    preview_frame = frame.copy()
                    face_frame = detected_face.copy()

                    draw_face_bbox(preview_frame, face_coords)
                    display_hp(preview_frame, hp_output, face_coords)
                    draw_landmarks(face_frame, eye_coords)
                    draw_gaze(face_frame, gaze_vector, left_eye.copy(),
                              right_eye.copy(), eye_coords)

                if preview:
                    img = np.hstack((cv2.resize(preview_frame, (500, 500)),
                                     cv2.resize(face_frame, (500, 500))))
                else:
                    img = cv2.resize(frame, (500, 500))

                cv2.imshow('Visualization', img)

                # set speed
                if frame_count % 5 == 0:
                    mc.move(new_mouse_coord[0], new_mouse_coord[1])

                # INFO
                log.info("NUMBER OF FRAMES: {} ".format(frame_num))
                log.info("INFERENCE TIME: {}ms".format(det_time * 1000))

                frame_num += 1

                if key == 27:
                    break
            except Exception as e:
                print('Frame could not be processed ({}). '
                      'Please provide a supported image or video format.'.format(e))
                exit()
    feed.close()
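The drawing helpers this example calls (draw_face_bbox, display_hp, draw_landmarks, draw_gaze) are defined elsewhere in that repo. Purely as an illustration, a minimal draw_face_bbox consistent with the call site might be:

import cv2


def draw_face_bbox(frame, face_coords):
    # face_coords is assumed to be (xmin, ymin, xmax, ymax) in pixels
    xmin, ymin, xmax, ymax = face_coords
    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)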
Example #6
def infer_on_video(args):
    draw_flag = args.b
    device = args.device
    input_path = args.input_path
    input_type = args.input_type
    output_path = args.output_path
    precision = args.accuracy

    locations = {}

    locations[FACE_DETECTION_MODEL] = os.path.join(
        MODEL_PATH, FACE_DETECTION_MODEL, 'INT1',
        FACE_DETECTION_MODEL + ".xml")

    if precision is not None:
        log.info(
            "The face-detection-adas-binary-0001 model always uses INT1 precision")

    for model_name in [
            FACIAL_LANDMARKS_DETECTION_MODEL, HEAD_POSE_ESTIMATION_MODEL,
            GAZE_ESTIMATION_MODEL
    ]:
        locations[model_name] = find_exist_model_file(precision, model_name)

    # Initialize the feeder
    feed = InputFeeder(input_type=input_type, input_file=input_path)
    feed.load_data()

    # Grab the shape of the input
    input_width = feed.getWidth()
    input_height = feed.getHeight()

    # Create a video writer for the output video
    # out = cv2.VideoWriter('../out.mp4', CODEC, 30, (input_width,input_height))

    mouse_controller = MouseController(MOUSE_PRECISION, MOUSE_SPEED)

    start_model_load_time = time.time()

    # model initialization
    face_detection = Face_Detection(locations[FACE_DETECTION_MODEL],
                                    device,
                                    extensions=CPU_EXTENSION)
    facial_landmarks_detection = Facial_Landmarks_Detection(
        locations[FACIAL_LANDMARKS_DETECTION_MODEL],
        device,
        extensions=CPU_EXTENSION)
    head_pose_estimation = Head_Pose_Estimation(
        locations[HEAD_POSE_ESTIMATION_MODEL],
        device,
        extensions=CPU_EXTENSION)
    gaze_estimation = Gaze_Estimation(locations[GAZE_ESTIMATION_MODEL],
                                      device,
                                      extensions=CPU_EXTENSION)

    total_model_load_time = time.time() - start_model_load_time

    counter = 0
    start_inference_time = time.time()

    # Process frames until the video ends, or process is exited
    for ret, batch in feed.next_batch(BATCH_SIZE):
        if not ret:
            break
        counter += 1
        gaze_lines = []
        out_frame = batch.copy()

        key = cv2.waitKey(60)

        # Face detection
        face_detection_output = face_detection.predict(batch)

        # face_detection_output = [ image_id, label, conf, xmin, ymin, xmax, ymax ]
        face_xmin = abs(int(face_detection_output[3] * input_width))
        face_ymin = abs(int(face_detection_output[4] * input_height))
        face_xmax = abs(int(face_detection_output[5] * input_width))
        face_ymax = abs(int(face_detection_output[6] * input_height))

        if (face_ymax - face_ymin) <= 0 or (face_xmax - face_xmin) <= 0:
            continue

        # Crop the face image
        face = batch[face_ymin:face_ymax, face_xmin:face_xmax]

        if draw_flag == True:
            cv2.rectangle(out_frame, (face_xmin, face_ymin),
                          (face_xmax, face_ymax), (255, 255, 0), 2)

        # Find facial landmarks (to find eyes)
        eyes = facial_landmarks_detection.predict(face)

        # Estimate head orientation (yaw=Y, pitch=X, roll=Z)
        yaw, pitch, roll = head_pose_estimation.predict(face)

        eye_images = []
        for eye in eyes:
            face_height, face_width, _ = face.shape
            eye_xmin = int(eye[_X] * face_width - EYE_RADIUS)
            eye_ymin = int(eye[_Y] * face_height - EYE_RADIUS)
            eye_xmax = int(eye[_X] * face_width + EYE_RADIUS)
            eye_ymax = int(eye[_Y] * face_height + EYE_RADIUS)

            if (eye_ymax - eye_ymin) <= 0 or (eye_xmax - eye_xmin) <= 0:
                continue

            # crop the eye region
            eye_images.append(face[eye_ymin:eye_ymax,
                                   eye_xmin:eye_xmax].copy())

            # Draw eye boundary boxes
            if draw_flag == True:
                cv2.rectangle(out_frame,
                              (eye_xmin + face_xmin, eye_ymin + face_ymin),
                              (eye_xmax + face_xmin, eye_ymax + face_ymin),
                              (0, 255, 0), 2)

        # gaze estimation
        gaze_vec_norm = gaze_estimation.predict(eye_images, [yaw, pitch, 0])

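        # Compensate for head roll so the gaze stays level on screen: rotate
        # the (x, y) gaze components by the roll angle using a plain 2D
        # rotation, x' = x*cos(r) + y*sin(r) and y' = -x*sin(r) + y*cos(r).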
        cos = math.cos(math.radians(roll))
        sin = math.sin(math.radians(roll))
        tmpx = gaze_vec_norm[0] * cos + gaze_vec_norm[1] * sin
        tmpy = -gaze_vec_norm[0] * sin + gaze_vec_norm[1] * cos
        gaze_vec_norm = [tmpx, tmpy]

        # Store gaze line coordinates
        for eye in eyes:
            eye[_X] = int(eye[_X] * face_width)
            eye[_Y] = int(eye[_Y] * face_height)
            gaze_lines.append(
                get_gaze_line(eye, face_xmin, face_ymin, gaze_vec_norm))

        if draw_flag:
            # Drawing gaze lines
            for gaze_line in gaze_lines:
                start_point = (gaze_line[0][_X], gaze_line[0][_Y])
                end_point = (gaze_line[1][_X], gaze_line[1][_Y])

                draw_gaze_line(out_frame, start_point, end_point)

        # start point of middle gaze line
        start_point = ((gaze_lines[0][0][_X] + gaze_lines[1][0][_X]) / 2,
                       (gaze_lines[0][0][_Y] + gaze_lines[1][0][_Y]) / 2)

        # end point of middle gaze line
        end_point = ((gaze_lines[0][1][_X] + gaze_lines[1][1][_X]) / 2,
                     (gaze_lines[0][1][_Y] + gaze_lines[1][1][_Y]) / 2)

        gaze_mid_line = [start_point, end_point]

        mouse_point = get_mouse_point(gaze_mid_line, input_width, input_height)

        log.debug("mouse_point[_X], mouse_point[_Y]: %s, %s", mouse_point[_X],
                  mouse_point[_Y])

        # cv2.circle(out_frame, mouse_point, 10, (255, 255, 255), -1)
        mouse_controller.move(mouse_point[_X], mouse_point[_Y])

        # show the frame once per batch item (the video writer is disabled)
        for _ in range(BATCH_SIZE):
            cv2.imshow("video", out_frame)
            # out.write(out_frame)

        if key == 27:
            break

    total_inference_time = time.time() - start_inference_time
    total_inference_time = round(total_inference_time, 1)
    fps = counter / total_inference_time

    with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps) + '\n')
        f.write(str(total_model_load_time) + '\n')

    # Release the out writer, capture, and destroy any OpenCV windows
    log.info("Input stream ended...")
    cv2.destroyAllWindows()
    # out.release()
    feed.close()
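get_gaze_line, draw_gaze_line, and get_mouse_point are helpers from the same repo that the listing omits. A plausible get_gaze_line, given that the eye coordinates were already converted to face-local pixels above, might be the following; the length constant and the y-axis sign are assumptions:

_X, _Y = 0, 1
GAZE_LINE_LENGTH = 200  # pixels; arbitrary choice for this sketch


def get_gaze_line(eye, face_xmin, face_ymin, gaze_vec_norm):
    # shift the eye point into frame coordinates, then extend along the gaze
    start = (eye[_X] + face_xmin, eye[_Y] + face_ymin)
    end = (int(start[_X] + gaze_vec_norm[0] * GAZE_LINE_LENGTH),
           int(start[_Y] - gaze_vec_norm[1] * GAZE_LINE_LENGTH))
    return [start, end]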
Example #7
class Computer_Pointer_Controller:
    def __init__(self, args):
        # keep a reference to args so run() can read flags like show_face
        self.args = args

        # load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        logger = logging.getLogger()
        input_T = args.input_type
        input_F = args.input_file

        if input_T.lower() == 'cam':
            # open the video feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                logger.error(
                    "Unsupported file extension. Allowed: ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']"
                )
                exit(1)

        print("Models total loading time :",
              time.time() - start_models_load_time)

        # init mouse controller
        self.mouse_controller = MouseController('low', 'fast')

    def run(self):
        logger = logging.getLogger()
        inferences_times = []
        face_detections_times = []
        for batch in self.feed.next_batch():
            if batch is None:
                break

            # as we want the webcam to act as a mirror, flip the frame
            batch = cv2.flip(batch, 1)

            inference_time = time.time()
            face = self.face_detection.predict(batch)
            if face is None:
                logger.error('Unable to detect the face.')
                continue
            else:
                face_detections_times.append(time.time() - inference_time)

                left_eye_image, right_eye_image = self.facial_landmarks_detection.predict(
                    face)
                if left_eye_image is None or right_eye_image is None:
                    continue
                head_pose_angles = self.head_pose_estimation.predict(face)
                if head_pose_angles is None:
                    continue
                vector = self.gaze_estimation.predict(left_eye_image,
                                                      right_eye_image,
                                                      head_pose_angles)
                inferences_times.append(time.time() - inference_time)
                if self.args.show_face == "True":
                    cv2.imshow("Detected face", face)
                    cv2.waitKey(1)
                self.mouse_controller.move(vector[0], vector[1])

        self.feed.close()
        cv2.destroyAllWindows()
        print("Average face detection inference time:",
              sum(face_detections_times) / len(face_detections_times))
        print("Average total inferences time:",
              sum(inferences_times) / len(inferences_times))
Example #8
def infer_on_stream(args):

    network_fd = Face_Detection(args.face_detection_model, args.device)
    network_hp = Head_Pose_Estimation(args.head_pose_model, args.device)
    network_fl = Facial_Landmarks_Detection(args.facial_landmarks_model,
                                            args.device)
    network_ge = Gaze_Estimation(args.gaze_estimation_model, args.device)

    mouse_cont = MouseController(args.mouse_precision, args.mouse_speed)

    starting_loading = time.time()

    network_fd.load_model()
    network_hp.load_model()
    network_fl.load_model()
    network_ge.load_model()

    duration_loading = time.time() - starting_loading

    input_type = handle_input(args.input)

    feed = InputFeeder(input_type=input_type, input_file=args.input)

    feed.load_data()

    starting_inference = time.time()

    for flag, frame in feed.next_batch():
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        out_frame, face, face_coords = network_fd.predict(
            frame, args.prob_threshold, args.display)

        if len(face_coords) == 0:
            log.error("There is no face in the stream!")
            continue

        out_frame, head_angle = network_hp.predict(out_frame, face,
                                                   face_coords, args.display)
        out_frame, eye_left, eye_right, eye_center = network_fl.predict(
            out_frame, face, face_coords, args.display)
        out_frame, gaze = network_ge.predict(out_frame, eye_left, eye_right,
                                             eye_center, head_angle,
                                             args.display)

        mouse_cont.move(gaze[0], gaze[1])

        if key_pressed == 27:
            break

        cv2.imshow('Visualization', cv2.resize(out_frame, (600, 400)))

    duration_inference = time.time() - starting_inference

    print("Total loading time is: {}\nTotal inference time is: {} ".format(
        duration_loading, duration_inference))

    feed.close()
    cv2.destroyAllWindows()
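handle_input is another omitted helper; from how it is used above, something along these lines would work (the extension list is an assumption):

import os


def handle_input(input_path):
    # 'cam' selects the webcam; otherwise infer the feed type from the extension
    if input_path.lower() == 'cam':
        return 'cam'
    if os.path.splitext(input_path)[1].lower() in ('.jpg', '.jpeg', '.bmp', '.png'):
        return 'image'
    return 'video'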
Example #9
def main():
    args = build_argparser().parse_args()

    frameNum = 0
    inferenceTime = 0
    counter = 0

    # Initialize the Inference Engine
    fd = FaceDetection()
    ld = Facial_Landmarks_Detection()
    ge = gazeEstimation()
    hp = headPose()
    modelStart = time.time()
    # Load Models
    fd.loadModel(args.faceDetectionModel, args.device)
    ld.loadModel(args.faceLandmarkModel, args.device)
    ge.loadModel(args.gazeEstimationModel, args.device)
    hp.loadModel(args.headPoseModel, args.device)
    print("Model Load timing:", (time.time() - modelStart) * 1000, "ms")

    # Get the input feeder
    if args.input == "cam":
        feed = InputFeeder("cam")
    else:
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        feed = InputFeeder("video", args.input)
    feed.load_data()
    frameCount = 0
    # Mouse Controller precision and speed
    mc = MouseController('medium', 'fast')

    for frame in feed.next_batch():
        frameCount += 1

        if frame is not None:
            key = cv2.waitKey(60)

            inferenceStart = time.time()

            # make predictions
            detected_face, faceCoords = fd.predict(frame.copy(),
                                                   args.prob_threshold)
            hpOutput = hp.predict(detected_face.copy())
            leftEye, rightEye, eyeCoords = ld.predict(detected_face.copy())
            new_mouse_coord, gazeVector = ge.predict(leftEye, rightEye,
                                                     hpOutput)

            inferenceTime = time.time() - inferenceStart
            counter = counter + 1

            # Visualization
            preview = args.visualization
            if preview:
                preview_frame = frame.copy()
                faceFrame = detected_face.copy()

                drawFaceBoundingBox(preview_frame, faceCoords)
                displayHp(preview_frame, hpOutput, faceCoords)
                draw_landmarks(faceFrame, eyeCoords)
                draw_gaze(faceFrame, gazeVector, leftEye.copy(),
                          rightEye.copy(), eyeCoords)
            if preview:
                img = np.hstack((cv2.resize(preview_frame, (500, 500)),
                                 cv2.resize(faceFrame, (500, 500))))
            else:
                img = cv2.resize(frame, (500, 500))
            cv2.imshow('Visualization', img)

            # set speed
            if frameCount % 5 == 0:
                mc.move(new_mouse_coord[0], new_mouse_coord[1])

            print("Frame Number:", frameNum)
            print("Inference Time:", inferenceTime * 1000)

            frameNum += 1

            if key == 27:
                break
    feed.close()
Example #10
def model_pipelines(args):
    
    # Parameters which were parsed are assigned
    
    #device = args.dev
    #customLayers = args.lay
    inputFile = args.inp
    visual_flag = args.vf
    
    faceDetectionModel = args.mfd
    landmarksDetectionModel = args.mld
    headPoseEstimationModel = args.mhp
    gazeDetectionModel = args.mgd
    start_time = time.time()
    # Logging is enabled 
    log = logging.getLogger(__name__)
    
    log.info('----------THE BEGINNING----------')
    log.info('Start Time: {0}'.format(start_time))

    # The feed is initialised
    single_image = ['jpg','tif','png','jpeg', 'bmp']
    if inputFile.split(".")[-1].lower() in single_image:
        input_feed = InputFeeder('image', inputFile)
    elif args.inp == 'cam':
        input_feed = InputFeeder('cam')
    else:
        input_feed = InputFeeder('video', inputFile)

    # Feed data is loaded
    log.info('Loading data...')
    input_feed.load_data()
    log.info('Data Loaded. Beginning inference...')

    # The models are initialised and loaded here

    # time each model's initialization separately; starting all four timers
    # together would fold the earlier loads into every later measurement
    face_model_load_start_time = time.time()
    ppl_fd = Face_Detection(faceDetectionModel)
    face_model_load_time = time.time() - face_model_load_start_time

    landmark_model_load_start_time = time.time()
    ppl_fl = Facial_Landmarks_Detection(landmarksDetectionModel)
    landmark_model_load_time = time.time() - landmark_model_load_start_time

    headpose_model_load_start_time = time.time()
    ppl_hd = Head_Pose_Estimation(headPoseEstimationModel)
    headpose_model_load_time = time.time() - headpose_model_load_start_time

    gaze_model_load_start_time = time.time()
    ppl_ge = Gaze_Estimation(gazeDetectionModel)
    gaze_model_load_time = time.time() - gaze_model_load_start_time
    
    log.info('Face Detection object initialized')
    log.info('Facial Landmarks object initialized')
    log.info('Head Pose object initialized')
    log.info('Gaze object initialized')
    
    log.info('All models loaded and checked')
    
    load_time = [face_model_load_time, landmark_model_load_time, headpose_model_load_time, gaze_model_load_time]
      
    # count the number of frames
    frameCount = 0

    # create the mouse controller once, outside the frame loop
    mCoord = MouseController('medium', 'fast')

    # collate frames from the feeder and feed into the detection pipelines
    for _, frame in input_feed.next_batch():

        if not _:
            break
        frameCount += 1
        
        if frameCount % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(100)
        
        # Get the time for the model inference
        face_inference_start_time = time.time()
        face_crop = ppl_fd.predict(frame)
        face_inference_time = time.time() - face_inference_start_time
        
        if 'mfd' in visual_flag:
            cv2.imshow('The cropped face', face_crop)
            
        if type(face_crop) == int:
            log.info("No face can be detected")
            
            if key == 27:
                break
            
            continue
        
        # Get the time for the model inference
        landmark_inference_start_time = time.time()
        eye_image_left, eye_image_right, face_landmarked = ppl_fl.predict(face_crop.copy())
        landmark_inference_time = time.time() - landmark_inference_start_time
       
        # Get face landmark results
        if 'mld' in visual_flag:
            cv2.imshow('Face output', face_landmarked)
            
        if eye_image_left is None or eye_image_right is None:
            log.info("Landmarks could not be detected; check that the eyes are visible and the image is bright enough")
            continue
        
        # Get the time for the model inference
        headpose_inference_start_time = time.time()
        head_pose_angles, head_pose_image = ppl_hd.predict(face_crop.copy())   
        headpose_inference_time = time.time() - headpose_inference_start_time
        
        # Get head pose results
        if 'mhp' in visual_flag:
            cv2.imshow('Head Pose Angles', head_pose_image)
        
        # Get the time for the model inference
        gaze_inference_start_time = time.time()
        coord_x, coord_y = ppl_ge.predict(eye_image_left ,eye_image_right, head_pose_angles)
        gaze_inference_time = time.time() - gaze_inference_start_time

        # Get gaze detection results
        if 'mgd' in visual_flag:
            cv2.putText(face_landmarked, "Estimated x:{:.2f} | Estimated y:{:.2f}".format(coord_x, coord_y), (10,20), cv2.FONT_HERSHEY_COMPLEX, 0.25, (0,255,0),1)
            cv2.imshow('Gaze Estimation', face_landmarked)


        # Move the mouse based on the coordinates received
        if frameCount % 5 == 0:
            mCoord.move(coord_x, coord_y)

        if key == 27:
            break
        
        inference_time = [face_inference_time, landmark_inference_time, headpose_inference_time, gaze_inference_time]
        results(args, inference_time, load_time)
        
        if key == ord('x'):
            log.warning('KeyboardInterrupt: `X` was pressed')
            results(args, inference_time, load_time)
            sys.exit()
        
        
    log.info('Elapsed Time: {0}'.format(time.time() - start_time))
    log.info('----------THE END----------')
    cv2.destroyAllWindows()
    input_feed.close()
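The results() reporter called inside the loop above is not shown in the listing. A hedged sketch of what it might do with the two four-element lists (labels and formatting are guesses):

import logging


def results(args, inference_time, load_time):
    log = logging.getLogger(__name__)
    labels = ("face detection", "facial landmarks", "head pose", "gaze")
    for name, inf_t, load_t in zip(labels, inference_time, load_time):
        log.info("%s: load %.3f s, inference %.3f s", name, load_t, inf_t)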
Example #11
def main():

    try:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s [%(levelname)s] %(message)s",
            handlers=[
                logging.FileHandler("Computer_Pointer_Controller.log"),
                logging.StreamHandler()
            ])
    except Exception:
        print("Log file could not be created")

    args = build_argparser().parse_args()
    video_path = args.i
    visualize = args.flags
    count = 0
    fd_inference_time = 0
    fld_inference_time = 0
    hp_inference_time = 0
    ge_inference_time = 0

    MC = MouseController('medium', 'fast')

    logging.info("############## Model Load Time #############")

    start_time = time.time()
    first_model_time = start_time
    FD = Face_Detection(device=args.d, threshold=args.prob, extensions=args.l)
    FD.load_model(model_path=args.f)
    logging.info("Face Detection Model: {:.3f}ms".format(
        1000 * (time.time() - first_model_time)))

    second_model_time = time.time()
    FLD = Facial_Landmarks_Detection(device=args.d, extensions=args.l)
    FLD.load_model(model_path=args.fl)
    logging.info("Facial Landmarks Detection Model: {:.3f}ms".format(
        1000 * (time.time() - second_model_time)))

    third_model_time = time.time()
    HPE = Head_Pose_Estimation(device=args.d, extensions=args.l)
    HPE.load_model(model_path=args.hp)
    logging.info("Head Pose Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - third_model_time)))

    fourth_model_time = time.time()
    GE = Gaze_Estimation(device=args.d, extensions=args.l)
    GE.load_model(model_path=args.g)
    logging.info("Gaze Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - fourth_model_time)))
    logging.info("############## End ######################### ")

    Total_Model_Load_Time = 1000 * (time.time() - start_time)

    ##### LOADING VIDEO FILE #####

    if (video_path == "cam"):
        IF = InputFeeder("cam")
    else:
        IF = InputFeeder("video", video_path)
    IF.load_data()

    ##### MODEL INFERENCE #####

    start_inf_time = time.time()
    for flag, frame in IF.next_batch():

        if not flag:
            break

        if (count % 5 == 0):
            cv2.imshow('frame', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(60)

        count = count + 1

        start_time_1 = time.time()
        face, face_coordinates = FD.predict(frame, args.it)
        fd_inference_time += (time.time() - start_time_1)

        start_time_2 = time.time()
        left_eye_image, right_eye_image, eye_coordinates = FLD.predict(
            face, args.it)
        fld_inference_time += (time.time() - start_time_2)

        start_time_3 = time.time()
        head_pose_angles = HPE.predict(face, args.it)
        hp_inference_time += (time.time() - start_time_3)

        start_time_4 = time.time()
        mouse_coordinates, gaze_vector = GE.predict(left_eye_image,
                                                    right_eye_image,
                                                    head_pose_angles, args.it)
        ge_inference_time += (time.time() - start_time_4)

        if (len(visualize) != 0):
            frame_visualize = frame.copy()

            if ("fd" in visualize):
                if (len(visualize) == 1):
                    cv2.rectangle(frame_visualize,
                                  (face_coordinates[0], face_coordinates[1]),
                                  (face_coordinates[2], face_coordinates[3]),
                                  (255, 0, 255), 2)
                else:
                    frame_visualize = face.copy()

            if ("fld" in visualize):
                if not "fd" in visualize:
                    frame_visualize = face.copy()

                cv2.circle(frame_visualize, (eye_coordinates['left_eye'][0],
                                             eye_coordinates['left_eye'][1]),
                           25, (0, 0, 255), 2)
                cv2.circle(frame_visualize, (eye_coordinates['right_eye'][0],
                                             eye_coordinates['right_eye'][1]),
                           25, (0, 0, 255), 2)

            if ("hp" in visualize):
                cv2.putText(
                    frame_visualize,
                    "Pose Angles: yaw:{:.2f} | pitch:{:.2f} | roll:{:.2f}".
                    format(head_pose_angles[0], head_pose_angles[1],
                           head_pose_angles[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.255, (0, 255, 0), 1)

            if ("ge" in visualize):
                h = face.shape[0]
                arrow = h * 0.7
                arrow_X = gaze_vector[0] * arrow
                arrow_Y = -gaze_vector[1] * arrow
                cv2.arrowedLine(
                    frame_visualize, (eye_coordinates['left_eye'][0],
                                      eye_coordinates['left_eye'][1]),
                    (int(eye_coordinates['left_eye'][0] + arrow_X),
                     int(eye_coordinates['left_eye'][1] + arrow_Y)),
                    (255, 0, 0), 2)
                cv2.arrowedLine(
                    frame_visualize, (eye_coordinates['right_eye'][0],
                                      eye_coordinates['right_eye'][1]),
                    (int(eye_coordinates['right_eye'][0] + arrow_X),
                     int(eye_coordinates['right_eye'][1] + arrow_Y)),
                    (255, 0, 0), 2)
            if (count % 5 == 0):

                cv2.imshow('Visualization',
                           cv2.resize(frame_visualize, (500, 500)))

        if (count % 5 == 0):
            MC.move(mouse_coordinates[0], mouse_coordinates[1])

        if key == 27:
            break

    Total_Inference_Time = time.time() - start_inf_time
    if (count > 0):
        logging.info("############## Models Inference time #######")
        logging.info("Face Detection:{:.3f}ms".format(
            1000 * fd_inference_time / count))
        logging.info("Facial Landmarks Detection:{:.3f}ms".format(
            1000 * fld_inference_time / count))
        logging.info("Headpose Estimation:{:.3f}ms".format(
            1000 * hp_inference_time / count))
        logging.info("Gaze Estimation:{:.3f}ms".format(
            1000 * ge_inference_time / count))
        logging.info("############## End #########################")

    logging.info("############## Summarized Results ##########")
    logging.info(
        "Total Model Load Time: {:.3f}ms".format(Total_Model_Load_Time))
    logging.info("Total Inference Time: {:.3f}s".format(Total_Inference_Time))
    logging.info("FPS:{}".format(count / Total_Inference_Time))
    logging.info("############ End ###########################")
    cv2.destroyAllWindows()
    IF.close()