Example 1
def main():
    # Load parameters
    params = get_args()

    mouse_prec = params['mouse_prec']
    mouse_speed = params['mouse_speed']
    mouse = MouseController(mouse_prec, mouse_speed)
    models = load_models(params)

    # Load input feed
    input_type = params['input_type']
    if input_type == 'cam':
        input_file = None
    else:
        input_file = params['input_file_path']

    feed = InputFeeder(input_type=input_type, input_file=input_file)
    feed.load_data()
    for batch in feed.next_batch():
        if batch is not None:
            image, pos = main_loop(batch, models)
            cv2.imshow('frame', image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            mouse.move(pos[0], pos[1])
        else:
            break
    feed.close()
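All of the snippets in this collection assume a MouseController class with a
(precision, speed) constructor and a move(x, y) method. For reference, here is
a minimal sketch of what such a class might look like, assuming pyautogui and
the preset values quoted in a later snippet ('high'=100, 'low'=1000 for
precision; 'fast'=1, 'slow'=10 for speed); the 'medium' values and everything
else are assumptions, and the authors' actual implementations may differ.

import pyautogui

class MouseController:
    # Hypothetical sketch, not any one example's actual code.
    def __init__(self, precision='medium', speed='medium'):
        precision_dict = {'high': 100, 'low': 1000, 'medium': 500}  # 'medium' assumed
        speed_dict = {'fast': 1, 'slow': 10, 'medium': 5}           # 'medium' assumed
        self.precision = precision_dict[precision]
        self.speed = speed_dict[speed]

    def move(self, x, y):
        # Treat (x, y) as a relative gaze offset, scale it by the precision
        # preset, and glide the OS cursor there over `speed` seconds.
        # y is negated because screen coordinates grow downward.
        pyautogui.moveRel(x * self.precision, -1 * y * self.precision,
                          duration=self.speed)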
def run_test(args):
    mc = MouseController('medium', 'fast')

    model_face = Face_Detector()
    model_face.load_model(args.model_fd, args.device, CPU_EXTENSION)

    model_pose = Pose_Estimator()
    model_pose.load_model(args.model_pe, args.device, CPU_EXTENSION)

    model_landmark = Facial_Landmarks()
    model_landmark.load_model(args.model_fl, args.device, CPU_EXTENSION)

    model_gaze = Gaze_Estimator()
    model_gaze.load_model(args.model_ge, args.device, CPU_EXTENSION)

    frame = cv2.imread(args.input)
    crop_face, face_count, points = model_face.predict(frame, args.thres_fd)

    if face_count == 0:
        print('No face detected.')
        return  # nothing to process without a face crop

    angles = model_pose.predict(frame, crop_face)
    left_eye, right_eye, eye_points = model_landmark.predict(
        frame, crop_face, points)

    mx, my = model_gaze.predict(frame, left_eye, right_eye, angles, eye_points)
    cv2.imwrite('images/ne.jpg', frame)

    mc.move(mx, my)
    def __init__(self,
                 device='CPU',
                 mouse_con=False,
                 face_dec=None,
                 fac_land=None,
                 head_pose=None,
                 gaze=None,
                 show_video=False,
                 save_video=False):
        '''
        All models should be passed in here.
        '''
        if face_dec and fac_land and head_pose and gaze:
            self.face_dec = FaceDetectionModel(face_dec, device=device)
            self.fac_land = FacialLandmarksDetection(fac_land, device=device)
            self.head_pose = Head_Pose_Estimation(head_pose, device=device)
            self.gaze = Gaze_Estimation(gaze, device=device)
            self.face_dec.load_model()
            self.fac_land.load_model()
            self.head_pose.load_model()
            self.gaze.load_model()
        else:
            raise ValueError('Missing Arguments')

        if mouse_con:
            self.mouse_con = MouseController("low", "fast")

        self.show_video, self.save_video = show_video, save_video
    def __init__(self, args):
        '''
        This method initializes instance variables for the application:
        the four models, the mouse controller, and the input feeder.

        Args:
        args: all arguments parsed by the argument parser function

        Return:
        None
        '''

        init_start_time = time.time()
        self.output_path = args.output_path
        self.show_output = args.show_output
        self.total_processing_time = 0
        self.count_batch = 0
        self.inference_speed = []
        self.avg_inference_speed = 0

        if args.all_devices != 'CPU':
            args.face_device = args.all_devices
            args.face_landmark_device = args.all_devices
            args.head_pose_device = args.all_devices
            args.gaze_device = args.all_devices

        model_init_start = time.time()
        self.face_model = FaceDetection(args.face_model, args.face_device,
                                        args.face_device_ext,
                                        args.face_prob_threshold)
        self.landmarks_model = FacialLandmarksDetection(
            args.face_landmark_model, args.face_landmark_device,
            args.face_landmark_device_ext, args.face_landmark_prob_threshold)
        self.head_pose_model = HeadPoseEstimation(
            args.head_pose_model, args.head_pose_device,
            args.head_pose_device_ext, args.head_pose_prob_threshold)
        self.gaze_model = GazeEstimation(args.gaze_model, args.gaze_device,
                                         args.gaze_device_ext,
                                         args.gaze_prob_threshold)
        self.model_init_time = time.time() - model_init_start
        log.info('[ Main ] All required models initialized')

        self.mouse_control = MouseController(args.precision, args.speed)
        log.info('[ Main ] Mouse controller successfully initialized')

        self.input_feeder = InputFeeder(args.batch_size, args.input_type,
                                        args.input_file)
        log.info('[ Main ] Initialized input feeder')

        model_load_start = time.time()
        self.face_model.load_model()
        self.landmarks_model.load_model()
        self.head_pose_model.load_model()
        self.gaze_model.load_model()

        self.model_load_time = time.time() - model_load_start
        self.app_init_time = time.time() - init_start_time
        log.info('[ Main ] All models loaded to Inference Engine\n')
def main():
    """
    Load the network and parse the output.

    :return: None
    """
    # Grab command line args

    mc = MouseController("high", "fast")

    mc.move(randint(10, 109) / 100, -randint(10, 100) / 100)
Example 6
    def __init__(self, args):

        # load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        logger = logging.getLogger()
        input_T = args.input_type
        input_F = args.input_file

        if input_T.lower() == 'cam':
            # open the video feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                logger.error(
                    "Unsupported file extension. Allowed: ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']"
                )
                exit(1)

        print("Models total loading time :",
              time.time() - start_models_load_time)

        # init mouse controller
        self.mouse_controller = MouseController('low', 'fast')
Example 7
def main(args):

    mouse_controller = MouseController('medium', 'fast')

    print("Model Loading..")

    face_detection = Model_FaceDetection(args.face_detection, args.device)
    face_landmark = Model_FacialLandmarksDetection(args.face_landmark, args.device)
    head_pose = Model_HeadPoseEstimation(args.head_pose, args.device)
    gaze_estimation = Model_GazeEstimation(args.gaze_estimation, args.device)
    
    print("Model loaded successfully")

    input_feeder = InputFeeder(input_type='video', input_file=args.input)
    input_feeder.load_data()

    face_detection.load_model()
    head_pose.load_model()
    face_landmark.load_model()
    gaze_estimation.load_model()

    for frame in input_feeder.next_batch():
        try:
            frame.shape
        except AttributeError:  # frame is None: the feed is exhausted
            break

        key = cv2.waitKey(60)

        face, face_coord = face_detection.predict(frame.copy(), args.prob_threshold)

        if type(face) == int:
            print("Unable to detect the face.")
            if key == 27:
                break
            continue

        headPose = head_pose.predict(face.copy())

        left_eye, right_eye, eye_coord = face_landmark.predict(face.copy())

        mouse_coord, gaze_vector = gaze_estimation.predict(left_eye, right_eye, headPose)

        cv2.imshow('video', frame)
        mouse_controller.move(mouse_coord[0], mouse_coord[1])


    input_feeder.close()
    cv2.destroyAllWindows()
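Most of these examples also rely on an InputFeeder with load_data(),
next_batch(), and close(). Note that some snippets iterate
"for frame in feed.next_batch()" while others expect "(ret, frame)" tuples.
A minimal sketch of the frame-yielding variant, assuming OpenCV; the real
project class also handles single images and batch sizes.

import cv2

class InputFeeder:
    # Hypothetical sketch of the feeder interface the snippets assume.
    def __init__(self, input_type, input_file=None):
        self.input_type = input_type
        self.input_file = input_file

    def load_data(self):
        # A 'cam' feed opens the default webcam; anything else is a file.
        source = 0 if self.input_type == 'cam' else self.input_file
        self.cap = cv2.VideoCapture(source)

    def next_batch(self):
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            yield frame

    def close(self):
        self.cap.release()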
Example 8
def model_instants(args):

    face_detection_instant = FaceDetectionModel(model_name=args.face_detection,
                                                device=args.device,
                                                threshold=args.prob_threshold,
                                                extensions=args.cpu_extension)

    head_pose_estimation_instant = HeadPoseEstimationModel(
        model_name=args.head_pose_estimation,
        device=args.device,
        extensions=args.cpu_extension)

    facial_landmarks_instant = FacialLandmarksDetectionModel(
        model_name=args.facial_landmarks_detection,
        device=args.device,
        extensions=args.cpu_extension)

    gaze_estimation_instant = GazeEstimationModel(
        model_name=args.gaze_estimation,
        device=args.device,
        extensions=args.cpu_extension)

    mouse_controller_instant = MouseController('medium', 'fast')

    return face_detection_instant, head_pose_estimation_instant, facial_landmarks_instant, gaze_estimation_instant, mouse_controller_instant
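A caller would unpack the five returned instances and still call load_model()
on each network before predicting. A hypothetical usage sketch:

fd, hp, fl, ge, mouse = model_instants(args)
for model in (fd, hp, fl, ge):
    model.load_model()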
Example 9
def main(model_dir, device, precision, input_type, input_file, inspect):
    mouse_controller = MouseController("medium", "fast")
    input_feeder = InputFeeder(input_type=input_type, input_file=input_file)
    input_feeder.load_data()

    gaze_detect = GazeDetect(model_dir=model_dir, device=device, precision=precision)
    gaze_detect.load_model()

    for image in input_feeder.next_batch():
        with Timer() as t:
            outputs = gaze_detect.predict(image)
        if outputs is not None:
            angle_y_fc, angle_p_fc, angle_r_fc = outputs.reshape(3)
            mouse_controller.move(-angle_y_fc, angle_p_fc)
            print(
                f"Mouse move x: {-angle_y_fc}, y: {angle_p_fc}, execution time: {t.elapsed}"
            )
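The Timer context manager used above is not shown. A plausible stand-in that
measures the wall-clock time of the with-block and exposes it as .elapsed:

import time

class Timer:
    # Hypothetical reconstruction; the original class may differ.
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.perf_counter() - self.start
        return False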
    def __init__(self, args):
        self.log_level = "INFO" if os.environ.get(
            "LOGLEVEL") == "INFO" or args.verbose_stage else "WARNING"
        log.basicConfig(level=self.log_level)

        input_type = 'cam' if args.cam else 'video'
        self.feed = InputFeeder(input_type, args.video)
        if not self.feed.load_data():
            raise Exception('Input valid image or video file')

        fps, w, h = self.feed.get_props()
        self.out_video = cv2.VideoWriter(args.out,
                                         cv2.VideoWriter_fourcc(*'MJPG'), fps,
                                         (w, h), True)

        args.head_pose_model = os.path.join(
            args.head_pose_model, args.precision,
            os.path.basename(args.head_pose_model))
        args.landmarks_model = os.path.join(
            args.landmarks_model, args.precision,
            os.path.basename(args.landmarks_model))
        args.gaze_model = os.path.join(args.gaze_model, args.precision,
                                       os.path.basename(args.gaze_model))

        self.fd = FaceDetect(args.face_model, args.device, args.extension,
                             args.threshold)
        self.fd.load_model()
        self.fd.set_out_size(w, h)

        self.hp = HeadPoseEstimate(args.head_pose_model, args.device,
                                   args.extension, args.threshold)
        self.hp.load_model()

        self.fl = FacialLandMarkDetect(args.landmarks_model, args.device,
                                       args.extension, args.threshold)
        self.fl.load_model()

        self.gz = GazeEstimate(args.gaze_model, args.device, args.extension,
                               args.threshold)
        self.gz.load_model()

        self.mc = MouseController()
        self.verbose_stage = args.verbose_stage
Example 11
def main():
    """
    Load the network and parse the output.
    :return: None
    """
    # Grab command line args
    args = build_argparser().parse_args()
        
    start_time = time.time()
    face_detector = FaceDetect(model_name=args.face, device=args.device, output=args.output)
    face_detector.load_model()
    print("Time taken to load face detection model (in seconds):", time.time()-start_time)

    start_time = time.time()
    eyes_detector = EyesDetect(model_name=args.eyes, device=args.device, output=args.output)
    eyes_detector.load_model()
    print("Time taken to load landmark detection model (in seconds):", time.time()-start_time)

    start_time = time.time()
    angle_detector = AngleDetect(model_name=args.angle, device=args.device)
    angle_detector.load_model()
    print("Time taken to load head pose estimation model (in seconds):", time.time()-start_time)

    start_time = time.time()
    gaze_detector = GazeDetect(model_name=args.gaze, device=args.device)
    gaze_detector.load_model()
    print("Time taken to load gaze estimation model (in seconds):", time.time()-start_time)

    mouse_controller = MouseController('medium','medium')
    
    feed=InputFeeder(input_type=args.video, input_file=args.input)
    feed.load_data()
    for batch in feed.next_batch():
        if batch is None: # catch last frame
            break
        face = face_detector.predict(batch)
        left_eye, right_eye = eyes_detector.predict(face)
        angles = angle_detector.predict(face)
        x, y = gaze_detector.predict(left_eye, right_eye, angles)
        mouse_controller.move(x, y)

    feed.close()
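The load-and-time pattern above repeats four times and could be factored into
a small helper that times only the load_model() call. A hypothetical sketch
(load_timed and label are names introduced here, not from the original):

import time

def load_timed(model, label):
    start = time.time()
    model.load_model()
    print("Time taken to load {} model (in seconds):".format(label),
          time.time() - start)

# e.g. load_timed(face_detector, "face detection")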
Example 12
    def __init__(self):
        self.args = None
        self.feed = None
        self.face_detection_model = None
        self.facial_landmark_detection_model = None
        self.gaze_estimation_model = None
        self.head_pose_estimation_model = None
        self.frame = None
        self.width = None
        self.Height = None
        self.mc = MouseController("high", "fast")
        self.face_detection_load_time = 0
        self.facial_landmark_detection_load_time = 0
        self.gaze_estimation_load_time = 0
        self.head_pose_estimation_load_time = 0
        self.face_detection_infer_time = 0
        self.facial_landmark_detection_infer_time = 0
        self.gaze_estimation_infer_time = 0
        self.head_pose_estimation_infer_time = 0
        self.frames = 0
def main(args):
    feed = InputFeeder(input_type=args.it, input_file=args.i)

    face_model = FaceDetectionModel(args.fm, args.d, args.c, float(args.p))
    face_model.load_model()

    landmarks_model = LandmarksDetectionModel(args.lm, args.d, args.c)
    landmarks_model.load_model()

    headpose_model = HeadPoseDetectionModel(args.hpm, args.d, args.c)
    headpose_model.load_model()

    gaze_model = GazeEstimationModel(args.gem, args.d, args.c)
    gaze_model.load_model()

    mouse = MouseController("medium", "fast")

    feed.load_data()
    for batch in feed.next_batch():
        cropped_face, coords, _ = face_model.predict(batch)
        cv2.rectangle(batch, (coords[0], coords[1]), (coords[2], coords[3]),
                      (255, 0, 0), 2)

        left_eye, right_eye, eyes_coords, _ = landmarks_model.predict(
            cropped_face)

        head_pose_angles, _ = headpose_model.predict(cropped_face)
        x, y, z, _ = gaze_model.predict(left_eye, right_eye, head_pose_angles,
                                        cropped_face, eyes_coords)

        mouse.move(x, y)

        cv2.imshow("img", batch)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    feed.close()
Example 14
def main():
    mouseController = MouseController()

    with camera() as cam:
        while True:
            ok, read_ = cam.read()
            if not ok:
                break

            frame = cv2.flip(read_, 1)
            roi = frame[ROI_SLICE].copy()

            key = cv2.waitKey(2)
            if key == ord('r'):
                print('Background reset')
                setBackground(roi)
                continue

            elif key in (27, ord('q')):
                print('Exit')
                break

            mask = getMask(roi)
            contour = getHandContour(roi, mask)
            if contour is None or cv2.contourArea(contour) < MIN_HAND_SIZE:
                continue

            bottommost, leftmost, topmost, _rightmost = getHandPosition(
                contour)
            drawHandPositions(roi, bottommost, leftmost, topmost)
            angle = getAngle(bottommost, leftmost, topmost)
            if angle is None:
                continue

            mouseController.performActions(roi, topmost, angle)
            showResult(frame, roi)
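The getAngle helper called above is not shown and its exact geometry is
unknown. One plausible reconstruction, purely illustrative: the angle at the
leftmost point between the rays toward the bottommost and topmost points,
returning None for degenerate input (which the loop above skips).

import math

def getAngle(bottommost, leftmost, topmost):
    # Hypothetical reconstruction, not the original implementation.
    if leftmost in (bottommost, topmost):
        return None
    a1 = math.atan2(bottommost[1] - leftmost[1], bottommost[0] - leftmost[0])
    a2 = math.atan2(topmost[1] - leftmost[1], topmost[0] - leftmost[0])
    return abs(math.degrees(a1 - a2))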
Example 15
def inference_frame(m1, m2, m3, m4, inF, args):
    """
    Computes the frame count and total inference time over the input feed.
    """
    visualize = args.visualize
    mc = MouseController('high','fast')
    total = 0
    fc = 0
    inf_time = 0
    for ret, frame in inF.next_batch():
        if not ret:
            break
        if frame is not None:
            fc += 1
            if fc%5 == 0:
                cv2.imshow('video', cv2.resize(frame, (500, 500)))        
            key = cv2.waitKey(60)
            start_inf = time.time()
            face_crop, face_dim = m1.predict(frame.copy(), args.prob_threshold)
            if type(face_crop) == int:
                print("No face detected.")
                if key == 27:
                    break
                continue            
            hp_out = m2.predict(face_crop.copy())            
            le, re, eye_dim = m3.predict(face_crop.copy())            
            new_dim, gv = m4.predict(le, re, hp_out)            
            end_inf = time.time()
            inf_time = inf_time + end_inf - start_inf
            total = total + 1            
            visualization(visualize, frame, face_crop, face_dim, eye_dim, hp_out, gv, le, re)            
            if fc%5 == 0:
                mc.move(new_dim[0], new_dim[1])             
            if key == 27:
                break
    return fc, inf_time
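A caller can turn the returned counters into an average latency and an
effective FPS figure. A hypothetical sketch (m1-m4, feeder, and args are
assumed to be in scope, matching the parameters above):

frame_count, total_inference = inference_frame(m1, m2, m3, m4, feeder, args)
if frame_count and total_inference:
    print('avg inference per frame: {:.4f}s, fps: {:.1f}'.format(
        total_inference / frame_count, frame_count / total_inference))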
def start_pipeline(cla, codec):
    """
    Initializes the input feed, runs each frame through the models, and moves the mouse cursor based on the final gaze estimation.
    :param cla: Command line arguments for configuring the pipeline.
    :param codec: Depending on the platform this is run on, OpenCV requires a codec to be specified. Supply it here.
    :return: None
    """
    preview_flags = cla.preview_flags

    logger = logging.getLogger()
    input_file_path = cla.input

    if input_file_path.lower() == "cam":
        in_feeder = InputFeeder("cam")
    elif not os.path.isfile(input_file_path):
        logger.error("Cannot locate video file provided. Exiting..")
        sys.exit(1)
    else:
        in_feeder = InputFeeder("video", input_file_path)

    start_model_load_time = time.time()
    fdm, fldm, hpem, gem = prep_models(cla)
    total_model_load_time = time.time() - start_model_load_time

    mc = None
    if not cla.is_benchmark:
        mc = MouseController('medium', 'fast')

    in_feeder.load_data()

    fps, total_inference_time, total_time = handle_input_feed(
        logger, preview_flags, fdm, fldm, hpem, gem, mc, in_feeder,
        cla.frame_out_rate, codec, cla.output_path)

    with open(os.path.join(cla.output_path, 'stats.txt'), 'w') as f:
        f.write("Total inference time, " + str(total_inference_time) + '\n')
        f.write("FPS, " + str(fps) + '\n')
        f.write("Total model load time, " + str(total_model_load_time) + '\n')
        f.write("Total time, " + str(total_time) + '\n')

    logger.error("Video stream ended...")
    cv2.destroyAllWindows()
    in_feeder.close()
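Since stats.txt is written as "metric, value" lines, it can be read back with
a simple split. A hypothetical reader (the path is an assumption):

with open('results/stats.txt') as f:
    for line in f:
        metric, value = line.strip().rsplit(', ', 1)
        print(metric, '=', float(value))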
Example 17
def init_models(args):
    global model_face, model_landmarks, model_hpose, model_gaze_estimation, mouse_controller
    precision = args.precision
    device = args.device
    threshold = args.threshold

    model_face = ModelFaceDetection(model_dir_face[precision], device, threshold)
    model_landmarks = ModelFacialLandmarksDetection(model_dir_landmarks[precision], device, threshold)
    model_hpose = ModelHeadPoseEstimation(model_dir_hpose[precision], device, threshold)
    model_gaze_estimation = ModelGazeEstimation(model_dir_gaze[precision], device, threshold)

    model_face.load_model()
    model_landmarks.load_model()
    model_hpose.load_model()
    model_gaze_estimation.load_model()

    mouse_controller = None
    if args.mouse_precision in ['high', 'low', 'medium'] and args.mouse_speed in ['fast', 'slow', 'medium']:
        mouse_controller = MouseController(args.mouse_precision, args.mouse_speed)
def setup(args):
    global input_path, output_path, device, cpu_extension, prob_threshold, flags, mouse_controller, feeder, video_writer, model_dict, model_loading_total_time
    model_args = [
        args.face_detection_model,
        args.facial_landmarks_detection_model,
        args.head_pose_estimation_model,
        args.gaze_estimation_model,
    ]
    model_class = [
        Model_FaceDetection,
        Model_FacialLandMarkDetection,
        Model_HeadPoseEstimation,
        Model_GazeEstimation,
    ]
    input_path = input_path_generator(args.input) if args.input != "CAM" else None
    output_path = output_path_generator(args.output)
    device = args.device
    cpu_extension = args.cpu_extension
    prob_threshold = args.prob_threshold
    flags = args.flags
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    mouse_controller = MouseController("low", "fast")
    if input_path:
        if input_path.endswith(".jpg"):
            feeder = InputFeeder("image", input_path)
        else:
            feeder = InputFeeder("video", input_path)
    else:
        feeder = InputFeeder("cam")
    feeder.load_data()
    fps = feeder.fps()
    initial_w, initial_h, video_len = feeder.frame_initials_and_length()
    video_writer = cv2.VideoWriter(
        os.path.join(output_path, "output_video.mp4"),
        cv2.VideoWriter_fourcc(*"avc1"),
        fps / 10,
        (initial_w, initial_h),
        True,
    )
    model_dict, model_loading_total_time = generate_model_dict(model_args, model_class)
    return
Example 19
def init_model(args):
    global face_model, landmark_model, head_pose_model, gaze_model, mouse_controller
    device_name = args.device
    prob_threshold = args.prob_threshold

    # Initialize variables with the input arguments for easy access
    model_path_dict = {
        'FaceDetectionModel': args.faceDetectionModel,
        'LandmarkRegressionModel': args.landmarkRegressionModel,
        'HeadPoseEstimationModel': args.headPoseEstimationModel,
        'GazeEstimationModel': args.gazeEstimationModel
    }

    # Instantiate model
    face_model = Model_Face(model_path_dict['FaceDetectionModel'],
                            device_name,
                            threshold=prob_threshold)
    landmark_model = Model_Landmark(model_path_dict['LandmarkRegressionModel'],
                                    device_name,
                                    threshold=prob_threshold)
    head_pose_model = Model_Pose(model_path_dict['HeadPoseEstimationModel'],
                                 device_name,
                                 threshold=prob_threshold)
    gaze_model = Model_Gaze(model_path_dict['GazeEstimationModel'],
                            device_name,
                            threshold=prob_threshold)
    mouse_controller = MouseController('medium', 'fast')

    # Load Models
    face_model.load_model()
    landmark_model.load_model()
    head_pose_model.load_model()
    gaze_model.load_model()

    # Check extention of these unsupported layers
    face_model.check_model()
    landmark_model.check_model()
    head_pose_model.check_model()
    gaze_model.check_model()
def main():
    logging.info("Parsing the arguments.")
    args = get_parser().parse_args()

    logging.info("Arguments parsed successfully. Now initialing feedreader.")
    input_feeder = init_feeder(args)

    logging.info("FeedReader initialized, loading the models.")
    fd_model, fld_model, ge_model, hpe_model, total_model_load_time = load_all_models(
        args)
    mc = MouseController('medium', 'fast')

    logging.info("Starting the workflow")
    fps, total_inference_time, effective_fps = run_workflow(
        fd_model, fld_model, ge_model, hpe_model, input_feeder, mc,
        args.show_intermediate_visualization)

    logging.debug("Writing the stats.")
    with open(os.path.join(args.output_dir, 'stats.txt'), 'w') as f:
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps) + '\n')
        f.write(str(effective_fps) + '\n')
        f.write(str(total_model_load_time) + '\n')
Example 21
def gaze_pointer_controller(args, facedetector, facelm, headpose, gaze):

    mouse_controller = MouseController(precision='high', speed='fast')

    # Handle input type:
    inference_time_face = []
    inference_time_landmarks = []
    inference_time_headpose = []
    inference_time_gaze = []
    if args.input != 'CAM':
        try:
            # OpenCV's VideoCapture can handle both videos and images:
            input_stream = cv2.VideoCapture(args.input)
            length = int(input_stream.get(cv2.CAP_PROP_FRAME_COUNT))
            webcamera = False

            # Check if input is an image or video file:
            if length > 1:
                single_image_mode = False
            else:
                single_image_mode = True

        except:
            print(
                'Unsupported image or video file format. Please pass a supported one.'
            )
            exit()

    else:
        input_stream = cv2.VideoCapture(0)
        single_image_mode = False
        webcamera = True

    if not single_image_mode:
        count = 0
        while (input_stream.isOpened()):

            # Read the next frame:
            flag, frame = input_stream.read()

            if not flag:
                break

            if count % args.frame_count == 0:
                start = time.time()
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                # We get a detected face crop and its coordinates:
                face_crop, detection = facedetector.get_face_crop(frame, args)
                finish_face_detector_time = time.time()
                face_detector_time = round(finish_face_detector_time - start,
                                           5)
                log.info("Face detection took {} seconds.".format(
                    face_detector_time))
                inference_time_face.append(face_detector_time)

                # Obtain eyes coordinates:
                right_eye, left_eye = facelm.get_eyes_coordinates(face_crop)

                # Obtain eyes crops:
                right_eye_crop, left_eye_crop, right_eye_coords, left_eye_coords = utils.get_eyes_crops(
                    face_crop, right_eye, left_eye)
                finish_eyes_coordinates = time.time()
                eyes_detector_time = round(
                    finish_eyes_coordinates - finish_face_detector_time, 5)
                log.info("Eyes detection took {} seconds.".format(
                    eyes_detector_time))
                inference_time_landmarks.append(eyes_detector_time)

                # Obtain headpose angles:
                headpose_angles = headpose.get_headpose_angles(face_crop)
                finish_headpose_angles = time.time()
                headpose_detector_time = round(
                    finish_headpose_angles - finish_eyes_coordinates, 5)
                log.info("Headpose angles detection took {} seconds.".format(
                    headpose_detector_time))
                inference_time_headpose.append(headpose_detector_time)

                # Obtain gaze vector and mouse movement values:
                (x_movement, y_movement), gaze_vector = gaze.get_gaze(
                    right_eye_crop, left_eye_crop, headpose_angles)
                finish_gaze_detection_time = time.time()
                gaze_detector_time = round(
                    finish_gaze_detection_time - finish_headpose_angles, 5)
                log.info("Gaze detection took {} seconds.".format(
                    gaze_detector_time))
                inference_time_gaze.append(gaze_detector_time)

                # Optional visualization configuration:
                if args.view_face:
                    frame = cv2.rectangle(frame, (detection[0], detection[1]),
                                          (detection[2], detection[3]),
                                          color=(0, 255, 0),
                                          thickness=5)

                if args.view_eyes:

                    right_eye_coords = [
                        right_eye_coords[0] + detection[1],
                        right_eye_coords[1] + detection[1],
                        right_eye_coords[2] + detection[0],
                        right_eye_coords[3] + detection[0]
                    ]

                    left_eye_coords = [
                        left_eye_coords[0] + detection[1],
                        left_eye_coords[1] + detection[1],
                        left_eye_coords[2] + detection[0],
                        left_eye_coords[3] + detection[0]
                    ]
                    frame = cv2.rectangle(
                        frame, (right_eye_coords[2], right_eye_coords[1]),
                        (right_eye_coords[3], right_eye_coords[0]),
                        color=(255, 0, 0),
                        thickness=5)

                    frame = cv2.rectangle(
                        frame, (left_eye_coords[2], left_eye_coords[1]),
                        (left_eye_coords[3], left_eye_coords[0]),
                        color=(255, 0, 0),
                        thickness=5)

                if args.view_gaze:

                    # Right eye:
                    x_r_eye = int(right_eye[0] * face_crop.shape[1] +
                                  detection[0])
                    y_r_eye = int(right_eye[1] * face_crop.shape[0] +
                                  detection[1])
                    x_r_shift, y_r_shift = int(x_r_eye + gaze_vector[0] *
                                               100), int(y_r_eye -
                                                         gaze_vector[1] * 100)

                    # Left eye:
                    x_l_eye = int(left_eye[0] * face_crop.shape[1] +
                                  detection[0])
                    y_l_eye = int(left_eye[1] * face_crop.shape[0] +
                                  detection[1])
                    x_l_shift, y_l_shift = int(x_l_eye + gaze_vector[0] *
                                               100), int(y_l_eye -
                                                         gaze_vector[1] * 100)

                    frame = cv2.arrowedLine(frame, (x_r_eye, y_r_eye),
                                            (x_r_shift, y_r_shift),
                                            (0, 0, 255), 2)
                    frame = cv2.arrowedLine(frame, (x_l_eye, y_l_eye),
                                            (x_l_shift, y_l_shift),
                                            (0, 0, 255), 2)

                if args.view_headpose:

                    frame = cv2.putText(
                        frame, 'Yaw: ' + str(headpose_angles[0]) + ' ' +
                        'Pitch: ' + str(headpose_angles[1]) + ' ' + 'Roll: ' +
                        str(headpose_angles[2]), (15, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)

                # Resize the window for visualization convenience:
                cv2.namedWindow('Preview', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Preview', 600, 400)
                cv2.imshow('Preview', frame)

                mouse_controller.move(x_movement, y_movement)
            count = count + 1

        input_stream.release()
        with open('times.csv', 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow([
                'Face Detector', 'Eyes Detector', 'Headpose Detector',
                'Gaze Detector'
            ])
            for i in range(len(inference_time_face)):
                writer.writerow([
                    inference_time_face[i], inference_time_landmarks[i],
                    inference_time_headpose[i], inference_time_gaze[i]
                ])

    cv2.destroyAllWindows()
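The times.csv produced above has one column per stage, so per-stage averages
fall out of a single pass over the rows. A hypothetical post-processing
sketch:

import csv

with open('times.csv', newline='') as csvfile:
    rows = list(csv.reader(csvfile))

header, data = rows[0], rows[1:]
for i, stage in enumerate(header):
    values = [float(row[i]) for row in data]
    if values:
        print('{}: {:.5f} s average'.format(stage, sum(values) / len(values)))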
def main():

    args = build_argparser().parse_args()
    visual = args.visual_flag
    log = logging.getLogger()
    input_source = args.input_source

    try:
        video_path = args.input_path
    except Exception as e:
        video_path = None
    feed = None
    if input_source.lower() == 'cam':
        feed = InputFeeder('cam')
    elif input_source.lower() == 'video' and os.path.isfile(video_path):
        feed = InputFeeder('video', video_path)
    else:
        log.error('Wrong input feed. (check the video path).')
        exit(1)

    fd = Model_Face(args.face_detection_model, args.device, args.extension)
    hp = Model_HeadPose(args.head_pose_model, args.device, args.extension)
    fl = Model_Faciallandmark(args.facial_landmarks_model, args.device,
                              args.extension)
    ga = Model_Gaze(args.gaze_model, args.device, args.extension)
    ### You can pass precision and speed values directly, OR use the presets:
    ##  precision: 'high' (100), 'low' (1000), 'medium', 'low-med'
    ##  speed: 'fast' (1), 'slow' (10), 'medium', 'slow-med'
    #   mouse = MouseController('low-med', 'slow-med')
    mouse = MouseController(500, 4)

    feed.load_data()

    # load models
    fd.load_model()
    hp.load_model()
    fl.load_model()
    ga.load_model()
    count = 0
    for ret, frame in feed.next_batch():
        if not ret:
            break
        count += 1
        if count % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))
        key = cv2.waitKey(60)
        frame_cp = frame.copy()
        face, face_position = fd.predict(frame_cp, args.threshold)
        if type(face) == int:
            log.error("Prediction error: can't find a face.")
            if key == 27:
                break
            continue
        face_cp = face.copy()
        hp_output = hp.predict(face_cp)
        left_eye, right_eye, facial = fl.predict(face_cp)
        mouse_coord, gaze_vector = ga.predict(left_eye, right_eye, hp_output)

        if (not len(visual) == 0):
            visual_frame = frame.copy()
            ### Visual FLAGS
            # face detection
            if 'fd' in visual:
                visual_frame = face
            # Head pose
            if 'hp' in visual:
                cv2.putText(
                    visual_frame,
                    "Yaw: {:.2f} Pitch: {:.2f} Roll: {:.2f}".format(
                        hp_output[0], hp_output[1], hp_output[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 255, 50), 1)
            # Facial landmarks
            if 'fl' in visual:
                cv2.rectangle(face, (facial[0][0] - 10, facial[0][1] - 10),
                              (facial[0][2] + 10, facial[0][3] + 10),
                              (255, 0, 0), 3)
                cv2.rectangle(face, (facial[1][0] - 10, facial[1][1] - 10),
                              (facial[1][2] + 10, facial[1][3] + 10),
                              (255, 0, 0), 3)
            # Gaze estimation
            if 'ga' in visual:
                x, y, w = int(gaze_vector[0] * 12), int(gaze_vector[1] *
                                                        12), 160
                le = cv2.line(left_eye.copy(), (x - w, y - w), (x + w, y + w),
                              (255, 255, 0), 2)
                cv2.line(le, (x - w, y + w), (x + w, y - w), (255, 50, 150), 2)
                re = cv2.line(right_eye.copy(), (x - w, y - w), (x + w, y + w),
                              (255, 255, 0), 2)
                cv2.line(re, (x - w, y + w), (x + w, y - w), (255, 50, 150), 2)
                face[facial[0][1]:facial[0][3], facial[0][0]:facial[0][2]] = le
                face[facial[1][1]:facial[1][3], facial[1][0]:facial[1][2]] = re
            cv2.namedWindow('Visualization', cv2.WINDOW_AUTOSIZE)
            cv2.moveWindow('Visualization', 900, 900)
            cv2.imshow('Visualization', cv2.resize(visual_frame, (500, 500)))
            if args.visual_save.lower() == 'y':
                if count % 10 == 0:
                    cv2.imwrite(str(count) + '_visual.jpg', visual_frame)
        if count % 5 == 0:
            mouse.move(mouse_coord[0], mouse_coord[1])
        if key == 27:
            break

    log.info('Ended!')
    cv2.destroyAllWindows()
    feed.close()
def main():

    args = get_args().parse_args()
    path_filender = args.input
    four_flags = args.flags_checker
    loger = logging.getLogger()
    feeder_in = None
    out_path = args.out_path

    if path_filender.lower() == "cam":
        feeder_in = InputFeeder("cam")
    else:
        if not os.path.isfile(path_filender):
            loger.error("The video was not found")
            exit(1)
        feeder_in = InputFeeder("video", path_filender)

    model_locations = {
        'FaceDetection': args.face_detection_model,
        'HeadPoseEstimation': args.head_pose_estimation_model,
        'FacialLandmarksDetection': args.facial_landmarks_detection_model,
        'GazeEstimation': args.gaze_estimation_model
    }

    for key_name in model_locations.keys():
        if not os.path.isfile(model_locations[key_name]):
            loger.error("The system cannot find the " + key_name + " xml file")
            exit(1)

    dt = FaceDetection(model_locations['FaceDetection'], args.device,
                       args.cpu_extension)
    pe = HeadPoseEstimation(model_locations['HeadPoseEstimation'], args.device,
                            args.cpu_extension)
    ld = FacialLandmarksDetection(model_locations['FacialLandmarksDetection'],
                                  args.device, args.cpu_extension)
    ge = GazeEstimation(model_locations['GazeEstimation'], args.device,
                        args.cpu_extension)

    cursor = MouseController('medium', 'fast')

    feeder_in.load_data()
    model_load_time_start = time.time()
    dt.load_model()
    pe.load_model()
    ld.load_model()
    ge.load_model()
    total_load_time = time.time() - model_load_time_start

    frame_counter = 0
    inference_time_start = time.time()
    for ret, frame in feeder_in.next_batch():
        if not ret:
            break
        frame_counter = frame_counter + 1
        cv2.imshow('video', cv2.resize(frame, (600, 600)))

        key = cv2.waitKey(60)

        face_detected, coords_face = dt.predict(frame, args.p_th)
        if type(face_detected) == int:
            loger.error("The system cannot detect any face.")
            if key == 27:
                break
            continue

        head_pose_output = pe.predict(face_detected)
        eye_left_detect, eye_right_detect, eye_coordinates_detect = ld.predict(
            face_detected)
        coordi_update_pointer, coordi_gaze = ge.predict(
            eye_left_detect, eye_right_detect, head_pose_output)

        if (not len(four_flags) == 0):
            result_app = frame
            if 'fad' in four_flags:
                result_app = face_detected
            if 'hpe' in four_flags:
                cv2.putText(
                    result_app,
                    "HP Angles: YAW:{:.3f} * PITCH:{:.3f} * ROLL:{:.3f}".
                    format(head_pose_output[0], head_pose_output[1],
                           head_pose_output[2]), (5, 40),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (153, 76, 0), 0)
            if 'fld' in four_flags:
                cv2.rectangle(face_detected,
                              (eye_coordinates_detect[0][0] - 4,
                               eye_coordinates_detect[0][1] - 4),
                              (eye_coordinates_detect[0][2] + 4,
                               eye_coordinates_detect[0][3] + 4),
                              (255, 255, 0), 4)
                cv2.rectangle(face_detected,
                              (eye_coordinates_detect[1][0] - 4,
                               eye_coordinates_detect[1][1] - 4),
                              (eye_coordinates_detect[1][2] + 4,
                               eye_coordinates_detect[1][3] + 4),
                              (255, 255, 0), 4)
            if 'gae' in four_flags:
                x = int(coordi_gaze[0] * 2)
                y = int(coordi_gaze[1] * 2)
                w = 150
                right_E = cv2.line(eye_right_detect, (x - w, y - w),
                                   (x + w, y + w), (51, 255, 153), 1)
                cv2.line(right_E, (x - w, y + w), (x + w, y - w),
                         (51, 255, 253), 1)
                left_E = cv2.line(eye_left_detect, (x - w, y - w),
                                  (x + w, y + w), (51, 255, 153), 1)
                cv2.line(left_E, (x - w, y + w), (x + w, y - w),
                         (51, 255, 253), 1)
                face_detected[
                    eye_coordinates_detect[1][1]:eye_coordinates_detect[1][3],
                    eye_coordinates_detect[1][0]:eye_coordinates_detect[1]
                    [2]] = right_E
                face_detected[
                    eye_coordinates_detect[0][1]:eye_coordinates_detect[0][3],
                    eye_coordinates_detect[0][0]:eye_coordinates_detect[0]
                    [2]] = left_E

            cv2.imshow("Result of the App", cv2.resize(result_app, (600, 600)))

        if frame_counter % 5 == 0:
            cursor.move(coordi_update_pointer[0], coordi_update_pointer[1])
        if key == 27:
            break

    total_time = time.time() - inference_time_start
    total_time_for_inference = round(total_time, 1)
    fps = frame_counter / total_time_for_inference

    with open(out_path + 'stats.txt', 'w') as f:
        f.write('Inference time: ' + str(total_time_for_inference) + '\n')
        f.write('FPS: ' + str(fps) + '\n')
        f.write('Model load time: ' + str(total_load_time) + '\n')

    loger.error("The video stream is over...")
    cv2.destroyAllWindows()
    feeder_in.close()
def main():

    # Grab command line args
    args = build_argparser().parse_args()
    flags = args.models_outputs_flags

    logger = logging.getLogger()
    input_file_path = args.input
    input_feeder = None
    if input_file_path.lower() == "cam":
        input_feeder = InputFeeder("cam")
    else:
        if not os.path.isfile(input_file_path):
            logger.error("Unable to find specified video file")
            exit(1)
        input_feeder = InputFeeder("video", input_file_path)

    model_path_dict = {
        'FaceDetection': args.face_detection_model,
        'FacialLandmarks': args.facial_landmarks_model,
        'GazeEstimation': args.gaze_estimation_model,
        'HeadPoseEstimation': args.head_pose_estimation_model
    }

    for file_name_key in model_path_dict.keys():
        if not os.path.isfile(model_path_dict[file_name_key]):
            logger.error("Unable to find specified " + file_name_key +
                         " xml file")
            exit(1)

    fdm = FaceDetection(model_path_dict['FaceDetection'], args.device,
                        args.cpu_extension)
    flm = FacialLandmarks(model_path_dict['FacialLandmarks'], args.device,
                          args.cpu_extension)
    gem = GazeEstimation(model_path_dict['GazeEstimation'], args.device,
                         args.cpu_extension)
    hpem = HeadPoseEstimation(model_path_dict['HeadPoseEstimation'],
                              args.device, args.cpu_extension)

    mc = MouseController('medium', 'fast')

    input_feeder.load_data()
    fdm.load_model()
    flm.load_model()
    hpem.load_model()
    gem.load_model()

    frame_count = 0
    for ret, frame in input_feeder.next_batch():
        if not ret:
            break
        frame_count += 1
        if frame_count % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(60)
        cropped_face, face_coords = fdm.predict(frame, args.prob_threshold)
        if type(cropped_face) == int:
            logger.error("Unable to detect any face.")
            if key == 27:
                break
            continue

        hp_output = hpem.predict(cropped_face)

        left_eye_img, right_eye_img, eye_coords = flm.predict(cropped_face)

        new_mouse_coord, gaze_vector = gem.predict(left_eye_img, right_eye_img,
                                                   hp_output)

        if (not len(flags) == 0):
            preview_frame = frame
            if 'fd' in flags:
                preview_frame = cropped_face
            if 'fld' in flags:
                cv2.rectangle(cropped_face,
                              (eye_coords[0][0] - 10, eye_coords[0][1] - 10),
                              (eye_coords[0][2] + 10, eye_coords[0][3] + 10),
                              (0, 255, 0), 3)
                cv2.rectangle(cropped_face,
                              (eye_coords[1][0] - 10, eye_coords[1][1] - 10),
                              (eye_coords[1][2] + 10, eye_coords[1][3] + 10),
                              (0, 255, 0), 3)

            if 'hp' in flags:
                cv2.putText(
                    preview_frame,
                    "Pose Angles: yaw:{:.2f} | pitch:{:.2f} | roll:{:.2f}".
                    format(hp_output[0], hp_output[1], hp_output[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (0, 255, 0), 1)
            if 'ge' in flags:
                x, y, w = int(gaze_vector[0] * 12), int(gaze_vector[1] *
                                                        12), 160
                left_eye = cv2.line(left_eye_img, (x - w, y - w),
                                    (x + w, y + w), (255, 0, 255), 2)
                cv2.line(left_eye, (x - w, y + w), (x + w, y - w),
                         (255, 0, 255), 2)
                right_eye = cv2.line(right_eye_img, (x - w, y - w),
                                     (x + w, y + w), (255, 0, 255), 2)
                cv2.line(right_eye, (x - w, y + w), (x + w, y - w),
                         (255, 0, 255), 2)
                cropped_face[eye_coords[0][1]:eye_coords[0][3],
                             eye_coords[0][0]:eye_coords[0][2]] = left_eye
                cropped_face[eye_coords[1][1]:eye_coords[1][3],
                             eye_coords[1][0]:eye_coords[1][2]] = right_eye

            cv2.imshow("Visualization", cv2.resize(preview_frame, (500, 500)))

        if frame_count % 5 == 0:
            mc.move(new_mouse_coord[0], new_mouse_coord[1])
        if key == 27:
            break
    logger.error("VideoStream ended...")
    cv2.destroyAllWindows()
    input_feeder.close()
Example 25
def application(args, facedetector, facelm, headpose, gaze):

    pointer_controller = MouseController(precision='high', speed='fast')

    if args.input != 'CAM':
        try:
            # OpenCV's VideoCapture can handle both videos and images:
            in_stream = cv2.VideoCapture(args.input)
            l = int(in_stream.get(cv2.CAP_PROP_FRAME_COUNT))
            webcam = False

            # Check whether the input is an image or a video file:
            if l > 1:
                single_image_mode = False
            else:
                single_image_mode = True

        except:
            print('Unsupported image or video file format. Please pass a supported one.')
            exit()

    else:
        in_stream = cv2.VideoCapture(0)
        single_image_mode = False
        webcam = True

    if not single_image_mode:
        count = 0
        while in_stream.isOpened():

            # Read the next frame:
            flag, frame = in_stream.read()

            if not flag:
                break

            if count % 25 == 0:

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                # Get a detected face crop and its coordinates:
                face_crop, detection = facedetector.get_face_crop(frame, args)

                # Obtain eyes coordinates:
                right_eye, left_eye = facelm.get_eyes_coordinates(face_crop)

                # Obtain eyes crops:
                right_eye_crop, left_eye_crop, right_eye_coords, left_eye_coords = helpers.get_eyes_crops(face_crop, right_eye, left_eye)

                # Obtain headpose angles:
                headpose_angles = headpose.get_headpose_angles(face_crop)

                # Obtain gaze vector and mouse movement values:
                (x_movement, y_movement), gaze_vector = gaze.get_gaze(right_eye_crop, left_eye_crop, headpose_angles)

                # Optional visualization configuration:
                if args.show_face:
                    frame = cv2.rectangle(frame,
                                          (detection[0],detection[1]),
                                          (detection[2],detection[3]), 
                                          color=(0,255,0), 
                                          thickness=5)
                if args.show_headpose:

                    frame = cv2.putText(frame, 'Roll: '+
                                    str(headpose_angles[2])+' '+
                                    'Pitch: '+str(headpose_angles[1])+' '+
                                    'Yaw: '+str(headpose_angles[0]),(15,20),cv2.FONT_HERSHEY_SIMPLEX,0.65,(0,0,0),2)

                if args.show_eyes:

                    right_eye_coords = [right_eye_coords[0]+detection[1], right_eye_coords[1]+detection[1], 
                                right_eye_coords[2]+detection[0], right_eye_coords[3]+detection[0]]

                    left_eye_coords = [left_eye_coords[0]+detection[1], left_eye_coords[1]+detection[1], 
                                left_eye_coords[2]+detection[0], left_eye_coords[3]+detection[0]]
                    frame = cv2.rectangle(frame,
                                            (right_eye_coords[2],right_eye_coords[1]),
                                            (right_eye_coords[3],right_eye_coords[0]), 
                                            color=(255,0,0), 
                                            thickness=5)

                    frame = cv2.rectangle(frame,
                                            (left_eye_coords[2],left_eye_coords[1]),
                                            (left_eye_coords[3],left_eye_coords[0]), 
                                            color=(255,0,0), 
                                            thickness=5)

                if args.show_gaze:
                
                    # Right eye:
                    x_r_eye = int(right_eye[0]*face_crop.shape[1]+detection[0])
                    y_r_eye = int(right_eye[1]*face_crop.shape[0]+detection[1])
                    x_r_shift, y_r_shift = int(x_r_eye+gaze_vector[0]*100), int(y_r_eye-gaze_vector[1]*100)

                    # Left eye:
                    x_l_eye = int(left_eye[0]*face_crop.shape[1]+detection[0])
                    y_l_eye = int(left_eye[1]*face_crop.shape[0]+detection[1])
                    x_l_shift, y_l_shift = int(x_l_eye+gaze_vector[0]*100), int(y_l_eye-gaze_vector[1]*100)

                    frame = cv2.arrowedLine(frame, (x_r_eye, y_r_eye), (x_r_shift, y_r_shift), (0, 255, 0), 4)
                    frame = cv2.arrowedLine(frame, (x_l_eye, y_l_eye), (x_l_shift, y_l_shift), (0, 255, 0), 4)

                cv2.namedWindow('Output', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Output', 800, 600)
                cv2.imshow('Output', frame)

                pointer_controller.move(x_movement, y_movement)
            count = count + 1

        in_stream.release()
        
    cv2.destroyAllWindows()
def main(args):

    # getting the arguments
    # Treat anything other than "true" as False (avoids an undefined variable):
    perf_counts = args.get_perf_counts.lower() == "true"
    precision = args.precision.lower()
    speed = args.speed.lower()
    media_type = args.media_type.lower()
    media_path = args.media_file
    toggle_ui = args.show_video
    batch_size = args.batch_size
    device = args.device
    iterations = 1 if media_type == "cam" else int(args.iterations)

    #initialize the mouse object
    mouse = MouseController(precision, speed)

    # Initialize the input feeder
    feed = InputFeeder(media_type, batch_size, media_path)

    # Initialize and load the inference models
    model = Model(face_detection, facial_landmarks, gaze_estimation,
                  head_pose_estimation, device)
    model.load_models()

    for _ in range(iterations):

        feed.load_data()

        #This will be used as a way to keep track of the average time for the preprocessing and inference of the models
        times = np.zeros((8, ))
        counter_frames = 0

        if media_type != "image":
            width = feed.cap.get(3)
            height = feed.cap.get(4)
        else:
            height, width, _ = feed.cap.shape
        try:
            for frame in feed.next_batch(media_type):
                counter_frames += 1
                #generates the prediction
                x, y, gaze_vector, times = model.predict(
                    frame, width, height, times)
                #generates the movement on the cursor
                mouse.move(x, y)

                if perf_counts:
                    # Overlay and print the running average time (ms) per stage
                    for i, label in enumerate(stage_labels):
                        text = label + ": " + str(
                            times[i] / counter_frames * 1000) + " ms"
                        cv2.putText(frame, text, (0, 50 * (i + 1)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (209, 80, 0),
                                    3)
                        print(text)

                if toggle_ui:
                    cv2.imshow("Frame", frame)
                # Read the keyboard once per frame: 'q' quits, 'i' toggles the UI
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                if key == ord('i'):
                    toggle_ui = not toggle_ui

        except Exception as e:
            print("Video has ended or couldn't continue: " + str(e))
        if perf_counts and counter_frames > 0:
            print("Final average: ")
            for i, label in enumerate(stage_labels):
                print(label + ": " + str(times[i] / counter_frames * 1000) +
                      " ms")
        feed.close()
        cv2.destroyAllWindows()
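Note: the times array above holds running totals per stage, so dividing by counter_frames yields a running per-frame average. A minimal, dependency-free sketch of the same accumulator pattern (the durations below are stand-ins, not real model timings):

import numpy as np

# Hypothetical two-stage timer: accumulate per-frame durations, report averages
times = np.zeros(2)                       # [preprocess_total, inference_total], seconds
for counter_frames in range(1, 4):
    times += np.array([0.002, 0.010])     # stand-in timings for one frame
    avg_ms = times / counter_frames * 1000
    print("Preprocess avg: %.2f ms, Inference avg: %.2f ms" % tuple(avg_ms))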
Example n. 27
    def infer(self, args):
        # Create instances from the models' classes
        FDM_net = ModelFaceDetection()
        HPE_net = ModelHeadPoseEstimation()
        FLD_net = ModelFacialLandmarksDetection()
        GEM_net = ModelGazeEstimation()
        mouse_controller = MouseController('high', 'fast')

        # Load the models
        start1 = time.time()
        FDM_net.load_model(args.face_detection_model, args.device)
        FDM_load_t = time.time() - start1

        start2 = time.time()
        HPE_net.load_model(args.head_pose_estimation_model, args.device)
        HPE_load_t = time.time() - start2

        start3 = time.time()
        FLD_net.load_model(args.facial_landmarks_detection_model, args.device)
        FLD_load_t = time.time() - start3

        start4 = time.time()
        GEM_net.load_model(args.gaze_estimation_model, args.device)
        GEM_load_t = time.time() - start4

        print('All models are loaded!')

        #Check the inputs
        # To make the mouse moving we need video stream either from camera or video path
        if args.input.lower() == 'cam':
            # Initialise the InputFeeder class
            input_feeder = InputFeeder(input_type='cam', input_file=args.input)
        else:
            if not os.path.isfile(args.input):
                log.error("Please insert valid video path to run the app.")
                exit()
            # Initialise the InputFeeder class
            input_feeder = InputFeeder(input_type='video',
                                       input_file=args.input)

        # Load the video capture
        input_feeder.load_data()

        # Inference time
        inference = time.time()

        # Read from the video capture
        for flag, frame in input_feeder.next_batch():
            if not flag:
                break
            key_pressed = cv2.waitKey(60)

            # Run inference on the models
            start5 = time.time()
            face_coords = FDM_net.predict(frame)
            FDM_infer_t = time.time() - start5

            # Everything depends on the face detection output; if no face is
            # detected, skip the frame (check before cropping so an empty
            # result is never indexed)
            if len(face_coords) == 0:
                log.error("There is no face detected.")
                continue

            # crop the face from the frame
            cropped_face = frame[face_coords[1]:face_coords[3],
                                 face_coords[0]:face_coords[2]]
            start6 = time.time()
            HP_angles = HPE_net.predict(cropped_face, face_coords)
            HPE_infer_t = time.time() - start6

            # O_frame is the frame shown at the end of the loop; create it
            # unconditionally, since the landmark and gaze steps below draw on
            # it even when display_flag is off
            O_frame = frame.copy()
            if args.display_flag:
                #### display the face
                cv2.rectangle(O_frame, (face_coords[0], face_coords[1]),
                              (face_coords[2], face_coords[3]), (255, 255, 0),
                              2)

                #### display the pose angles
                # Link for pose estimation output code resource: https://sudonull.com/post/6484-Intel-OpenVINO-on-Raspberry-Pi-2018-harvest
                cos_r = cos(HP_angles[2] * pi / 180)
                sin_r = sin(HP_angles[2] * pi / 180)
                cos_y = cos(HP_angles[0] * pi / 180)
                sin_y = sin(HP_angles[0] * pi / 180)
                cos_p = cos(HP_angles[1] * pi / 180)
                sin_p = sin(HP_angles[1] * pi / 180)

                x = int((face_coords[0] + face_coords[2]) / 2)
                y = int((face_coords[1] + face_coords[3]) / 2)
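                # The cos/sin products below are entries of the combined
                # yaw-pitch-roll rotation matrix: each cv2.line projects one
                # 65-pixel head axis from the face centre onto the image plane
                # (blue, green, red for the three axes)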
                cv2.line(O_frame, (x, y),
                         (x + int(65 *
                                  (cos_r * cos_y + sin_y * sin_p * sin_r)),
                          y + int(65 * cos_p * sin_r)), (255, 0, 0),
                         thickness=2)
                cv2.line(O_frame, (x, y),
                         (x + int(65 *
                                  (cos_r * sin_y * sin_p + cos_y * sin_r)),
                          y - int(65 * cos_p * cos_r)), (0, 255, 0),
                         thickness=2)
                cv2.line(O_frame, (x, y),
                         (x + int(65 * sin_y * cos_p), y + int(65 * sin_p)),
                         (0, 0, 255),
                         thickness=2)

            start7 = time.time()
            l_e, r_e, l_e_image, r_e_image, e_center = FLD_net.predict(
                O_frame, cropped_face, face_coords)
            FLD_infer_t = time.time() - start7

            ###display landmarks for both eyes
            if args.display_flag:
                cv2.circle(O_frame,
                           (face_coords[0] + l_e[0], face_coords[1] + l_e[1]),
                           29, (0, 255, 255), 2)
                cv2.circle(O_frame,
                           (face_coords[0] + r_e[0], face_coords[1] + r_e[1]),
                           29, (0, 255, 255), 2)

            start8 = time.time()
            g_vec = GEM_net.predict(l_e_image, r_e_image, HP_angles)
            GEM_infer_t = time.time() - start8

            ###display gaze model output
            if args.display_flag:
                cv2.arrowedLine(O_frame,
                                (int(e_center[0][0]), int(e_center[0][1])),
                                (int(e_center[0][0]) + int(g_vec[0] * 90),
                                 int(e_center[0][1]) + int(-g_vec[1] * 90)),
                                (203, 192, 255), 2)
                cv2.arrowedLine(O_frame,
                                (int(e_center[1][0]), int(e_center[1][1])),
                                (int(e_center[1][0]) + int(g_vec[0] * 90),
                                 int(e_center[1][1]) + int(-g_vec[1] * 90)),
                                (203, 192, 255), 2)

            # change the pointer position according to the estimated gaze direction
            mouse_controller.move(g_vec[0], g_vec[1])

            if key_pressed == 27:
                break

            # Display the resulting frame
            cv2.imshow('Mouse Controller App Results',
                       cv2.resize(O_frame, (750, 550)))

        inference_time = time.time() - inference

        print("Loading time: \n1-Face detection: " + str(FDM_load_t) +
              "\n2- Head pose estimation: " + str(HPE_load_t) +
              "\n3-Facial landmarks model: " + str(FLD_load_t) +
              "\n4-Gaze estimation model:  " + str(GEM_load_t))
        print("Output inference time: \n1-Face detection: " +
              str(FDM_infer_t) + "\n2- Head pose estimation: " +
              str(HPE_infer_t) + "\n3-Facial landmarks model: " +
              str(FLD_infer_t) + "\n4-Gaze estimation model:  " +
              str(GEM_infer_t))

        # close the input feeder and destroy all opened windows
        input_feeder.close()
        cv2.destroyAllWindows()
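The mouse_controller.move call above consumes the first two gaze-vector components as relative screen offsets. A minimal sketch of such a controller, assuming pyautogui and presets similar to the common course starter code (the exact values here are illustrative, not this author's):

import pyautogui

class MouseController:
    # Illustrative presets: precision scales the gaze components to pixels,
    # speed sets the duration of each move in seconds
    PRECISION = {'high': 100, 'medium': 500, 'low': 1000}
    SPEED = {'fast': 1, 'medium': 5, 'slow': 10}

    def __init__(self, precision, speed):
        self.precision = self.PRECISION[precision]
        self.speed = self.SPEED[speed]

    def move(self, x, y):
        # y is negated: image coordinates grow downwards, screen "up" does not
        pyautogui.moveRel(x * self.precision, -1 * y * self.precision,
                          duration=self.speed)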
Example n. 28
def main():
    args = build_argparser().parse_args()

    frame_num = 0
    inference_time = 0
    counter = 0

    # Initialize the Inference Engine
    fd = FaceDetection()
    fld = Facial_Landmarks_Detection()
    ge = Gaze_Estimation()
    hp = Head_Pose_Estimation()

    # Load Models
    fd.load_model(args.face_detection_model, args.device, args.cpu_extension)
    fld.load_model(args.facial_landmark_model, args.device, args.cpu_extension)
    ge.load_model(args.gaze_estimation_model, args.device, args.cpu_extension)
    hp.load_model(args.head_pose_model, args.device, args.cpu_extension)

    # Mouse Controller precision and speed
    mc = MouseController('medium', 'fast')

    # feed input from an image, webcam, or video to model
    if args.input == "cam":
        feed = InputFeeder("cam")
    else:
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        feed = InputFeeder("video", args.input)
    feed.load_data()
    frame_count = 0
    for frame in feed.next_batch():
        frame_count += 1
        inf_start = time.time()
        if frame is not None:
            try:
                key = cv2.waitKey(60)

                # make predictions
                detected_face, face_coords = fd.predict(
                    frame.copy(), args.prob_threshold)
                hp_output = hp.predict(detected_face.copy())
                left_eye, right_eye, eye_coords = fld.predict(
                    detected_face.copy())
                new_mouse_coord, gaze_vector = ge.predict(
                    left_eye, right_eye, hp_output)

                stop_inference = time.time()
                inference_time = inference_time + stop_inference - inf_start
                counter = counter + 1

                # Visualization
                if args.visualization:
                    preview_frame = frame.copy()
                    face_frame = detected_face.copy()

                    draw_face_bbox(preview_frame, face_coords)
                    display_hp(preview_frame, hp_output, face_coords)
                    draw_landmarks(face_frame, eye_coords)
                    draw_gaze(face_frame, gaze_vector, left_eye.copy(),
                              right_eye.copy(), eye_coords)

                    img = np.hstack((cv2.resize(preview_frame, (500, 500)),
                                     cv2.resize(face_frame, (500, 500))))
                else:
                    img = cv2.resize(frame, (500, 500))

                cv2.imshow('Visualization', img)

                # set speed
                if frame_count % 5 == 0:
                    mc.move(new_mouse_coord[0], new_mouse_coord[1])

                # INFO
                log.info("NUMBER OF FRAMES: {} ".format(frame_num))
                log.info("INFERENCE TIME: {}ms".format(
                    (stop_inference - inf_start) * 1000))

                frame_num += 1

                if key == 27:
                    break
            except Exception as e:
                print('Could not process the frame (unsupported image/video '
                      'format or model error): ' + str(e))
                exit()
    feed.close()
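Moving the cursor only every fifth frame (frame_count % 5 above) keeps the slow, blocking mouse move from stalling capture and inference. A minimal sketch of that throttling pattern, with a print standing in for the mouse move:

# Hypothetical throttle: run a slow side effect only on every Nth frame
N = 5
for frame_count in range(1, 21):
    # ... per-frame inference would happen here ...
    if frame_count % N == 0:
        print("moving cursor on frame", frame_count)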
Example n. 29
def main():

    args = build_argparser().parse_args()
    logger = logging.getLogger('main')

    model_path_dict = {
        'FaceDetectionModel': args.faceDetectionModel,
        'FacialLandmarksModel': args.facialLandmarksModel,
        'HeadPoseEstimationModel': args.headPoseEstimationModel,
        'GazeEstimationModel': args.gazeEstimationModel
    }

    bbox_flag = args.bbox_flag
    input_filename = args.input
    device_name = args.device
    prob_threshold = args.prob_threshold
    output_path = args.output_path

    if input_filename.lower() == 'cam':
        feeder = InputFeeder(input_type='cam')
    else:
        if not os.path.isfile(input_filename):
            logger.error("Unable to find specified video file")
            exit(1)
        feeder = InputFeeder(input_type='video', input_file=input_filename)

    for model_path in list(model_path_dict.values()):
        if not os.path.isfile(model_path):
            logger.error("Unable to find specified model file" +
                         str(model_path))
            exit(1)

    face_detection_model = Face_detection(
        model_path_dict['FaceDetectionModel'],
        device_name,
        threshold=prob_threshold)
    facial_landmarks_detection_model = Landmark_Detection(
        model_path_dict['FacialLandmarksModel'],
        device_name,
        threshold=prob_threshold)
    head_pose_estimation_model = Head_pose(
        model_path_dict['HeadPoseEstimationModel'],
        device_name,
        threshold=prob_threshold)
    gaze_estimation_model = Gaze_estimation(
        model_path_dict['GazeEstimationModel'],
        device_name,
        threshold=prob_threshold)

    is_benchmarking = False

    if not is_benchmarking:
        mouse_controller = MouseController('medium', 'fast')

    start_model_load_time = time.time()
    face_detection_model.load_model()
    facial_landmarks_detection_model.load_model()
    head_pose_estimation_model.load_model()
    gaze_estimation_model.load_model()
    total_model_load_time = time.time() - start_model_load_time

    feeder.load_data()

    out_video = cv2.VideoWriter(os.path.join('output_video.mp4'),
                                cv2.VideoWriter_fourcc(*'avc1'),
                                int(feeder.get_fps() / 10), (1920, 1080), True)
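    # NOTE: cv2.VideoWriter silently drops frames whose size differs from the
    # one given at construction, so the input here is assumed to be 1920x1080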

    frame_count = 0
    start_inference_time = time.time()
    for ret, frame in feeder.next_batch():

        if not ret:
            break

        frame_count += 1

        key = cv2.waitKey(60)

        try:
            face_coords, image_copy = face_detection_model.predict(frame)

            if type(image_copy) == int:
                logger.warning("Unable to detect the face")
                if key == 27:
                    break
                continue

            left_eye, right_eye, eye_coords = facial_landmarks_detection_model.predict(
                image_copy)
            hp_output = head_pose_estimation_model.predict(image_copy)
            mouse_coords, gaze_coords = gaze_estimation_model.predict(
                left_eye, right_eye, hp_output)

        except Exception as e:
            logger.warning("Could predict using model" + str(e) +
                           " for frame " + str(frame_count))
            continue

        image = cv2.resize(frame, (500, 500))

        if bbox_flag:
            bbox_frame = draw_bbox(frame, bbox_flag, image_copy, left_eye,
                                   right_eye, face_coords, eye_coords,
                                   hp_output, gaze_coords)
            image = np.hstack(
                (cv2.resize(frame,
                            (500, 500)), cv2.resize(bbox_frame, (500, 500))))

        cv2.imshow('preview', image)
        out_video.write(frame)

        if frame_count % 5 == 0 and not is_benchmarking:
            mouse_controller.move(mouse_coords[0], mouse_coords[1])

        if key == 27:
            break

    total_time = time.time() - start_inference_time
    total_inference_time = round(total_time, 1)
    fps = frame_count / total_inference_time

    # Create the output directory if it does not already exist
    os.makedirs(output_path, exist_ok=True)

    with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps) + '\n')
        f.write(str(total_model_load_time) + '\n')

    logger.info('Model load time: ' + str(total_model_load_time))
    logger.info('Inference time: ' + str(total_inference_time))
    logger.info('FPS: ' + str(fps))

    logger.info('Video stream ended')
    cv2.destroyAllWindows()
    feeder.close()
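The stats.txt written above holds three lines: total inference time, FPS, and model load time. A minimal reader for that layout (the path is assumed to match the writer above):

# Read back the three-line stats file written by main()
with open('stats.txt') as f:
    total_inference_time, fps, model_load_time = (float(line) for line in f)
print('FPS:', fps)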
class MoveMouse:
    '''
    Main Class for the Mouse Controller app. 
    This is the class where all the models are stitched together to control the mouse pointer
    '''
    def __init__(self, args):
        '''
        This method initializes the instance variables for the MoveMouse class.

        Args:
        args = All arguments parsed by the arguments parser function

        Return:
        None
        '''

        init_start_time = time.time()
        self.output_path = args.output_path
        self.show_output = args.show_output
        self.total_processing_time = 0
        self.count_batch = 0
        self.inference_speed = []
        self.avg_inference_speed = 0

        if args.all_devices != 'CPU':
            args.face_device = args.all_devices
            args.face_landmark_device = args.all_devices
            args.head_pose_device = args.all_devices
            args.gaze_device = args.all_devices

        model_init_start = time.time()
        self.face_model = FaceDetection(args.face_model, args.face_device,
                                        args.face_device_ext,
                                        args.face_prob_threshold)
        self.landmarks_model = FacialLandmarksDetection(
            args.face_landmark_model, args.face_landmark_device,
            args.face_landmark_device_ext, args.face_landmark_prob_threshold)
        self.head_pose_model = HeadPoseEstimation(
            args.head_pose_model, args.head_pose_device,
            args.head_pose_device_ext, args.head_pose_prob_threshold)
        self.gaze_model = GazeEstimation(args.gaze_model, args.gaze_device,
                                         args.gaze_device_ext,
                                         args.gaze_prob_threshold)
        self.model_init_time = time.time() - model_init_start
        log.info('[ Main ] All required models initialized')

        self.mouse_control = MouseController(args.precision, args.speed)
        log.info('[ Main ] Mouse controller successfully initialized')

        self.input_feeder = InputFeeder(args.batch_size, args.input_type,
                                        args.input_file)
        log.info('[ Main ] Initialized input feeder')

        model_load_start = time.time()
        self.face_model.load_model()
        self.landmarks_model.load_model()
        self.head_pose_model.load_model()
        self.gaze_model.load_model()

        self.model_load_time = time.time() - model_load_start
        self.app_init_time = time.time() - init_start_time
        log.info('[ Main ] All models loaded to Inference Engine\n')

    def draw_face_box(self, frame, face_coords):
        '''
        Draws the face's bounding box on the input frame
        Args:
        frame = Input frame from video or camera feed. It could also be an input image
        face_coords = Bounding box coordinates of the detected face

        Return:
        frame = Frame with bounding box of faces drawn on it
        '''

        start_point = (face_coords[0][0], face_coords[0][1])
        end_point = (face_coords[0][2], face_coords[0][3])
        thickness = 5
        color = (255, 86, 0)

        frame = cv2.rectangle(frame, start_point, end_point, color, thickness)

        return frame

    def draw_eyes_boxes(self, frame, left_eye_coords, right_eye_coords):
        '''
        Draws bounding boxes around the left and right eyes on the input frame
        Args:
        frame = Input frame from video or camera feed. It could also be an input image
        left_eye_coords, right_eye_coords = Bounding box coordinates of each eye

        Return:
        frame = Frame with bounding boxes of the left and right eyes drawn on it
        '''

        left_eye_start_point = (left_eye_coords[0], left_eye_coords[1])
        left_eye_end_point = (left_eye_coords[2], left_eye_coords[3])
        right_eye_start_point = (right_eye_coords[0], right_eye_coords[1])
        right_eye_end_point = (right_eye_coords[2], right_eye_coords[3])
        thickness = 5
        color = (0, 210, 0)

        frame = cv2.rectangle(frame, left_eye_start_point, left_eye_end_point,
                              color, thickness)
        frame = cv2.rectangle(frame, right_eye_start_point,
                              right_eye_end_point, color, thickness)

        return frame

    def draw_outputs(self, frame):
        '''
        Draws the inference outputs (bounding boxes of the face and both eyes and 
        the 3D head pose directions) of the four models onto the frames.

        Args:
        frame = Input frame from video or camera feed. It could also be an input image

        Return:
        frame = Frame with all inference outputs drawn on it
        '''

        frame = self.draw_face_box(frame, self.face_coords)
        frame = self.draw_eyes_boxes(frame, self.left_eye_coords,
                                     self.right_eye_coords)

        frame_id = f'Batch id = {self.count_batch}'
        avg_inference_speed = f'Avg. inference speed = {self.avg_inference_speed:.3f}fps'
        total_processing_time = f'Total infer. time = {self.total_processing_time:.3f}s'

        cv2.putText(frame, frame_id, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.45,
                    (255, 86, 0), 1)
        cv2.putText(frame, avg_inference_speed, (15, 30),
                    cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 86, 0), 1)
        cv2.putText(frame, total_processing_time, (15, 45),
                    cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 86, 0), 1)

        return frame

    def run_inference(self, frame):
        '''
        Performs inference on the input video or image by passing it through all four
        models to get the desired coordinates for moving the mouse pointer.

        Args:
        frame = Input image, frame from video or camera feed

        Return:
        None
        '''

        self.input_feeder.load_data()

        for frame in self.input_feeder.next_batch():

            if self.input_feeder.frame_flag:
                log.info('[ Main ] Started processing a new batch')
                start_inference = time.time()
                self.face_coords, self.face_crop = self.face_model.predict(
                    frame)

                if self.face_coords == []:
                    log.info(
                        '[ Main ] No face detected.. Waiting for you to stare at the camera'
                    )
                    # The stats file handle is only opened further below, so
                    # log the error here instead of writing to the file
                    log.error('[ Error ] No face was detected')

                else:
                    self.head_pose_angles = self.head_pose_model.predict(
                        self.face_crop)
                    self.left_eye_coords, self.left_eye_image, self.right_eye_coords, self.right_eye_image = self.landmarks_model.predict(
                        self.face_crop)
                    self.x, self.y = self.gaze_model.predict(
                        self.left_eye_image, self.right_eye_image,
                        self.head_pose_angles)
                    log.info(
                        f'[ Main ] Relative pointer coordinates: [{self.x:.2f}, {self.y:.2f}]'
                    )

                    batch_process_time = time.time() - start_inference
                    self.total_processing_time += batch_process_time
                    self.count_batch += 1
                    log.info(
                        f'[ Main ] Finished processing batch. Time taken = {batch_process_time}s\n'
                    )

                    self.mouse_control.move(self.x, self.y)

                    if self.show_output:
                        frame = self.draw_outputs(frame)

                    cv2.imshow('Computer Pointer Controller Output', frame)
                    self.inference_speed.append(self.count_batch /
                                                self.total_processing_time)
                    self.avg_inference_speed = sum(self.inference_speed) / len(
                        self.inference_speed)
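                    # Each appended element is a running frames-per-second
                    # rate, so avg_inference_speed is an average of running
                    # averages rather than the plain count/total ratio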

                with open(os.path.join(self.output_path, 'outputs.txt'),
                          'w+') as f:
                    f.write('INFERENCE STATS\n')
                    f.write(
                        f'Total model initialization time : {self.model_init_time:.2f}s\n'
                    )
                    f.write(
                        f'Total model load time: {self.model_load_time:.2f}s\n'
                    )
                    f.write(
                        f'App initialization time: {self.app_init_time:.2f}s\n'
                    )
                    f.write(
                        f'Total processing time: {self.total_processing_time:.2f}s\n'
                    )
                    f.write(
                        f'Average inference speed: {self.avg_inference_speed:.2f}FPS\n'
                    )
                    f.write(f'Batch count: {self.count_batch}\n\n')

                    f.write('LAST OUTPUTS\n')
                    f.write(f'Face coordinates: {self.face_coords}\n')
                    f.write(f'Left eye coordinates: {self.left_eye_coords}\n')
                    f.write(
                        f'Right eye coordinates: {self.right_eye_coords}\n')
                    f.write(f'Head pose angles: {self.head_pose_angles}\n')
                    f.write(
                        f'Relative pointer coordinates/ Gaze vector: [{self.x:.2f}, {self.y:.2f}]'
                    )

            else:
                self.input_feeder.close()
                cv2.destroyAllWindows()

                log.info(
                    f'[ Main ] All input Batches processed in {self.total_processing_time:.2f}s'
                )
                log.info('[ Main ] Shutting down app...')
                log.info('[ Main ] Mouse controller app has been shut down.')
                break

        return
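All of the examples above share the same four-stage flow: detect the face, run head pose and facial landmarks on the crop, then feed both into gaze estimation. A compact, dependency-free sketch of that data flow, with stand-in predictor callables in place of the OpenVINO model wrappers:

# Stand-in pipeline skeleton; the four predict_* callables are placeholders
# for the face detection, head pose, landmark, and gaze estimation models
def process_frame(frame, predict_face, predict_pose, predict_landmarks,
                  predict_gaze):
    face_crop, face_coords = predict_face(frame)
    if face_crop is None:
        return None                          # no face: skip this frame
    angles = predict_pose(face_crop)         # yaw, pitch, roll
    left_eye, right_eye = predict_landmarks(face_crop)
    return predict_gaze(left_eye, right_eye, angles)  # (x, y) for mouse.move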