Example #1
def process_video(file_input, file_output, display_intermediate_output):
    if file_input is None:
        feed = InputFeeder(input_type='cam')
    else:
        feed = InputFeeder(input_type='video', input_file=file_input)

    feed.load_data()

    w = int(feed.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(feed.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(feed.cap.get(cv2.CAP_PROP_FPS))
    out = cv2.VideoWriter(file_output, cv2.VideoWriter_fourcc(*'avc1'), fps, (w, h), True)

    frame_counter = 0
    for batch in feed.next_batch():
        frame_counter += 1
        result, frame = process_single_frame(batch, display_intermediate_output)
        out.write(frame)

        logging.debug(f'Frame #{frame_counter} result: {result}')
        if isinstance(result, str) and result == 'No face detected':
            logging.warning('Frame %d: No face detected', frame_counter)
            continue  # skip mouse movement when no face was found

        if mouse_controller is not None:
            mouse_controller.move(result[0], result[1])

    out.release()
    feed.close()
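# A hedged sketch of the process_single_frame() helper assumed above; the real
# implementation is project-specific. The names face_detector, landmarks_detector,
# head_pose_estimator, and gaze_estimator are placeholders for module-level models.
# It returns (result, frame): result is either an (x, y) gaze tuple for the mouse
# controller or the string 'No face detected'.
def process_single_frame(frame, display_intermediate_output):
    face = face_detector.predict(frame)
    if face is None:
        return 'No face detected', frame
    left_eye, right_eye = landmarks_detector.predict(face)
    head_pose = head_pose_estimator.predict(face)
    gaze = gaze_estimator.predict(left_eye, right_eye, head_pose)
    if display_intermediate_output:
        cv2.imshow('cropped face', face)
    return gaze, frame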
Example #2
def main():    
    args = build_argparser().parse_args()        
    inputFile = args.input
    inputFeeder = None
    if inputFile.lower() == "cam":
        inputFeeder = InputFeeder("cam")
    else:
        if not os.path.isfile(inputFile):
            print("Unable to find input file")
            exit(1)
        inputFeeder = InputFeeder("video", inputFile)
    
    start_model_loading = time.time()
    detect, landmark, gaze, pose = init_models(args)
    inputFeeder.load_data()
    LoadModel(detect, landmark, gaze, pose)
    model_loading_time = time.time() - start_model_loading
    frame_count, inference_time = inference_frame(detect, pose, landmark, gaze, inputFeeder, args)
    fps = frame_count / inference_time

    print("video is complete!")
    print(f'Model took {model_loading_time} s to load')
    print(f'Inference time of the model is: {inference_time} s')
    print(f'Average inference time of the model is : {inference_time/frame_count} s')
    print(f'FPS is {fps/5} frame/second')

    cv2.destroyAllWindows()
    inputFeeder.close()
Example #3
def cam_or_video(inputs, inputFeeder):
    if inputs.lower() == "cam":
        return InputFeeder("cam")
    if not os.path.isfile(inputs):
        print("Unable to find input file")
        exit(1)
    return InputFeeder("video", inputs)
def init_feeder(args):
    input_feeder = None
    if args.input.lower() == "cam":
        input_feeder = InputFeeder("cam")
    else:
        if not os.path.isfile(args.input):
            logging.error("Unable to find specified video file")
            exit(1)
        input_feeder = InputFeeder("video", args.input)
    return input_feeder
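# A minimal, hedged usage sketch for init_feeder(); it assumes build_argparser()
# (seen in the other examples in this collection) exposes an --input argument.
if __name__ == '__main__':
    args = build_argparser().parse_args()
    feeder = init_feeder(args)
    feeder.load_data()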
Example #5
def input_feeder_func(input_file_path):
    # Checks for live feed
    if input_file_path == 'CAM':
        input_feeder = InputFeeder("cam")

    # Checks for video file
    else:
        assert os.path.isfile(
            input_file_path), "Specified input file doesn't exist"
        input_feeder = InputFeeder("video", input_file_path)

    return input_feeder
Example #6
    def __init__(self, args):

        # load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        logger = logging.getLogger()
        input_T = args.input_type
        input_F = args.input_file

        if input_T.lower() == 'cam':
            # open the video feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                logger.error(
                    "Unsupported file extension. Allowed: ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']"
                )
                exit(1)

        print("Models total loading time :",
              time.time() - start_models_load_time)

        # init mouse controller
        self.mouse_controller = MouseController('low', 'fast')
def check_source(filepath, logger):

    feeder = None

    if filepath.lower() == 'cam':
        feeder = InputFeeder('cam')
    else:
        if not os.path.isfile(filepath):
            logger.error("Unable to find specified video file")
            exit(1)

        feeder = InputFeeder('video', filepath)

    return feeder
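# Hedged usage sketch for check_source(); 'demo.mp4' is a placeholder path.
feeder = check_source('demo.mp4', logging.getLogger(__name__))
feeder.load_data()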
Example #8
def feedInput(input_path):
    feeder = None
    input_type = "cam"
    if input_path != 'CAM':
        assert os.path.isfile(input_path)
        if input_path.endswith(('.jpg', '.bmp', '.png')):
            input_type = "image"
        else:
            input_type = "video"
        feeder = InputFeeder(input_type=input_type, input_file=input_path)
    else:
        feeder = InputFeeder(input_type=input_type)

    return feeder
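# Hedged usage sketch for feedInput(); the path is a placeholder, and the input
# type is inferred from the file extension as shown above.
feeder = feedInput('bin/demo.mp4')
feeder.load_data()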
def run_inference(args):

    # load all models once, before iterating over frames
    faceDetection = FaceDetection(model_name=args.face_detection_model)
    faceDetection.load_model()

    facialLandmarksDetection = FacialLandmarksDetection(
        args.facial_landmarks_detection_model)
    facialLandmarksDetection.load_model()

    headPoseEstimation = HeadPoseEstimation(
        args.head_pose_estimation_model)
    headPoseEstimation.load_model()

    gazeEstimation = GazeEstimation(args.gaze_estimation_model)
    gazeEstimation.load_model()

    feed = InputFeeder(input_type='video', input_file=args.input)
    feed.load_data()
    for batch in feed.next_batch():
        cv2.imshow("Output", cv2.resize(batch, (500, 500)))
        key = cv2.waitKey(60)

        if key == 27:
            break

        # getting face
        face = faceDetection.predict(batch)

        # getting eyes
        left_eye, right_eye = facialLandmarksDetection.predict(face)

        # getting head pose angles
        head_pose = headPoseEstimation.predict(face)
        print("head pose angles: ", head_pose)

        # get mouse points
        mouse_coords = gazeEstimation.predict(left_eye, right_eye, head_pose)
        print("gaze  output: ", mouse_coords)
    feed.close()
Example #10
def main():
    # Load parameters
    params = get_args()

    mouse_prec = params['mouse_prec']
    mouse_speed = params['mouse_speed']
    mouse = MouseController(mouse_prec, mouse_speed)
    models = load_models(params)

    # Load input feed
    input_type = params['input_type']
    if input_type == 'cam':
        input_file = None
    else:
        input_file = params['input_file_path']

    feed = InputFeeder(input_type=input_type, input_file=input_file)
    feed.load_data()
    for batch in feed.next_batch():
        if batch is not None:
            image, pos = main_loop(batch, models)
            cv2.imshow('frame', image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            mouse.move(pos[0], pos[1])
            # break
        else:
            break
    feed.close()
def process_video(input_video, video_output, visualize):
    if input_video is None:
        feed = InputFeeder(input_type='cam')
    else:
        feed = InputFeeder(input_type='video', input_file=input_video)

    feed.load_data()

    w = int(feed.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(feed.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(feed.cap.get(cv2.CAP_PROP_FPS))
    fps = int(fps / 4)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(video_output, fourcc, fps, (w, h), True)

    frame_counter = 0
    for frame in feed.next_batch():
        if frame is not None:
            frame_counter += 1
            key = cv2.waitKey(10)
            result, output_frame = process_frame(frame, visualize)

            out.write(output_frame)

            print("Frame: {} result: {}".format(frame_counter, result))
            logger.info("Frame: {} result: {}".format(frame_counter, result))

            esc_code = 27
            if key == esc_code:
                break

            if mouse_controller is not None:
                try:
                    mouse_controller.move(result[0], result[1])

                except Exception as e:
                    print("Mouse controller exception:\n", e)
                    logger.info("Mouse controller exception:{}".format(e))

        else:
            break

    cv2.destroyAllWindows()
    out.release()
    feed.close()
    print("Saved the video")
    logger.info("Saved the video")
    def __init__(self, args):
        '''
        This method initializes instance variables for the whole
        gaze-estimation pipeline: models, mouse controller, and input feeder.

        Args:
        args = All arguments parsed by the argument parser function

        Return:
        None
        '''

        init_start_time = time.time()
        self.output_path = args.output_path
        self.show_output = args.show_output
        self.total_processing_time = 0
        self.count_batch = 0
        self.inference_speed = []
        self.avg_inference_speed = 0

        if args.all_devices != 'CPU':
            args.face_device = args.all_devices
            args.face_landmark_device = args.all_devices
            args.head_pose_device = args.all_devices
            args.gaze_device = args.all_devices

        model_init_start = time.time()
        self.face_model = FaceDetection(args.face_model, args.face_device,
                                        args.face_device_ext,
                                        args.face_prob_threshold)
        self.landmarks_model = FacialLandmarksDetection(
            args.face_landmark_model, args.face_landmark_device,
            args.face_landmark_device_ext, args.face_landmark_prob_threshold)
        self.head_pose_model = HeadPoseEstimation(
            args.head_pose_model, args.head_pose_device,
            args.head_pose_device_ext, args.head_pose_prob_threshold)
        self.gaze_model = GazeEstimation(args.gaze_model, args.gaze_device,
                                         args.gaze_device_ext,
                                         args.gaze_prob_threshold)
        self.model_init_time = time.time() - model_init_start
        log.info('[ Main ] All required models initialized')

        self.mouse_control = MouseController(args.precision, args.speed)
        log.info('[ Main ] Mouse controller successfully initialized')

        self.input_feeder = InputFeeder(args.batch_size, args.input_type,
                                        args.input_file)
        log.info('[ Main ] Initialized input feeder')

        model_load_start = time.time()
        self.face_model.load_model()
        self.landmarks_model.load_model()
        self.head_pose_model.load_model()
        self.gaze_model.load_model()

        self.model_load_time = time.time() - model_load_start
        self.app_init_time = time.time() - init_start_time
        log.info('[ Main ] All models loaded to Inference Engine\n')
def createInputFeeder(cmd_paras):
    if cmd_paras.input_type == CAM_INPUT_TYPE:
        input_file = None
    else:
        input_file = cmd_paras.input_file

    input_feeder = InputFeeder(input_type=cmd_paras.input_type, input_file=input_file)
    return input_feeder
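# CAM_INPUT_TYPE is not defined in this snippet; a plausible definition,
# matching the input-type string InputFeeder expects in the other examples:
CAM_INPUT_TYPE = 'cam'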
def start_pipeline(cla, codec):
    """
    Initializes the pipeline and feeds inputs to the models, moving the mouse cursor based on the final gaze estimation.
    :param cla: Command line arguments for configuring the pipeline.
    :param codec: Depending on the platform this is run on, OpenCV requires a codec to be specified. Supply it here.
    :return: None
    """
    preview_flags = cla.preview_flags

    logger = logging.getLogger()
    input_file_path = cla.input

    if input_file_path.lower() == "cam":
        in_feeder = InputFeeder("cam")
    elif not os.path.isfile(input_file_path):
        logger.error("Cannot locate video file provided. Exiting..")
        sys.exit(1)
    else:
        in_feeder = InputFeeder("video", input_file_path)

    start_model_load_time = time.time()
    fdm, fldm, hpem, gem = prep_models(cla)
    total_model_load_time = time.time() - start_model_load_time

    mc = None
    if not cla.is_benchmark:
        mc = MouseController('medium', 'fast')

    in_feeder.load_data()

    fps, total_inference_time, total_time = handle_input_feed(
        logger, preview_flags, fdm, fldm, hpem, gem, mc, in_feeder,
        cla.frame_out_rate, codec, cla.output_path)

    with open(os.path.join(cla.output_path, 'stats.txt'), 'w') as f:
        f.write("Total inference time, " + str(total_inference_time) + '\n')
        f.write("FPS, " + str(fps) + '\n')
        f.write("Total model load time, " + str(total_model_load_time) + '\n')
        f.write("Total time, " + str(total_time) + '\n')

    logger.error("Video stream ended...")
    cv2.destroyAllWindows()
    in_feeder.close()
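# Hedged usage sketch for start_pipeline(); the codec is platform dependent
# (MJPG is a common choice for .avi output), and build_argparser() is assumed
# from the other examples in this collection.
if __name__ == '__main__':
    start_pipeline(build_argparser().parse_args(), cv2.VideoWriter_fourcc(*'MJPG'))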
def setup(args):
    global input_path, output_path, device, cpu_extension, prob_threshold, flags, mouse_controller, feeder, video_writer, model_dict, model_loading_total_time
    model_args = [
        args.face_detection_model,
        args.facial_landmarks_detection_model,
        args.head_pose_estimation_model,
        args.gaze_estimation_model,
    ]
    model_class = [
        Model_FaceDetection,
        Model_FacialLandMarkDetection,
        Model_HeadPoseEstimation,
        Model_GazeEstimation,
    ]
    input_path = input_path_generator(args.input) if args.input != "CAM" else None
    output_path = output_path_generator(args.output)
    device = args.device
    cpu_extension = args.cpu_extension
    prob_threshold = args.prob_threshold
    flags = args.flags
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    mouse_controller = MouseController("low", "fast")
    if input_path:
        if input_path.endswith(".jpg"):
            feeder = InputFeeder("image", input_path)
        else:
            feeder = InputFeeder("video", input_path)
    else:
        feeder = InputFeeder("cam")
    feeder.load_data()
    fps = feeder.fps()
    initial_w, initial_h, video_len = feeder.frame_initials_and_length()
    video_writer = cv2.VideoWriter(
        os.path.join(output_path, "output_video.mp4"),
        cv2.VideoWriter_fourcc(*"avc1"),
        fps / 10,
        (initial_w, initial_h),
        True,
    )
    model_dict, model_loading_total_time = generate_model_dict(model_args, model_class)
    return
Example #16
def process_image(file_path, file_output, display_intermediate_output):
    feed = InputFeeder(input_type='image', input_file=file_path)
    feed.load_data()
    for batch in feed.next_batch():
        result, image = process_single_frame(batch, display_intermediate_output)
        # cv2.imshow('demo image', image)
        cv2.imwrite(file_output, image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    feed.close()
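# Hedged usage sketch for process_image(); both paths are placeholders and
# process_single_frame() is the helper assumed in Example #1.
process_image('bin/demo.jpg', 'bin/demo_out.jpg', display_intermediate_output=False)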
Example #17
def main(args):
    inference = Inference(args.model)
    inference.load_model()

    input_path = args.input
    is_image = False  # initialized up front; only set True for image inputs

    if input_path == 0:  # the argparser presumably yields 0 for webcam input
        input_feeder = InputFeeder('cam', input_path)
    elif input_path.endswith(('.jpg', '.jpeg', '.bmp')):
        input_feeder = InputFeeder('image', input_path)
        is_image = True
    else:
        input_feeder = InputFeeder('video', input_path)

    input_feeder.load_data()

    if is_image:
        outputs = inference.predict(input_feeder.cap)

        inference.preprocess_output(outputs)
        return 0

    frames = 0
    for ret, frame in input_feeder.next_batch():

        if not ret:
            break

        frames += 1

        key = cv2.waitKey(60)
        if key == 27:
            break

        outputs = inference.predict(frame)

        inference.preprocess_output(outputs)

    input_feeder.close()
Example #18
def main():
	args = build_argparser().parse_args()
	visual_flags = args.flag_visualization
	input_path = args.input

	Dict_model_path = {
		'Face': args.face_detection_path,
		'Landmarks': args.facial_landmarks_path,
		'Headpose': args.head_pose_path,
		'Gaze': args.gaze_estimation_path
	}

	if input_path == "CAM" or input_path=="cam":
		print("\n## You are using CAMERA right now..." + input_path.lower() + " detected!")
		input_feeder = InputFeeder(input_path.lower())		
	else:
		if not os.path.isfile(input_path):
			print("\n## Input file not exists in Path: " + input_path + ". Please check again !!!")
			exit(1)
		else:
			print('\n## Input path exists: '+ input_path + '\n')
			input_feeder = InputFeeder("video", input_path)

	for model_key in Dict_model_path.keys():
		print(Dict_model_path[model_key])
		if not os.path.isfile(Dict_model_path[model_key]):
			print("\n## " + model_key + " Model path does not exist: " + Dict_model_path[model_key] + '. Please try again!')
			exit(1)
		else:
			print('## ' + model_key + " Model path is correct: " + Dict_model_path[model_key])


	print(input_feeder)
	print(input_path)
	print(visual_flags)
	print(args.cpu_extension)
	print(args.device)
	print(args.prob_threshold)
Example #19
def main(args):

    mouse_controller = MouseController('medium', 'fast')

    print("Model Loading..")

    face_detection = Model_FaceDetection(args.face_detection, args.device)
    face_landmark = Model_FacialLandmarksDetection(args.face_landmark, args.device)
    head_pose = Model_HeadPoseEstimation(args.head_pose, args.device)
    gaze_estimation = Model_GazeEstimation(args.gaze_estimation, args.device)
    
    print("Model loaded successfully")

    input_feeder  = InputFeeder(input_type='video', input_file=args.input)
    input_feeder.load_data()

    face_detection.load_model()
    head_pose.load_model()
    face_landmark.load_model()
    gaze_estimation.load_model()

    for frame in input_feeder.next_batch():
        if frame is None:
            break

        key = cv2.waitKey(60)

        face, face_coord = face_detection.predict(frame.copy(), args.prob_threshold)

        if isinstance(face, int):
            print("Unable to detect the face.")
            if key == 27:
                break
            continue

        headPose = head_pose.predict(face.copy())

        left_eye, right_eye, eye_coord = face_landmark.predict(face.copy())

        mouse_coord, gaze_vector = gaze_estimation.predict(left_eye, right_eye, headPose)

        cv2.imshow('video', frame)
        mouse_controller.move(mouse_coord[0], mouse_coord[1])


    input_feeder.close()
    cv2.destroyAllWindows()
Example #20
def main(model_dir, device, precision, input_type, input_file, inspect):
    mouse_controller = MouseController("medium", "fast")
    input_feeder = InputFeeder(input_type=input_type, input_file=input_file)
    input_feeder.load_data()

    gaze_detect = GazeDetect(model_dir=model_dir, device=device, precision=precision)
    gaze_detect.load_model()

    for image in input_feeder.next_batch():
        with Timer() as t:
            outputs = gaze_detect.predict(image)
        if outputs is not None:
            angle_y_fc, angle_p_fc, angle_r_fc = outputs.reshape(3)
            mouse_controller.move(-angle_y_fc, angle_p_fc)
            print(
                f"Mouse move x: {-angle_y_fc}, y: {angle_p_fc}, execution time: {t.elapsed}"
            )
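# The Timer context manager used in the loop above is not shown; a minimal
# sketch that matches its usage (an .elapsed attribute after the with-block),
# assuming the standard-library time module is imported:
class Timer:
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.perf_counter() - self.start
        return False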
    def __init__(self, args):
        self.log_level = "INFO" if os.environ.get(
            "LOGLEVEL") == "INFO" or args.verbose_stage else "WARNING"
        log.basicConfig(level=self.log_level)

        input_type = 'cam' if args.cam else 'video'
        self.feed = InputFeeder(input_type, args.video)
        if not self.feed.load_data():
            raise Exception('Invalid input: provide a valid image or video file')

        fps, w, h = self.feed.get_props()
        self.out_video = cv2.VideoWriter(args.out,
                                         cv2.VideoWriter_fourcc(*'MJPG'), fps,
                                         (w, h), True)

        args.head_pose_model = os.path.join(
            args.head_pose_model, args.precision,
            os.path.basename(args.head_pose_model))
        args.landmarks_model = os.path.join(
            args.landmarks_model, args.precision,
            os.path.basename(args.landmarks_model))
        args.gaze_model = os.path.join(args.gaze_model, args.precision,
                                       os.path.basename(args.gaze_model))

        self.fd = FaceDetect(args.face_model, args.device, args.extension,
                             args.threshold)
        self.fd.load_model()
        self.fd.set_out_size(w, h)

        self.hp = HeadPoseEstimate(args.head_pose_model, args.device,
                                   args.extension, args.threshold)
        self.hp.load_model()

        self.fl = FacialLandMarkDetect(args.landmarks_model, args.device,
                                       args.extension, args.threshold)
        self.fl.load_model()

        self.gz = GazeEstimate(args.gaze_model, args.device, args.extension,
                               args.threshold)
        self.gz.load_model()

        self.mc = MouseController()
        self.verbose_stage = args.verbose_stage
def main():
    """
    Load the network and parse the output.
    :return: None
    """
    # Grab command line args
    args = build_argparser().parse_args()
        
    start_time = time.time()
    face_detector = FaceDetect(model_name=args.face, device=args.device, output=args.output)
    face_detector.load_model()
    print("Time taken to load face detection model (in seconds):", time.time()-start_time)

    start_time = time.time()
    eyes_detector = EyesDetect(model_name=args.eyes, device=args.device, output=args.output)
    eyes_detector.load_model()
    print("Time taken to load landmark detection model (in seconds):", time.time()-start_time)

    start_time = time.time()
    angle_detector = AngleDetect(model_name=args.angle, device=args.device)
    angle_detector.load_model()
    print("Time taken to load head pose estimation model (in seconds):", time.time()-start_time)

    start_time = time.time()
    gaze_detector = GazeDetect(model_name=args.gaze, device=args.device)
    gaze_detector.load_model()
    print("Time taken to load gaze estimation model (in seconds):", time.time()-start_time)

    mouse_controller = MouseController('medium','medium')
    
    feed = InputFeeder(input_type=args.video, input_file=args.input)
    feed.load_data()
    for batch in feed.next_batch():
        if batch is None: # catch last frame
            break
        face = face_detector.predict(batch)
        left_eye, right_eye = eyes_detector.predict(face)
        angles = angle_detector.predict(face)
        x, y = gaze_detector.predict(left_eye, right_eye, angles)
        mouse_controller.move(x, y)

    feed.close()
Example #23
    def run(self, args):
        inputFeeder = InputFeeder(args.input)
        i = 0
        objectsDetection = ObjectsDetection()
        frame = None
        # create the generator once; calling next_batch() anew on each
        # iteration would restart the stream every time
        batches = inputFeeder.next_batch()
        while self.execute:
            try:
                frame = next(batches)
            except StopIteration:
                logging.error('Failed to obtain input stream.')
                break
            if frame is None:
                break
            objectsDetection.inputs(frame)
            objectsDetection.wait()
            outputs = objectsDetection.outputs()
            print(frame)
            i = i + 1

        print(i)
        inputFeeder.close()
def main(args):
    feed = InputFeeder(input_type=args.it, input_file=args.i)

    face_model = FaceDetectionModel(args.fm, args.d, args.c, float(args.p))
    face_model.load_model()

    landmarks_model = LandmarksDetectionModel(args.lm, args.d, args.c)
    landmarks_model.load_model()

    headpose_model = HeadPoseDetectionModel(args.hpm, args.d, args.c)
    headpose_model.load_model()

    gaze_model = GazeEstimationModel(args.gem, args.d, args.c)
    gaze_model.load_model()

    mouse = MouseController("medium", "fast")

    feed.load_data()
    for batch in feed.next_batch():
        cropped_face, coords, _ = face_model.predict(batch)
        cv2.rectangle(batch, (coords[0], coords[1]), (coords[2], coords[3]),
                      (255, 0, 0), 2)

        left_eye, right_eye, eyes_coords, _ = landmarks_model.predict(
            cropped_face)

        head_pose_angles, _ = headpose_model.predict(cropped_face)
        x, y, z, _ = gaze_model.predict(left_eye, right_eye, head_pose_angles,
                                        cropped_face, eyes_coords)

        mouse.move(x, y)

        cv2.imshow("img", batch)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    feed.close()
Example #25
def main():

    args = build_argparser().parse_args()
    logger = logging.getLogger('main')

    model_path_dict = {
        'FaceDetectionModel': args.faceDetectionModel,
        'FacialLandmarksModel': args.facialLandmarksModel,
        'HeadPoseEstimationModel': args.headPoseEstimationModel,
        'GazeEstimationModel': args.gazeEstimationModel
    }

    bbox_flag = args.bbox_flag
    input_filename = args.input
    device_name = args.device
    prob_threshold = args.prob_threshold
    output_path = args.output_path

    if input_filename.lower() == 'cam':
        feeder = InputFeeder(input_type='cam')
    else:
        if not os.path.isfile(input_filename):
            logger.error("Unable to find specified video file")
            exit(1)
        feeder = InputFeeder(input_type='video', input_file=input_filename)

    for model_path in list(model_path_dict.values()):
        if not os.path.isfile(model_path):
            logger.error("Unable to find specified model file" +
                         str(model_path))
            exit(1)

    face_detection_model = Face_detection(
        model_path_dict['FaceDetectionModel'],
        device_name,
        threshold=prob_threshold)
    facial_landmarks_detection_model = Landmark_Detection(
        model_path_dict['FacialLandmarksModel'],
        device_name,
        threshold=prob_threshold)
    head_pose_estimation_model = Head_pose(
        model_path_dict['HeadPoseEstimationModel'],
        device_name,
        threshold=prob_threshold)
    gaze_estimation_model = Gaze_estimation(
        model_path_dict['GazeEstimationModel'],
        device_name,
        threshold=prob_threshold)

    is_benchmarking = False

    if not is_benchmarking:
        mouse_controller = MouseController('medium', 'fast')

    start_model_load_time = time.time()
    face_detection_model.load_model()
    facial_landmarks_detection_model.load_model()
    head_pose_estimation_model.load_model()
    gaze_estimation_model.load_model()
    total_model_load_time = time.time() - start_model_load_time

    feeder.load_data()

    out_video = cv2.VideoWriter('output_video.mp4',
                                cv2.VideoWriter_fourcc(*'avc1'),
                                int(feeder.get_fps() / 10), (1920, 1080), True)

    frame_count = 0
    start_inference_time = time.time()
    for ret, frame in feeder.next_batch():

        if not ret:
            break

        frame_count += 1

        key = cv2.waitKey(60)

        try:
            face_coords, image_copy = face_detection_model.predict(frame)

            if isinstance(image_copy, int):
                logger.warning("Unable to detect the face")
                if key == 27:
                    break
                continue

            left_eye, right_eye, eye_coords = facial_landmarks_detection_model.predict(
                image_copy)
            hp_output = head_pose_estimation_model.predict(image_copy)
            mouse_coords, gaze_coords = gaze_estimation_model.predict(
                left_eye, right_eye, hp_output)

        except Exception as e:
            logger.warning("Could not predict using model " + str(e) +
                           " for frame " + str(frame_count))
            continue

        image = cv2.resize(frame, (500, 500))

        if bbox_flag:
            bbox_frame = draw_bbox(frame, bbox_flag, image_copy, left_eye,
                                   right_eye, face_coords, eye_coords,
                                   hp_output, gaze_coords)
            image = np.hstack(
                (cv2.resize(frame,
                            (500, 500)), cv2.resize(bbox_frame, (500, 500))))

        cv2.imshow('preview', image)
        out_video.write(frame)

        if frame_count % 5 == 0 and not is_benchmarking:
            mouse_controller.move(mouse_coords[0], mouse_coords[1])

        if key == 27:
            break

    total_time = time.time() - start_inference_time
    total_inference_time = round(total_time, 1)
    fps = frame_count / total_inference_time

    try:
        os.mkdir(output_path)
    except OSError as error:
        logger.error(error)

    with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps) + '\n')
        f.write(str(total_model_load_time) + '\n')

    logger.info('Model load time: ' + str(total_model_load_time))
    logger.info('Inference time: ' + str(total_inference_time))
    logger.info('FPS: ' + str(fps))

    logger.info('Video stream ended')
    cv2.destroyAllWindows()
    feeder.close()
Example #26
def main():
    args = build_argparser().parse_args()

    frame_num = 0
    inference_time = 0
    counter = 0

    # Initialize the Inference Engine
    fd = FaceDetection()
    fld = Facial_Landmarks_Detection()
    ge = Gaze_Estimation()
    hp = Head_Pose_Estimation()

    # Load Models
    fd.load_model(args.face_detection_model, args.device, args.cpu_extension)
    fld.load_model(args.facial_landmark_model, args.device, args.cpu_extension)
    ge.load_model(args.gaze_estimation_model, args.device, args.cpu_extension)
    hp.load_model(args.head_pose_model, args.device, args.cpu_extension)

    # Mouse Controller precision and speed
    mc = MouseController('medium', 'fast')

    # feed input from an image, webcam, or video to model
    if args.input == "cam":
        feed = InputFeeder("cam")
    else:
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
        feed = InputFeeder("video", args.input)
    feed.load_data()
    frame_count = 0
    for frame in feed.next_batch():
        frame_count += 1
        inf_start = time.time()
        if frame is not None:
            try:
                key = cv2.waitKey(60)

                # make predictions
                detected_face, face_coords = fd.predict(
                    frame.copy(), args.prob_threshold)
                hp_output = hp.predict(detected_face.copy())
                left_eye, right_eye, eye_coords = fld.predict(
                    detected_face.copy())
                new_mouse_coord, gaze_vector = ge.predict(
                    left_eye, right_eye, hp_output)

                stop_inference = time.time()
                det_time = stop_inference - inf_start  # actual per-frame inference time
                inference_time = inference_time + stop_inference - inf_start
                counter = counter + 1

                # Visualization
                preview = args.visualization
                if preview:
                    preview_frame = frame.copy()
                    face_frame = detected_face.copy()

                    draw_face_bbox(preview_frame, face_coords)
                    display_hp(preview_frame, hp_output, face_coords)
                    draw_landmarks(face_frame, eye_coords)
                    draw_gaze(face_frame, gaze_vector, left_eye.copy(),
                              right_eye.copy(), eye_coords)

                if preview:
                    img = np.hstack((cv2.resize(preview_frame, (500, 500)),
                                     cv2.resize(face_frame, (500, 500))))
                else:
                    img = cv2.resize(frame, (500, 500))

                cv2.imshow('Visualization', img)

                # set speed
                if frame_count % 5 == 0:
                    mc.move(new_mouse_coord[0], new_mouse_coord[1])

                # INFO
                log.info("NUMBER OF FRAMES: {} ".format(frame_num))
                log.info("INFERENCE TIME: {}ms".format(det_time * 1000))

                frame_num += 1

                if key == 27:
                    break
            except Exception:
                print(
                    'Unsupported image or video file format. Please provide a supported video format.'
                )
                exit()
    feed.close()
def main():

    args = build_argparser().parse_args()
    visual = args.visual_flag
    log = logging.getLogger()
    input_source = args.input_source

    video_path = getattr(args, 'input_path', None)
    feed = None
    if input_source.lower() == 'cam':
        feed = InputFeeder('cam')
    elif input_source.lower() == 'video' and os.path.isfile(video_path):
        feed = InputFeeder('video', video_path)
    else:
        log.error('Wrong input feed. (check the video path).')
        exit(1)

    fd = Model_Face(args.face_detection_model, args.device, args.extension)
    hp = Model_HeadPose(args.head_pose_model, args.device, args.extension)
    fl = Model_Faciallandmark(args.facial_landmarks_model, args.device,
                              args.extension)
    ga = Model_Gaze(args.gaze_model, args.device, args.extension)
    ### You can specify the precision and speed values directly,
    ### or use the presets:
    ###   precision: 'high' (100), 'low' (1000), 'medium', 'low-med'
    ###   speed:     'fast' (1), 'slow' (10), 'medium', 'slow-med'
    # mouse = MouseController('low-med', 'slow-med')
    mouse = MouseController(500, 4)

    feed.load_data()

    # load models
    fd.load_model()
    hp.load_model()
    fl.load_model()
    ga.load_model()
    count = 0
    for ret, frame in feed.next_batch():
        if not ret:
            break
        count += 1
        if count % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))
        key = cv2.waitKey(60)
        frame_cp = frame.copy()
        face, face_position = fd.predict(frame_cp, args.threshold)
        if isinstance(face, int):
            log.error('Prediction Error: cannot find face.')
            if key == 27:
                break
            continue
        face_cp = face.copy()
        hp_output = hp.predict(face_cp)
        left_eye, right_eye, facial = fl.predict(face_cp)
        #         print('left',left_eye,'\n','right',right_eye,'\n')
        mouse_coord, gaze_vector = ga.predict(left_eye, right_eye, hp_output)

        if visual:
            visual_frame = frame.copy()
            ### Visual FLAGS
            # face detection
            if 'fd' in visual:
                visual_frame = face
            # Head pose
            if 'hp' in visual:
                cv2.putText(
                    visual_frame,
                    "Yaw: {:.2f} Pitch: {:.2f} Roll: {:.2f}".format(
                        hp_output[0], hp_output[1], hp_output[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 255, 50), 1)
            # Facial landmarks
            if 'fl' in visual:
                cv2.rectangle(face, (facial[0][0] - 10, facial[0][1] - 10),
                              (facial[0][2] + 10, facial[0][3] + 10),
                              (255, 0, 0), 3)
                cv2.rectangle(face, (facial[1][0] - 10, facial[1][1] - 10),
                              (facial[1][2] + 10, facial[1][3] + 10),
                              (255, 0, 0), 3)
            # Gaze estimation
            if 'ga' in visual:
                x, y, w = int(gaze_vector[0] * 12), int(gaze_vector[1] *
                                                        12), 160
                le = cv2.line(left_eye.copy(), (x - w, y - w), (x + w, y + w),
                              (255, 255, 0), 2)
                cv2.line(le, (x - w, y + w), (x + w, y - w), (255, 50, 150), 2)
                re = cv2.line(right_eye.copy(), (x - w, y - w), (x + w, y + w),
                              (255, 255, 0), 2)
                cv2.line(re, (x - w, y + w), (x + w, y - w), (255, 50, 150), 2)
                face[facial[0][1]:facial[0][3], facial[0][0]:facial[0][2]] = le
                face[facial[1][1]:facial[1][3], facial[1][0]:facial[1][2]] = re
            cv2.namedWindow('Visualization', cv2.WINDOW_AUTOSIZE)
            cv2.moveWindow('Visualization', 900, 900)
            cv2.imshow('Visualization', cv2.resize(visual_frame, (500, 500)))
            if args.visual_save.lower() == 'y':
                if count % 10 == 0:
                    cv2.imwrite(str(count) + '_visual.jpg', visual_frame)
        if count % 5 == 0:
            mouse.move(mouse_coord[0], mouse_coord[1])
        if key == 27:
            break

    log.info('Ended!')
    cv2.destroyAllWindows()
    feed.close()
Example #28
def infer_on_stream(args, model):
    '''

    :param args: argparser arguments
    :param model: loaded model
    '''

    # get the loaded model instance
    objectDetection = model

    # Handle the input stream
    # Check if the input is a webcam or video or image
    if args.input == 'cam':
        feed = InputFeeder(input_type='cam', flip=1)
        feed.set_camera_properties(args.width, args.height, args.fps)
    elif args.input == 'picam':
        feed = InputFeeder(input_type='picam')
        feed.set_camera_properties(args.width, args.height, args.fps)
    elif args.input.endswith('.jpg') or args.input.endswith(
            '.bmp') or args.input.endswith('.png'):
        feed = InputFeeder(input_type='image', input_file=args.input)
    elif args.input.endswith('.mp4'):
        feed = InputFeeder(input_type='video', input_file=args.input)
    else:
        print(
            "ERROR: Invalid input, it must be CAM, image (.jpg, .bmp or .png) or video (.mp4)!"
        )
        raise NotImplementedError

    feed.load_data()

    # run-time switches
    ui_marking = True
    fps_marking = False
    label_background_color = (125, 175, 75)
    label_text_color = (255, 255, 255)  # white text

    cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
    cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    # Start recording of output saving is enabled
    if args.save_output:
        now = datetime.datetime.now()
        out = cv2.VideoWriter(now.strftime("out-%Y%m%d-%H%M%S.avi"),
                              cv2.VideoWriter_fourcc(*'MJPG'), 15,
                              (args.width, args.height))

    for batch in feed.next_batch():
        if batch is None:
            continue
        # start measuring overall execution time
        start_processing_time = time.time()
        # 1) First detect objects on the image
        start_object_infer_time = time.time()  # time measurement started
        objects = objectDetection.predict(batch)
        total_object_infer_time = time.time() - start_object_infer_time  # time measurement finished

        # executed only if there are objects on the image
        if len(objects) > 0:

            # if UI marking is turned on draw the vectors, rectangles, etc
            if ui_marking:
                # objects bounding boxes
                for obj in objects:
                    # draw the bounding box
                    cv2.rectangle(batch, (obj['xmin'], obj['ymin']),
                                  (obj['xmax'], obj['ymax']), obj['color'], 2)
                    # prepare the label
                    label_text = f"{obj['class']}: {obj['confidence']*100:.3}%"
                    label_size = cv2.getTextSize(label_text,
                                                 cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                                 1)[0]
                    label_left = obj['xmin']
                    label_top = obj['ymin'] - label_size[1]
                    if (label_top < 1):
                        label_top = 1
                    label_right = label_left + label_size[0]
                    label_bottom = label_top + label_size[1] - 3
                    cv2.rectangle(batch, (label_left - 1, label_top - 6),
                                  (label_right + 1, label_bottom + 1),
                                  label_background_color, -1)
                    cv2.putText(batch, label_text, (label_left, label_bottom),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                label_text_color, 1)

        # Measure overall FPS
        total_processing_time = time.time() - start_processing_time
        if total_processing_time == 0:
            total_processing_time = 0.001  # handle zero division
        total_fps = 1 / (total_processing_time)

        # if FPS marking run time switch is turned on print some details on the image
        if fps_marking:
            label_text = f"FPS: {total_fps:.3}"
            cv2.putText(batch, label_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 1)
            label_text = f"Object detection inference time: {total_object_infer_time*1000:.4}ms"
            cv2.putText(batch, label_text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 1)

        # Show the output image and save the output video
        cv2.imshow('Frame', batch)
        if args.save_output:
            out.write(batch)

        # Press q on keyboard to exit
        # Press u on keyboard to toggle ui drawings
        # Press f on keyboard to toggle fps drawings
        ret = cv2.waitKey(20)
        if ret & 0xFF == ord('q'):
            break
        elif ret & 0xFF == ord('u'):
            ui_marking = not ui_marking
        elif ret & 0xFF == ord('f'):
            fps_marking = not fps_marking

    # close the feed when stopping and finish the video saving
    feed.close()
    if args.save_output:
        out.release()
def main():

    # Grab command line args
    args = build_argparser().parse_args()
    flags = args.models_outputs_flags

    logger = logging.getLogger()
    input_file_path = args.input
    input_feeder = None
    if input_file_path.lower() == "cam":
        input_feeder = InputFeeder("cam")
    else:
        if not os.path.isfile(input_file_path):
            logger.error("Unable to find specified video file")
            exit(1)
        input_feeder = InputFeeder("video", input_file_path)

    model_path_dict = {
        'FaceDetection': args.face_detection_model,
        'FacialLandmarks': args.facial_landmarks_model,
        'GazeEstimation': args.gaze_estimation_model,
        'HeadPoseEstimation': args.head_pose_estimation_model
    }

    for file_name_key in model_path_dict.keys():
        if not os.path.isfile(model_path_dict[file_name_key]):
            logger.error("Unable to find specified " + file_name_key +
                         " xml file")
            exit(1)

    fdm = FaceDetection(model_path_dict['FaceDetection'], args.device,
                        args.cpu_extension)
    flm = FacialLandmarks(model_path_dict['FacialLandmarks'], args.device,
                          args.cpu_extension)
    gem = GazeEstimation(model_path_dict['GazeEstimation'], args.device,
                         args.cpu_extension)
    hpem = HeadPoseEstimation(model_path_dict['HeadPoseEstimation'],
                              args.device, args.cpu_extension)

    mc = MouseController('medium', 'fast')

    input_feeder.load_data()
    fdm.load_model()
    flm.load_model()
    hpem.load_model()
    gem.load_model()

    frame_count = 0
    for ret, frame in input_feeder.next_batch():
        if not ret:
            break
        frame_count += 1
        if frame_count % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(60)
        cropped_face, face_coords = fdm.predict(frame, args.prob_threshold)
        if isinstance(cropped_face, int):
            logger.error("Unable to detect any face.")
            if key == 27:
                break
            continue

        hp_output = hpem.predict(cropped_face)

        left_eye_img, right_eye_img, eye_coords = flm.predict(cropped_face)

        new_mouse_coord, gaze_vector = gem.predict(left_eye_img, right_eye_img,
                                                   hp_output)

        if flags:
            preview_frame = frame
            if 'fd' in flags:
                preview_frame = cropped_face
            if 'fld' in flags:
                cv2.rectangle(cropped_face,
                              (eye_coords[0][0] - 10, eye_coords[0][1] - 10),
                              (eye_coords[0][2] + 10, eye_coords[0][3] + 10),
                              (0, 255, 0), 3)
                cv2.rectangle(cropped_face,
                              (eye_coords[1][0] - 10, eye_coords[1][1] - 10),
                              (eye_coords[1][2] + 10, eye_coords[1][3] + 10),
                              (0, 255, 0), 3)

            if 'hp' in flags:
                cv2.putText(
                    preview_frame,
                    "Pose Angles: yaw:{:.2f} | pitch:{:.2f} | roll:{:.2f}".
                    format(hp_output[0], hp_output[1], hp_output[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (0, 255, 0), 1)
            if 'ge' in flags:
                x, y, w = int(gaze_vector[0] * 12), int(gaze_vector[1] *
                                                        12), 160
                left_eye = cv2.line(left_eye_img, (x - w, y - w),
                                    (x + w, y + w), (255, 0, 255), 2)
                cv2.line(left_eye, (x - w, y + w), (x + w, y - w),
                         (255, 0, 255), 2)
                right_eye = cv2.line(right_eye_img, (x - w, y - w),
                                     (x + w, y + w), (255, 0, 255), 2)
                cv2.line(right_eye, (x - w, y + w), (x + w, y - w),
                         (255, 0, 255), 2)
                cropped_face[eye_coords[0][1]:eye_coords[0][3],
                             eye_coords[0][0]:eye_coords[0][2]] = left_eye
                cropped_face[eye_coords[1][1]:eye_coords[1][3],
                             eye_coords[1][0]:eye_coords[1][2]] = right_eye

            cv2.imshow("Visualization", cv2.resize(preview_frame, (500, 500)))

        if frame_count % 5 == 0:
            mc.move(new_mouse_coord[0], new_mouse_coord[1])
        if key == 27:
            break
    logger.error("VideoStream ended...")
    cv2.destroyAllWindows()
    input_feeder.close()
def main(args):

    # getting the arguments
    perf_counts = args.get_perf_counts.lower() == "true"
    precision = args.precision.lower()
    speed = args.speed.lower()
    media_type = args.media_type.lower()
    media_path = args.media_file
    toggle_ui = args.show_video
    print(toggle_ui)
    batch_size = args.batch_size
    device = args.device
    iterations = 1 if media_type == "cam" else int(args.iterations)

    #initialize the mouse object
    mouse = MouseController(precision, speed)

    # Initialize the input feeder
    feed = InputFeeder(media_type, batch_size, media_path)

    # Initialize and load the inference models
    model = Model(face_detection, facial_landmarks, gaze_estimation,
                  head_pose_estimation, device)
    model.load_models()

    for _ in range(iterations):

        feed.load_data()

        # Running totals used to compute the average preprocessing and
        # inference time of each model stage
        times = np.zeros((8, ))
        stage_names = [
            "Preprocess Face Detection", "Inference Face Detection",
            "Preprocess Facial Landmarks", "Inference Facial Landmarks",
            "Preprocess Head Pose", "Inference Head Pose",
            "Preprocess Gaze Estimation", "Inference Gaze Estimation"
        ]
        counter_frames = 0

        if media_type != "image":
            width = feed.cap.get(3)
            height = feed.cap.get(4)
        else:
            height, width, _ = feed.cap.shape
        try:
            for frame in feed.next_batch(media_type):
                counter_frames += 1
                #generates the prediction
                x, y, gaze_vector, times = model.predict(
                    frame, width, height, times)
                #generates the movement on the cursor
                mouse.move(x, y)

                if perf_counts:
                    # overlay and print the running average time of each stage
                    for idx, (name, stage_time) in enumerate(zip(stage_names, times)):
                        text = name + ": " + str(stage_time / counter_frames * 1000) + " ms"
                        cv2.putText(frame, text, (0, 50 * (idx + 1)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (209, 80, 0), 3)
                        print(text)

                if toggle_ui:
                    cv2.imshow("Frame", frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                if key == ord('i'):
                    toggle_ui = not toggle_ui

        except Exception:
            print("Video has ended or couldn't continue")
        if perf_counts:
            print("Final average: ")
            for name, stage_time in zip(stage_names, times):
                print(name + ": " + str(stage_time / counter_frames * 1000) + " ms")
        feed.close()
        cv2.destroyAllWindows()