Example #1
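A smoke test for the face detection model: it runs inference on a sample image, saves a crop of each detected face, and displays the image with the boxes drawn.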
import cv2

def test_face_detection():
    model = Face_Detection("models/intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml")
    model.load_model()
    image = cv2.imread("media/sample.png")
    height, width, _ = image.shape
    box_coords = model.predict(image)
    count = 0
    face = None
    for box in box_coords:
        count += 1
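        # Box coordinates are normalized to [0, 1]; scale them to pixel values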
        xmin = int(box[0] * width)
        ymin = int(box[1] * height)
        xmax = int(box[2] * width)
        ymax = int(box[3] * height)
        face = image[ymin:ymax, xmin:xmax]
        cv2.imwrite("bin/face" + str(count) + ".jpg", face)
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
    
    cv2.imshow("Result", image)
    cv2.waitKey()
Example #2
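An end-to-end pipeline: the detected face is passed to the landmarks, head pose, and gaze models, and the estimated gaze moves the mouse. A flag (FD, FL, HP, or GE) selects which intermediate output to visualize.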
def main(args):
    input_type = args.t
    input_files = args.l
    flags = args.f

    face_detect = Face_Detection(face_model_path, args.d, args.p, args.e)
    face_detect.load_model()

    landmarks_model = LandmarksDetection(landmarks_model_path, args.d, args.e)
    landmarks_model.load_model()

    head_pose = Head_Pose(hpose_model_path, args.d, args.e)
    head_pose.load_model()

    gaze_estimation = Gaze_Estimation(gaze_model_path, args.d, args.e)
    gaze_estimation.load_model()

    if input_type == 'cam':
        feed = InputFeeder(input_type='cam')
    else:
        if not os.path.isfile(input_files):
            logging.error("Could not find the input file")
            exit(1)
        feed = InputFeeder(input_type='video', input_file=input_files)

    
    try:
        feed.load_data()
    except Exception:
        logging.error("Could not load data from input file", exc_info=True)

    # Create the mouse controller once, outside the frame loop
    mouse = MouseController(precision='low', speed='fast')

    key = -1  # initialized so the "no face" branch can test it on the first frame
    for batch in feed.next_batch():
        
        try:
            cropped_face, coords = face_detect.predict(batch)

            if type(cropped_face) == int:
                logging.info("Face not detected")
                if key == 27:
                    break
                continue

            cropped_left_eye, cropped_right_eye, left_eye_cord, right_eye_cord = landmarks_model.predict(cropped_face)
            head_angles = head_pose.predict(cropped_face)
            x, y = gaze_estimation.predict(cropped_left_eye, cropped_right_eye, head_angles)
        except Exception:
            logging.error("An error occurred while running predictions", exc_info=True)
            continue
        if flags:
            if flags == 'FD':
                cv2.rectangle(batch, (coords[0], coords[1]), (coords[2], coords[3]), (255, 0, 0), 3)
            if flags == 'FL':
                cv2.rectangle(cropped_face, (left_eye_cord[0], left_eye_cord[1]), (left_eye_cord[2], left_eye_cord[3]), (255, 0, 0), 3)
                cv2.rectangle(cropped_face, (right_eye_cord[0], right_eye_cord[1]), (right_eye_cord[2], right_eye_cord[3]), (255, 0, 0), 3)
            if flags == 'HP':
                cv2.putText(batch,
                            "Head angles: yaw={:.2f}, pitch={:.2f}, roll={:.2f}".format(
                                head_angles[0], head_angles[1], head_angles[2]),
                            (20, 40),
                            cv2.FONT_HERSHEY_COMPLEX,
                            1, (255, 0, 255), 2)
            if flags == 'GE':
                # Midpoint of each eye box, used as the origin of the gaze line
                left_eye_mid_x = (left_eye_cord[2] - left_eye_cord[0]) / 2 + left_eye_cord[0]
                left_eye_mid_y = (left_eye_cord[3] - left_eye_cord[1]) / 2 + left_eye_cord[1]
                right_eye_mid_x = (right_eye_cord[2] - right_eye_cord[0]) / 2 + right_eye_cord[0]
                right_eye_mid_y = (right_eye_cord[3] - right_eye_cord[1]) / 2 + right_eye_cord[1]

                # Project the gaze vector out from each midpoint (y is negated
                # because image coordinates grow downwards)
                left_eye_new_x = int(left_eye_mid_x + x * 160)
                left_eye_new_y = int(left_eye_mid_y - y * 160)
                right_eye_new_x = int(right_eye_mid_x + x * 160)
                right_eye_new_y = int(right_eye_mid_y - y * 160)
                cv2.line(cropped_face, (int(left_eye_mid_x), int(left_eye_mid_y)), (left_eye_new_x, left_eye_new_y), (255, 0, 255), 5)
                cv2.line(cropped_face, (int(right_eye_mid_x), int(right_eye_mid_y)), (right_eye_new_x, right_eye_new_y), (255, 0, 255), 5)
                
        

                
                
        mouse.move(x, y)
        
        
        batch = imutils.resize(batch, width=500)
        cv2.imshow('frame', batch)
        key = cv2.waitKey(1) & 0xFF
    feed.close()
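
All of the snippets rely on an InputFeeder helper that none of them define. A minimal sketch compatible with Example #2's usage (load_data() plus a next_batch() generator that yields frames) might look like the following; note that Examples #6 through #8 instead expect next_batch() to yield (flag, frame) tuples, so the real class likely differs from project to project.

import cv2

class InputFeeder:
    # Minimal sketch under the assumptions above; not the actual project class.
    def __init__(self, input_type, input_file=None):
        self.input_type = input_type
        self.input_file = input_file
        self.cap = None

    def load_data(self):
        # 'cam' opens the default webcam, 'image' reads a single frame,
        # anything else is treated as a video file path
        if self.input_type == 'cam':
            self.cap = cv2.VideoCapture(0)
        elif self.input_type == 'image':
            self.cap = cv2.imread(self.input_file)
        else:
            self.cap = cv2.VideoCapture(self.input_file)

    def next_batch(self):
        if self.input_type == 'image':
            yield self.cap
            return
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            yield frame

    def close(self):
        if self.input_type != 'image':
            self.cap.release()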
Example #3
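A variant that benchmarks model load times and average per-frame inference times for each precision, drawing eye positions, head pose angles, and gaze arrows when args.visualize is set.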
def main(args):
    fd = Face_Detection(
        "models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001",
        args.device, args.extensions)
    start = time.time()
    fd.load_model()
    logging.info(f"------Loading Times {args.precision}------")
    logging.info("Face Detection: {:.5f} sec".format(time.time() - start))

    fl = Facial_Landmarks(
        f"models/intel/landmarks-regression-retail-0009/{args.precision}/landmarks-regression-retail-0009",
        args.device, args.extensions)
    start = time.time()
    fl.load_model()
    logging.info("Facial Landmarks: {:.5f} sec".format(time.time() - start))
    hp = Head_Pose_Estimation(
        f"models/intel/head-pose-estimation-adas-0001/{args.precision}/head-pose-estimation-adas-0001",
        args.device, args.extensions)
    start = time.time()
    hp.load_model()
    logging.info("Head Pose Estimation: {:.5f} sec".format(time.time() -
                                                           start))
    gs = Gaze_Estimation(
        f"models/intel/gaze-estimation-adas-0002/{args.precision}/gaze-estimation-adas-0002",
        args.device, args.extensions)
    start = time.time()
    gs.load_model()
    logging.info("Gaze Estimation: {:.5f} sec".format(time.time() - start))

    input_feed = InputFeeder(args.type, args.input)
    input_feed.load_data()

    mc = MouseController("high", "fast")

    inf_time = [0, 0, 0, 0, 0]  # fd, fl, hp, gs, frames
    for frame in input_feed.next_batch():

        if frame is not None:
            inf_time[4] += 1
            # face detection
            start = time.time()
            face_frame = fd.predict(frame.copy())
            inf_time[0] += time.time() - start
            # eye detection through facial landmarks
            start = time.time()
            left_eye_image, left_x, left_y, right_eye_image, right_x, right_y = fl.predict(
                face_frame)
            inf_time[1] += time.time() - start
            # head pose
            start = time.time()
            yaw, pitch, roll = hp.predict(face_frame)
            inf_time[2] += time.time() - start
            # gaze estimation
            start = time.time()
            gaze_vector = gs.predict(left_eye_image, right_eye_image,
                                     (yaw, pitch, roll))
            inf_time[3] += time.time() - start

            # mouse move
            mc.move(gaze_vector[0], gaze_vector[1])

            if args.visualize:
                face_frame = cv2.circle(face_frame, (right_x, right_y), 5,
                                        (255, 0, 0), -5)
                face_frame = cv2.circle(face_frame, (left_x, left_y), 5,
                                        (255, 0, 0), -5)
                cv2.putText(
                    face_frame,
                    "yaw:{:.2f} - pitch:{:.2f} - roll:{:.2f}".format(
                        yaw, pitch, roll), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
                    0.3, (255, 0, 0), 1)
                x, y, z = gaze_vector
                cv2.putText(
                    face_frame,
                    "gaze-vector x:{:.2f} - y:{:.2f} - z:{:.2f}".format(
                        x, y, z), (20, 40), cv2.FONT_HERSHEY_SIMPLEX,
                    0.3, (255, 0, 0), 1)
                cv2.imshow('left eye', left_eye_image)
                cv2.imshow('right eye', right_eye_image)
                cv2.arrowedLine(
                    face_frame, (left_x, left_y),
                    (left_x + int(x * 100), left_y + int(-y * 100)),
                    (0, 0, 255), 2)
                cv2.arrowedLine(
                    face_frame, (right_x, right_y),
                    (right_x + int(x * 100), right_y + int(-y * 100)),
                    (0, 0, 255), 2)
                cv2.imshow('face detection', face_frame)
                cv2.waitKey(60)

        else:
            break
    # inference benchmarks (averages; guard against an empty input)
    if inf_time[4] > 0:
        logging.info(f"------Inference Times {args.precision}------")
        logging.info("Face Detection: {:.5f} sec".format(inf_time[0] / inf_time[4]))
        logging.info("Facial Landmarks: {:.5f} sec".format(inf_time[1] / inf_time[4]))
        logging.info("Head Pose Estimation: {:.5f} sec".format(inf_time[2] / inf_time[4]))
        logging.info("Gaze Estimation: {:.5f} sec".format(inf_time[3] / inf_time[4]))
    input_feed.close()
    cv2.destroyAllWindows()
Example #4
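A variant that reads frames in batches, compensates the estimated gaze vector for head roll, and maps the midpoint of the two per-eye gaze lines to a mouse position.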
def infer_on_video(args):
    draw_flag = args.b
    device = args.device
    input_path = args.input_path
    input_type = args.input_type
    output_path = args.output_path
    precision = args.accuracy

    locations = {}

    locations[FACE_DETECTION_MODEL] = os.path.join(
        MODEL_PATH, FACE_DETECTION_MODEL, 'INT1',
        FACE_DETECTION_MODEL + ".xml")

    if precision is not None:
        log.info(
            "face-detection-adas-binary-0001 always uses INT1 precision; "
            "the requested precision applies to the other models")

    for model_name in [
            FACIAL_LANDMARKS_DETECTION_MODEL, HEAD_POSE_ESTIMATION_MODEL,
            GAZE_ESTIMATION_MODEL
    ]:
        locations[model_name] = find_exist_model_file(precision, model_name)

    # Initialize feeder
    feed = InputFeeder(input_type=input_type, input_file=input_path)
    feed.load_data()

    # Grab the shape of the input
    input_width = feed.getWidth()
    input_height = feed.getHeight()

    # Create a video writer for the output video
    # out = cv2.VideoWriter('../out.mp4', CODEC, 30, (input_width,input_height))

    mouse_controller = MouseController(MOUSE_PRECISION, MOUSE_SPEED)

    start_model_load_time = time.time()

    # model initialization
    face_detection = Face_Detection(locations[FACE_DETECTION_MODEL],
                                    device,
                                    extensions=CPU_EXTENSION)
    facial_landmarks_detection = Facial_Landmarks_Detection(
        locations[FACIAL_LANDMARKS_DETECTION_MODEL],
        device,
        extensions=CPU_EXTENSION)
    head_pose_estimation = Head_Pose_Estimation(
        locations[HEAD_POSE_ESTIMATION_MODEL],
        device,
        extensions=CPU_EXTENSION)
    gaze_estimation = Gaze_Estimation(locations[GAZE_ESTIMATION_MODEL],
                                      device,
                                      extensions=CPU_EXTENSION)

    total_model_load_time = time.time() - start_model_load_time

    counter = 0
    start_inference_time = time.time()

    # Process frames until the video ends, or process is exited
    for ret, batch in feed.next_batch(BATCH_SIZE):
        if not ret:
            break
        counter += 1
        gaze_lines = []
        out_frame = batch.copy()

        key = cv2.waitKey(60)

        # Face detection
        face_detection_output = face_detection.predict(batch)

        # face_detection_output = [ image_id, label, conf, xmin, ymin, xmax, ymax ]
        face_xmin = abs(int(face_detection_output[3] * input_width))
        face_ymin = abs(int(face_detection_output[4] * input_height))
        face_xmax = abs(int(face_detection_output[5] * input_width))
        face_ymax = abs(int(face_detection_output[6] * input_height))

        if (face_ymax - face_ymin) <= 0 or (face_xmax - face_xmin) <= 0:
            continue

        # Crop the face image
        face = batch[face_ymin:face_ymax, face_xmin:face_xmax]

        if draw_flag:
            cv2.rectangle(out_frame, (face_xmin, face_ymin),
                          (face_xmax, face_ymax), (255, 255, 0), 2)

        # Find facial landmarks (to find eyes)
        eyes = facial_landmarks_detection.predict(face)

        # Estimate head orientation (yaw=Y, pitch=X, roll=Z)
        yaw, pitch, roll = head_pose_estimation.predict(face)

        eye_images = []
        face_height, face_width, _ = face.shape  # also needed after this loop
        for eye in eyes:
            eye_xmin = int(eye[_X] * face_width - EYE_RADIUS)
            eye_ymin = int(eye[_Y] * face_height - EYE_RADIUS)
            eye_xmax = int(eye[_X] * face_width + EYE_RADIUS)
            eye_ymax = int(eye[_Y] * face_height + EYE_RADIUS)

            if (eye_ymax - eye_ymin) <= 0 or (eye_xmax - eye_xmin) <= 0:
                continue

            # crop and resize
            eye_images.append(face[eye_ymin:eye_ymax,
                                   eye_xmin:eye_xmax].copy())

            # Draw eye boundary boxes
            if draw_flag:
                cv2.rectangle(out_frame,
                              (eye_xmin + face_xmin, eye_ymin + face_ymin),
                              (eye_xmax + face_xmin, eye_ymax + face_ymin),
                              (0, 255, 0), 2)

        # gaze estimation
        gaze_vec_norm = gaze_estimation.predict(eye_images, [yaw, pitch, 0])

        # Rotate the gaze vector by the head-roll angle so the drawn gaze
        # lines stay aligned with the image axes
        cos = math.cos(math.radians(roll))
        sin = math.sin(math.radians(roll))
        tmpx = gaze_vec_norm[0] * cos + gaze_vec_norm[1] * sin
        tmpy = -gaze_vec_norm[0] * sin + gaze_vec_norm[1] * cos
        gaze_vec_norm = [tmpx, tmpy]

        # Store gaze line coordinations
        for eye in eyes:
            eye[_X] = int(eye[_X] * face_width)
            eye[_Y] = int(eye[_Y] * face_height)
            gaze_lines.append(
                get_gaze_line(eye, face_xmin, face_ymin, gaze_vec_norm))

        if draw_flag:
            # Drawing gaze lines
            for gaze_line in gaze_lines:
                start_point = (gaze_line[0][_X], gaze_line[0][_Y])
                end_point = (gaze_line[1][_X], gaze_line[1][_Y])

                draw_gaze_line(out_frame, start_point, end_point)

        # The middle gaze line needs both eyes; skip the mouse move otherwise
        if len(gaze_lines) >= 2:
            # start point of middle gaze line
            start_point = ((gaze_lines[0][0][_X] + gaze_lines[1][0][_X]) / 2,
                           (gaze_lines[0][0][_Y] + gaze_lines[1][0][_Y]) / 2)

            # end point of middle gaze line
            end_point = ((gaze_lines[0][1][_X] + gaze_lines[1][1][_X]) / 2,
                         (gaze_lines[0][1][_Y] + gaze_lines[1][1][_Y]) / 2)

            gaze_mid_line = [start_point, end_point]

            mouse_point = get_mouse_point(gaze_mid_line, input_width,
                                          input_height)

            log.debug("mouse_point[_X], mouse_point[_Y]: %s, %s",
                      mouse_point[_X], mouse_point[_Y])

            # cv2.circle(out_frame, mouse_point, 10, (255, 255, 255), -1)
            mouse_controller.move(mouse_point[_X], mouse_point[_Y])

        # write out_frames with batch size
        for _ in range(BATCH_SIZE):
            cv2.imshow("video", out_frame)
            # out.write(out_frame)

        if key == 27:
            break

    total_inference_time = time.time() - start_inference_time
    total_inference_time = round(total_inference_time, 1)
    fps = counter / total_inference_time

    with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps) + '\n')
        f.write(str(total_model_load_time) + '\n')

    # Release the out writer, capture, and destroy any OpenCV windows
    log.info("Input stream ended...")
    cv2.destroyAllWindows()
    # out.release()
    feed.close()
Example #5
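A class-based pointer controller: the models are loaded in the constructor, the frame is flipped so the webcam acts as a mirror, and average inference times are printed when the feed ends.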
class Computer_Pointer_Controller:
    def __init__(self, args):
        self.args = args  # kept so run() can read the CLI flags

        # load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        self.logger = logging.getLogger()
        input_T = args.input_type
        input_F = args.input_file

        if input_T.lower() == 'cam':
            # open the video feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                self.logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                self.logger.error(
                    "Unsupported file extension. Allowed: ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']"
                )
                exit(1)

        print("Models total loading time :",
              time.time() - start_models_load_time)

        # init mouse controller
        self.mouse_controller = MouseController('low', 'fast')

    def run(self):
        inferences_times = []
        face_detections_times = []
        for batch in self.feed.next_batch():
            if batch is None:
                break

            # as we want the webcam to act as a mirror, flip the frame
            batch = cv2.flip(batch, 1)

            inference_time = time.time()
            face = self.face_detection.predict(batch)
            if face is None:
                self.logger.error('Unable to detect the face.')
                continue
            else:
                face_detections_times.append(time.time() - inference_time)

                left_eye_image, right_eye_image = self.facial_landmarks_detection.predict(
                    face)
                if left_eye_image is None or right_eye_image is None:
                    continue
                head_pose_angles = self.head_pose_estimation.predict(face)
                if head_pose_angles is None:
                    continue
                vector = self.gaze_estimation.predict(left_eye_image,
                                                      right_eye_image,
                                                      head_pose_angles)
                inferences_times.append(time.time() - inference_time)
                if self.args.show_face == "True":
                    cv2.imshow("Detected face", face)
                    cv2.waitKey(1)
                self.mouse_controller.move(vector[0], vector[1])

        self.feed.close()
        cv2.destroyAllWindows()
        print("Average face detection inference time:",
              sum(face_detections_times) / len(face_detections_times))
        print("Average total inferences time:",
              sum(inferences_times) / len(inferences_times))
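Example #6
A variant in which each model's predict() draws its own visualization onto the output frame; total load and inference durations are printed at the end.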
def infer_on_stream(args):

    network_fd = Face_Detection(args.face_detection_model, args.device)
    network_hp = Head_Pose_Estimation(args.head_pose_model, args.device)
    network_fl = Facial_Landmarks_Detection(args.facial_landmarks_model,
                                            args.device)
    network_ge = Gaze_Estimation(args.gaze_estimation_model, args.device)

    mouse_cont = MouseController(args.mouse_precision, args.mouse_speed)

    starting_loading = time.time()

    network_fd.load_model()
    network_hp.load_model()
    network_fl.load_model()
    network_ge.load_model()

    duration_loading = time.time() - starting_loading

    input_type = handle_input(args.input)

    feed = InputFeeder(input_type=input_type, input_file=args.input)

    feed.load_data()

    starting_inference = time.time()

    for flag, frame in feed.next_batch():
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        out_frame, face, face_coords = network_fd.predict(
            frame, args.prob_threshold, args.display)

        if len(face_coords) == 0:
            log.error("There is no face in the stream!")
            continue

        out_frame, head_angle = network_hp.predict(out_frame, face,
                                                   face_coords, args.display)
        out_frame, eye_left, eye_right, eye_center = network_fl.predict(
            out_frame, face, face_coords, args.display)
        out_frame, gaze = network_ge.predict(out_frame, eye_left, eye_right,
                                             eye_center, head_angle,
                                             args.display)

        mouse_cont.move(gaze[0], gaze[1])

        if key_pressed == 27:
            break

        cv2.imshow('Visualization', cv2.resize(out_frame, (600, 400)))

    duration_inference = time.time() - starting_inference

    print("Total loading time is: {}\nTotal inference time is: {} ".format(
        duration_loading, duration_inference))

    feed.close()
    cv2.destroyAllWindows()
Example #7
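A variant that wraps model loading and inference in try/except blocks and supports fd, fl, hp, and ge visualization modes.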
def main(args):
    ## loading models
    try:
        input_file = args.input
        mode_visualization = args.mode_visualization

        if input_file == "CAM":
            input_feeder = InputFeeder("cam")
        else:
            if not os.path.isfile(input_file):
                log.error("ERROR: INPUT PATH IS NOT VALID")
                exit(1)
            input_feeder = InputFeeder("video", input_file)
        face_detection_class = Face_Detection(
            model=args.face_detection,
            device=args.device,
            extensions=args.cpu_extension)
        face_landmarks_class = Landmarks_Detection(
            model=args.face_landmark,
            device=args.device,
            extensions=args.cpu_extension)
        head_pose_class = Head_Pose(model=args.head_pose,
                                    device=args.device,
                                    extensions=args.cpu_extension)
        gaze_estimation_class = Gaze_Estimation(
            model=args.gaze_estimation,
            device=args.device,
            extensions=args.cpu_extension)

        mouse_control = MouseController('medium', 'fast')
        start_time = time.time()

        ## Load the models one by one and all necessary info

        face_det_time = time.time()
        face_detection_class.load_model()
        print("Face Detection Load Time: time: {:.3f} ms".format(
            (time.time() - face_det_time) * 1000))

        face_land_time = time.time()
        face_landmarks_class.load_model()
        print("Facial landmarks load Time: time: {:.3f} ms".format(
            (time.time() - face_land_time) * 1000))

        head_po_time = time.time()
        head_pose_class.load_model()
        print("Head pose load time: time: {:.3f} ms".format(
            (time.time() - head_po_time) * 1000))

        gaze_est_time = time.time()
        gaze_estimation_class.load_model()
        print("Gaze estimation load time: time: {:.3f} ms".format(
            (time.time() - gaze_est_time) * 1000))

        total_time = time.time() - start_time
        print("Total loading time taken: time: {:.3f} ms".format(
            total_time * 1000))

        print("All models are loaded successfully..")

        input_feeder.load_data()
        print("Feeder is loaded")
    except Exception:
        log.error("Error occurred while loading models in app", exc_info=True)
        exit(1)

    ## performing inferences
    try:
        start_inference_time = time.time()
        frame_count = 0
        for flag, frame in input_feeder.next_batch():
            if not flag:
                break
            frame_count += 1
            if frame_count % 5 == 0:
                cv2.imshow('video', cv2.resize(frame, (700, 700)))

            key = cv2.waitKey(60)
            crop_face, face_coords = face_detection_class.predict(
                frame.copy(), args.conf_threshold)
            if type(crop_face) == int:
                log.error("Unable to detect the face.")
                if key == 27:
                    break
                continue

            ## perform inference
            head_angle = head_pose_class.predict(crop_face.copy())
            left_eye, right_eye, eye_coords = face_landmarks_class.predict(
                crop_face.copy())
            mouse_position, gaze_vector = gaze_estimation_class.predict(
                left_eye, right_eye, head_angle)

            ## checking for extra flags
            if len(mode_visualization) != 0:
                p_frame = frame.copy()
                if 'fd' in mode_visualization:
                    p_frame = crop_face
                if 'fl' in mode_visualization:
                    cv2.rectangle(
                        crop_face,
                        (eye_coords[0][0] - 10, eye_coords[0][1] - 10),
                        (eye_coords[0][2] + 10, eye_coords[0][3] + 10),
                        (0, 255, 0), 1)
                    cv2.rectangle(
                        crop_face,
                        (eye_coords[1][0] - 10, eye_coords[1][1] - 10),
                        (eye_coords[1][2] + 10, eye_coords[1][3] + 10),
                        (0, 255, 0), 1)

                if 'hp' in mode_visualization:
                    cv2.putText(
                        p_frame,
                        "Head Positions: :{:.2f} :{:.2f} :{:.2f}".format(
                            head_angle[0], head_angle[1],
                            head_angle[2]), (10, 20),
                        cv2.FONT_HERSHEY_COMPLEX, 0.25, (0, 255, 0), 1)

                if 'ge' in mode_visualization:
                    i, j, k = int(gaze_vector[0] * 12), int(
                        gaze_vector[1] * 12), 160

                    l_eye = cv2.line(left_eye.copy(), (i - k, j - k),
                                     (i + k, j + k), (0, 255, 255), 2)
                    cv2.line(l_eye, (i - k, j + k), (i + k, j - k),
                             (255, 0, 255), 2)

                    r_eye = cv2.line(right_eye.copy(), (i - k, j - k),
                                     (i + k, j + k), (0, 255, 255), 2)
                    cv2.line(r_eye, (i - k, j + k), (i + k, j - k),
                             (0, 255, 255), 2)

                    l_eye = crop_face[eye_coords[0][1]:eye_coords[0][3],
                                      eye_coords[0][0]:eye_coords[0][2]]
                    r_eye = crop_face[eye_coords[1][1]:eye_coords[1][3],
                                      eye_coords[1][0]:eye_coords[1][2]]

                cv2.imshow("visual for client",
                           cv2.resize(p_frame, (700, 700)))

            mouse_control.move(mouse_position[0], mouse_position[1])
            if key == 27:
                break

        ## working on inference time and frames per second
        total_infer_time = time.time() - start_inference_time
        frames_per_sec = int(frame_count) / total_infer_time

        print("Frames processed: {}".format(frame_count))
        print("Total inference time: {:.3f} seconds".format(
            total_infer_time))
        print("FPS: {:.3f} fps".format(frames_per_sec))
    except Exception:
        log.error("Error while performing inference in app file", exc_info=True)

        print("All Done...")

        cv2.destroyAllWindows()
        input_feeder.close()
Example #8
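Two related entry points: model_pipelines() times each model's initialization and inference separately and reports through results(), while main() logs load and inference times and draws fd, fld, hp, and ge visualizations, updating the display and mouse every fifth frame.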
def model_pipelines(args):
    
    # Parameters which were parsed are assigned
    
    #device = args.dev
    #customLayers = args.lay
    inputFile = args.inp
    visual_flag = args.vf
    
    faceDetectionModel = args.mfd
    landmarksDetectionModel = args.mld
    headPoseEstimationModel = args.mhp
    gazeDetectionModel = args.mgd
    start_time = time.time()
    # Logging is enabled 
    log = logging.getLogger(__name__)
    
    log.info('----------THE BEGINNING----------')
    log.info('Start Time: {0}'.format(str(start_time)))

    # The feed is initialised
    single_image = ['jpg','tif','png','jpeg', 'bmp']
    if inputFile.split(".")[-1].lower() in single_image:
        input_feed = InputFeeder('image', inputFile)
    elif args.inp == 'cam':
        input_feed = InputFeeder('cam')
    else:
        input_feed = InputFeeder('video', inputFile)

    # Feed data is loaded
    log.info('Loading data...')
    input_feed.load_data()
    log.info('Data Loaded. Beginning inference...')

    # The models are initialised and loaded here

    # Each timer brackets only its own model's initialization
    face_model_load_start_time = time.time()
    ppl_fd = Face_Detection(faceDetectionModel)
    face_model_load_time = time.time() - face_model_load_start_time

    landmark_model_load_start_time = time.time()
    ppl_fl = Facial_Landmarks_Detection(landmarksDetectionModel)
    landmark_model_load_time = time.time() - landmark_model_load_start_time

    headpose_model_load_start_time = time.time()
    ppl_hd = Head_Pose_Estimation(headPoseEstimationModel)
    headpose_model_load_time = time.time() - headpose_model_load_start_time

    gaze_model_load_start_time = time.time()
    ppl_ge = Gaze_Estimation(gazeDetectionModel)
    gaze_model_load_time = time.time() - gaze_model_load_start_time
    
    log.info('Face Detection object initialized')
    log.info('Facial Landmarks object initialized')
    log.info('Head Pose object initialized')
    log.info('Gaze object initialized')
    
    log.info('All models loaded and checked')
    
    load_time = [face_model_load_time, landmark_model_load_time, headpose_model_load_time, gaze_model_load_time]

    # Create the mouse controller once, outside the frame loop
    mCoord = MouseController('medium', 'fast')

    # count the number of frames
    frameCount = 0

    # collate frames from the feeder and feed into the detection pipelines
    for flag, frame in input_feed.next_batch():

        if not flag:
            break
        frameCount += 1
        
        if frameCount % 5 == 0:
            cv2.imshow('video', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(100)
        
        # Get the time for the model inference
        face_inference_start_time = time.time()
        face_crop = ppl_fd.predict(frame)
        face_inference_time = time.time() - face_inference_start_time
        
        if type(face_crop) == int:
            log.info("No face can be detected")

            if key == 27:
                break

            continue

        if 'mfd' in visual_flag:
            cv2.imshow('The cropped face', face_crop)
        
        # Get the time for the model inference
        landmark_inference_start_time = time.time()
        eye_image_left, eye_image_right, face_landmarked = ppl_fl.predict(face_crop.copy())
        landmark_inference_time = time.time() - landmark_inference_start_time
       
        # Get face landmark results
        if 'mld' in visual_flag:
            cv2.imshow('Face output', face_landmarked)
            
        if eye_image_left is None or eye_image_right is None:
            log.info("Landmarks could not be detected, check that the eyes are visible and the image is bright")
            continue
        
        # Get the time for the model inference
        headpose_inference_start_time = time.time()
        head_pose_angles, head_pose_image = ppl_hd.predict(face_crop.copy())   
        headpose_inference_time = time.time() - headpose_inference_start_time
        
        # Get head pose results
        if 'mhp' in visual_flag:
            cv2.imshow('Head Pose Angles', head_pose_image)
        
        # Get the time for the model inference
        gaze_inference_start_time = time.time()
        coord_x, coord_y = ppl_ge.predict(eye_image_left ,eye_image_right, head_pose_angles)
        gaze_inference_time = time.time() - gaze_inference_start_time

        # Get gaze detection results
        if 'mgd' in visual_flag:
            cv2.putText(face_landmarked, "Estimated x:{:.2f} | Estimated y:{:.2f}".format(coord_x, coord_y), (10,20), cv2.FONT_HERSHEY_COMPLEX, 0.25, (0,255,0),1)
            cv2.imshow('Gaze Estimation', face_landmarked)


        # Move the mouse based on the coordinates received
        if frameCount % 5 == 0:
            mCoord.move(coord_x, coord_y)

        if key == 27:
            break
        
        inference_time = [face_inference_time, landmark_inference_time, headpose_inference_time, gaze_inference_time]
        results(args, inference_time, load_time)
        
        if key == ord('x'):
            log.warning('KeyboardInterrupt: `X` was pressed')
            results(args, inference_time, load_time)
            sys.exit()
        
        
    log.info('Elapsed Time: {0}'.format(str(time.time() - start_time)))
    log.info('----------THE END----------')
    cv2.destroyAllWindows()
    input_feed.close()
def main():

    try:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s [%(levelname)s] %(message)s",
            handlers=[
                logging.FileHandler("Computer_Pointer_Controller.log"),
                logging.StreamHandler()
            ])
    except Exception:
        print("Log file cannot be created")

    args = build_argparser()
    video_path = args.i
    visualize = args.flags
    count = 0
    fd_inference_time = 0
    fld_inference_time = 0
    hp_inference_time = 0
    ge_inference_time = 0

    MC = MouseController('medium', 'fast')

    logging.info("############## Model Load Time #############")

    start_time = time.time()
    first_model_time = start_time
    FD = Face_Detection(device=args.d, threshold=args.prob, extensions=args.l)
    FD.load_model(model_path=args.f)
    logging.info("Face Detection Model: {:.3f}ms".format(
        1000 * (time.time() - first_model_time)))

    second_model_time = time.time()
    FLD = Facial_Landmarks_Detection(device=args.d, extensions=args.l)
    FLD.load_model(model_path=args.fl)
    logging.info("Facial Landmarks Detection Model: {:.3f}ms".format(
        1000 * (time.time() - second_model_time)))

    third_model_time = time.time()
    HPE = Head_Pose_Estimation(device=args.d, extensions=args.l)
    HPE.load_model(model_path=args.hp)
    logging.info("Head Pose Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - third_model_time)))

    fourth_model_time = time.time()
    GE = Gaze_Estimation(device=args.d, extensions=args.l)
    GE.load_model(model_path=args.g)
    logging.info("Gaze Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - fourth_model_time)))
    logging.info("############## End ######################### ")

    Total_Model_Load_Time = 1000 * (time.time() - start_time)

    ##### LOADING VIDEO FILE #####

    if (video_path == "cam"):
        IF = InputFeeder("cam")
    else:
        IF = InputFeeder("video", video_path)
    IF.load_data()

    ##### MODEL INFERENCE #####

    start_inf_time = time.time()
    for flag, frame in IF.next_batch():

        if not flag:
            break

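        # Display (and the mouse move further down) is throttled to every 5th frame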
        if (count % 5 == 0):
            cv2.imshow('frame', cv2.resize(frame, (500, 500)))

        key = cv2.waitKey(60)

        count = count + 1

        start_time_1 = time.time()
        face, face_coordinates = FD.predict(frame, args.it)
        fd_inference_time += (time.time() - start_time_1)

        start_time_2 = time.time()
        left_eye_image, right_eye_image, eye_coordinates = FLD.predict(
            face, args.it)
        fld_inference_time += (time.time() - start_time_2)

        start_time_3 = time.time()
        head_pose_angles = HPE.predict(face, args.it)
        hp_inference_time += (time.time() - start_time_3)

        start_time_4 = time.time()
        mouse_coordinates, gaze_vector = GE.predict(left_eye_image,
                                                    right_eye_image,
                                                    head_pose_angles, args.it)
        ge_inference_time += (time.time() - start_time_4)

        if (len(visualize) != 0):
            frame_visualize = frame.copy()

            if ("fd" in visualize):
                if (len(visualize) == 1):
                    cv2.rectangle(frame_visualize,
                                  (face_coordinates[0], face_coordinates[1]),
                                  (face_coordinates[2], face_coordinates[3]),
                                  (255, 0, 255), 2)
                else:
                    frame_visualize = face.copy()

            if ("fld" in visualize):
                if not "fd" in visualize:
                    frame_visualize = face.copy()

                cv2.circle(frame_visualize, (eye_coordinates['left_eye'][0],
                                             eye_coordinates['left_eye'][1]),
                           25, (0, 0, 255), 2)
                cv2.circle(frame_visualize, (eye_coordinates['right_eye'][0],
                                             eye_coordinates['right_eye'][1]),
                           25, (0, 0, 255), 2)

            if ("hp" in visualize):
                cv2.putText(
                    frame_visualize,
                    "Pose Angles: yaw:{:.2f} | pitch:{:.2f} | roll:{:.2f}".
                    format(head_pose_angles[0], head_pose_angles[1],
                           head_pose_angles[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.255, (0, 255, 0), 1)

            if ("ge" in visualize):
                h = face.shape[0]
                arrow = h * 0.7
                arrow_X = gaze_vector[0] * arrow
                arrow_Y = -gaze_vector[1] * arrow
                cv2.arrowedLine(
                    frame_visualize, (eye_coordinates['left_eye'][0],
                                      eye_coordinates['left_eye'][1]),
                    (int(eye_coordinates['left_eye'][0] + arrow_X),
                     int(eye_coordinates['left_eye'][1] + arrow_Y)),
                    (255, 0, 0), 2)
                cv2.arrowedLine(
                    frame_visualize, (eye_coordinates['right_eye'][0],
                                      eye_coordinates['right_eye'][1]),
                    (int(eye_coordinates['right_eye'][0] + arrow_X),
                     int(eye_coordinates['right_eye'][1] + arrow_Y)),
                    (255, 0, 0), 2)
            if (count % 5 == 0):

                cv2.imshow('Visualization',
                           cv2.resize(frame_visualize, (500, 500)))

        if (count % 5 == 0):
            MC.move(mouse_coordinates[0], mouse_coordinates[1])

        if key == 27:
            break

    Total_Inference_Time = time.time() - start_inf_time
    if (count > 0):
        logging.info("############## Models Inference time #######")
        logging.info("Face Detection:{:.3f}ms".format(
            1000 * fd_inference_time / count))
        logging.info("Facial Landmarks Detection:{:.3f}ms".format(
            1000 * fld_inference_time / count))
        logging.info("Headpose Estimation:{:.3f}ms".format(
            1000 * hp_inference_time / count))
        logging.info("Gaze Estimation:{:.3f}ms".format(
            1000 * ge_inference_time / count))
        logging.info("############## End #########################")

    logging.info("############## Summarized Results ##########")
    logging.info(
        "Total Model Load Time: {:.3f}ms".format(Total_Model_Load_Time))
    logging.info("Total Inference Time: {:.3f}s".format(Total_Inference_Time))
    logging.info("FPS:{}".format(count / Total_Inference_Time))
    logging.info("############ End ###########################")
    cv2.destroyAllWindows()
    IF.close()
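
None of the examples include build_argparser. A hypothetical reconstruction of the parser Example #8's main() appears to expect (every flag name below is inferred from how the namespace is read: args.i, args.flags, args.d, args.prob, args.l, args.f, args.fl, args.hp, args.g, args.it) could look like this; it returns the parsed namespace directly, matching args = build_argparser() above.

import argparse

def build_argparser():
    # Hypothetical sketch; flag names are inferred, not taken from the project.
    parser = argparse.ArgumentParser(description="Computer Pointer Controller")
    parser.add_argument("-f", required=True, help="Face detection model path")
    parser.add_argument("-fl", required=True, help="Facial landmarks model path")
    parser.add_argument("-hp", required=True, help="Head pose estimation model path")
    parser.add_argument("-g", required=True, help="Gaze estimation model path")
    parser.add_argument("-i", default="cam", help="Input video path, or 'cam' for webcam")
    parser.add_argument("-d", default="CPU", help="Target device")
    parser.add_argument("-l", default=None, help="CPU extension library path, if any")
    parser.add_argument("-prob", type=float, default=0.6, help="Face detection confidence threshold")
    parser.add_argument("-it", type=float, default=0.6, help="Threshold forwarded to each predict() call")
    parser.add_argument("--flags", nargs="*", default=[],
                        help="Visualizations to draw: any of fd, fld, hp, ge")
    return parser.parse_args()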