def init_models(device="CPU"):
    # Using global variables, not defining new variables
    global face_detection
    global facial_landmarks_detection
    global head_pose_estimation
    global gaze_estimation

    start = time.time()
    face_detection = Face_Detection(path_face_detection, device)
    face_detection.load_model()
    fd_load_time = time.time() - start

    start = time.time()
    facial_landmarks_detection = Facial_Landmarks_Detection(
        path_facial_landmarks_detection, device)
    facial_landmarks_detection.load_model()
    fld_load_time = time.time() - start

    start = time.time()
    head_pose_estimation = Head_Pose_Estimation(path_head_pose_estimation, device)
    head_pose_estimation.load_model()
    hpe_load_time = time.time() - start

    start = time.time()
    gaze_estimation = Gaze_Estimation(path_gaze_estimation, device)
    gaze_estimation.load_model()
    ge_load_time = time.time() - start

    return fd_load_time, fld_load_time, hpe_load_time, ge_load_time
def init_models(device="CPU"):
    # Using global variables, not defining new variables
    global face_detection
    global facial_landmarks_detection
    global head_pose_estimation
    global gaze_estimation

    log.info("Loading Face Detection model...")
    face_detection = Face_Detection(path_face_detection, device)
    face_detection.load_model()
    log.info("DONE\n")

    log.info("Loading Face Landmarks Detection model...")
    facial_landmarks_detection = Facial_Landmarks_Detection(
        path_facial_landmarks_detection, device)
    facial_landmarks_detection.load_model()
    log.info("DONE\n")

    log.info("Loading Head Pose Estimation model...")
    head_pose_estimation = Head_Pose_Estimation(path_head_pose_estimation, device)
    head_pose_estimation.load_model()
    log.info("DONE\n")

    log.info("Loading Gaze Estimation model...")
    gaze_estimation = Gaze_Estimation(path_gaze_estimation, device)
    gaze_estimation.load_model()
    log.info("DONE\n")
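# Both init_models() variants above assume wrapper classes (Face_Detection,
# Facial_Landmarks_Detection, ...) exposing load_model()/predict(). A minimal
# sketch of such a wrapper, assuming the pre-2022 OpenVINO Python API
# (openvino.inference_engine); the class and attribute names here are
# illustrative assumptions, not the authors' actual implementation:
import cv2
from openvino.inference_engine import IECore


class ModelWrapper:
    def __init__(self, model_path, device="CPU"):
        # model_path is the IR path without extension; .xml/.bin assumed alongside
        self.model_xml = model_path + ".xml"
        self.model_bin = model_path + ".bin"
        self.device = device

    def load_model(self):
        ie = IECore()
        self.network = ie.read_network(model=self.model_xml, weights=self.model_bin)
        self.exec_network = ie.load_network(network=self.network,
                                            device_name=self.device)
        self.input_name = next(iter(self.network.input_info))
        self.output_name = next(iter(self.network.outputs))

    def predict(self, image):
        # Resize/transpose the frame to the network's NCHW input, run inference
        n, c, h, w = self.network.input_info[self.input_name].input_data.shape
        blob = cv2.resize(image, (w, h)).transpose((2, 0, 1)).reshape(n, c, h, w)
        return self.exec_network.infer({self.input_name: blob})[self.output_name]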
def test_face_detection():
    model = Face_Detection(
        "models/intel/face-detection-adas-0001/FP16-INT8/face-detection-adas-0001.xml")
    model.load_model()

    image = cv2.imread("media/sample.png")
    height, width, _ = image.shape

    box_coords = model.predict(image)
    count = 0
    for box in box_coords:
        count += 1
        # Box coordinates are normalized to [0, 1]; scale them to pixels
        xmin = int(box[0] * width)
        ymin = int(box[1] * height)
        xmax = int(box[2] * width)
        ymax = int(box[3] * height)
        face = image[ymin:ymax, xmin:xmax]
        cv2.imwrite("bin/face" + str(count) + ".jpg", face)
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)

    cv2.imshow("Result", image)
    cv2.waitKey()
def main(args):
    input_type = args.t
    input_files = args.l
    flags = args.f

    face_detect = Face_Detection(face_model_path, args.d, args.p, args.e)
    face_detect.load_model()
    landmarks_model = LandmarksDetection(landmarks_model_path, args.d, args.e)
    landmarks_model.load_model()
    head_pose = Head_Pose(hpose_model_path, args.d, args.e)
    head_pose.load_model()
    gaze_estimation = Gaze_Estimation(gaze_model_path, args.d, args.e)
    gaze_estimation.load_model()

    if input_type == 'cam':
        feed = InputFeeder(input_type='cam')
    else:
        if not os.path.isfile(input_files):
            logging.error("Could not find the input file")
            exit(1)
        feed = InputFeeder(input_type='video', input_file=input_files)

    try:
        feed.load_data()
    except Exception:
        logging.error("Could not load data from input file", exc_info=True)

    # Create the mouse controller once, outside the frame loop
    mouse = MouseController(precision='low', speed='fast')
    key = None

    for batch in feed.next_batch():
        try:
            cropped_face, coords = face_detect.predict(batch)
            if type(cropped_face) == int:
                logging.info("Face not detected")
                if key == 27:
                    break
                continue
            cropped_left_eye, cropped_right_eye, left_eye_cord, right_eye_cord = \
                landmarks_model.predict(cropped_face)
            head_angles = head_pose.predict(cropped_face)
            x, y = gaze_estimation.predict(cropped_left_eye, cropped_right_eye,
                                           head_angles)
        except Exception:
            logging.error("An error occurred while running predictions",
                          exc_info=True)
            continue

        if flags:
            if flags == 'FD':
                cv2.rectangle(batch, (coords[0], coords[1]),
                              (coords[2], coords[3]), (255, 0, 0), 3)
            if flags == 'FL':
                cv2.rectangle(cropped_face,
                              (left_eye_cord[0], left_eye_cord[1]),
                              (left_eye_cord[2], left_eye_cord[3]),
                              (255, 0, 0), 3)
                cv2.rectangle(cropped_face,
                              (right_eye_cord[0], right_eye_cord[1]),
                              (right_eye_cord[2], right_eye_cord[3]),
                              (255, 0, 0), 3)
            if flags == 'HP':
                cv2.putText(
                    batch,
                    "Head angles: yaw={:.2f}, pitch={:.2f}, roll={:.2f}".format(
                        head_angles[0], head_angles[1], head_angles[2]),
                    (20, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 255), 2)
            if flags == 'GE':
                # Eye centers, then project the gaze vector out from each eye
                left_eye_mid_x = (left_eye_cord[2] - left_eye_cord[0]) / 2 + left_eye_cord[0]
                left_eye_mid_y = (left_eye_cord[3] - left_eye_cord[1]) / 2 + left_eye_cord[1]
                right_eye_mid_x = (right_eye_cord[2] - right_eye_cord[0]) / 2 + right_eye_cord[0]
                right_eye_mid_y = (right_eye_cord[3] - right_eye_cord[1]) / 2 + right_eye_cord[1]
                left_eye_new_x = int(left_eye_mid_x + x * 160)
                left_eye_new_y = int(left_eye_mid_y - y * 160)
                right_eye_new_x = int(right_eye_mid_x + x * 160)
                right_eye_new_y = int(right_eye_mid_y - y * 160)
                cv2.line(cropped_face,
                         (int(left_eye_mid_x), int(left_eye_mid_y)),
                         (left_eye_new_x, left_eye_new_y), (255, 0, 255), 5)
                cv2.line(cropped_face,
                         (int(right_eye_mid_x), int(right_eye_mid_y)),
                         (right_eye_new_x, right_eye_new_y), (255, 0, 255), 5)

        mouse.move(x, y)
        batch = imutils.resize(batch, width=500)
        cv2.imshow('frame', batch)
        key = cv2.waitKey(1) & 0xFF

    feed.close()
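# The mains in this section iterate over an InputFeeder helper. A minimal
# sketch, assuming it wraps cv2.VideoCapture; this variant yields bare frames
# (as main() above expects), while some later mains unpack (flag, frame) pairs
# from a feeder that yields cap.read() tuples instead:
import cv2


class InputFeeder:
    def __init__(self, input_type, input_file=None):
        self.input_type = input_type  # 'cam', 'video', or 'image'
        self.input_file = input_file

    def load_data(self):
        if self.input_type == 'cam':
            self.cap = cv2.VideoCapture(0)  # default webcam
        elif self.input_type == 'video':
            self.cap = cv2.VideoCapture(self.input_file)
        else:
            self.cap = cv2.imread(self.input_file)  # single image

    def next_batch(self):
        # Generator over frames; yields None once the stream is exhausted
        while True:
            flag, frame = self.cap.read()
            if not flag:
                yield None
                break
            yield frame

    def close(self):
        if self.input_type != 'image':
            self.cap.release()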
def main(args):
    logging.info(f"------Loading Times {args.precision}------")
    fd = Face_Detection(
        "models/intel/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001",
        args.device, args.extensions)
    start = time.time()
    fd.load_model()
    logging.info("Face Detection: {:.5f} sec".format(time.time() - start))

    fl = Facial_Landmarks(
        f"models/intel/landmarks-regression-retail-0009/{args.precision}/landmarks-regression-retail-0009",
        args.device, args.extensions)
    start = time.time()
    fl.load_model()
    logging.info("Facial Landmarks: {:.5f} sec".format(time.time() - start))

    hp = Head_Pose_Estimation(
        f"models/intel/head-pose-estimation-adas-0001/{args.precision}/head-pose-estimation-adas-0001",
        args.device, args.extensions)
    start = time.time()
    hp.load_model()
    logging.info("Head Pose Estimation: {:.5f} sec".format(time.time() - start))

    gs = Gaze_Estimation(
        f"models/intel/gaze-estimation-adas-0002/{args.precision}/gaze-estimation-adas-0002",
        args.device, args.extensions)
    start = time.time()
    gs.load_model()
    logging.info("Gaze Estimation: {:.5f} sec".format(time.time() - start))

    input_feed = InputFeeder(args.type, args.input)
    input_feed.load_data()
    mc = MouseController("high", "fast")

    inf_time = [0, 0, 0, 0, 0]  # fd, fl, hp, gs, frame count
    for frame in input_feed.next_batch():
        if frame is None:
            break
        inf_time[4] += 1

        # face detection
        start = time.time()
        face_frame = fd.predict(frame.copy())
        inf_time[0] += time.time() - start

        # eye detection through facial landmarks
        start = time.time()
        left_eye_image, left_x, left_y, right_eye_image, right_x, right_y = \
            fl.predict(face_frame)
        inf_time[1] += time.time() - start

        # head pose
        start = time.time()
        yaw, pitch, roll = hp.predict(face_frame)
        inf_time[2] += time.time() - start

        # gaze estimation
        start = time.time()
        gaze_vector = gs.predict(left_eye_image, right_eye_image,
                                 (yaw, pitch, roll))
        inf_time[3] += time.time() - start

        # mouse move
        mc.move(gaze_vector[0], gaze_vector[1])

        if args.visualize:
            x, y, z = gaze_vector
            face_frame = cv2.circle(face_frame, (right_x, right_y), 5,
                                    (255, 0, 0), -5)
            face_frame = cv2.circle(face_frame, (left_x, left_y), 5,
                                    (255, 0, 0), -5)
            cv2.putText(
                face_frame,
                "yaw:{:.2f} - pitch:{:.2f} - roll:{:.2f}".format(yaw, pitch, roll),
                (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
            cv2.putText(
                face_frame,
                "gaze-vector x:{:.2f} - y:{:.2f} - z:{:.2f}".format(x, y, z),
                (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
            cv2.imshow('left eye', left_eye_image)
            cv2.imshow('right eye', right_eye_image)
            cv2.arrowedLine(face_frame, (left_x, left_y),
                            (left_x + int(x * 100), left_y + int(-y * 100)),
                            (0, 0, 255), 2)
            cv2.arrowedLine(face_frame, (right_x, right_y),
                            (right_x + int(x * 100), right_y + int(-y * 100)),
                            (0, 0, 255), 2)
            cv2.imshow('face detection', face_frame)
            cv2.waitKey(60)

    # inference benchmarks (average per frame)
    logging.info(f"------Inference Times {args.precision}------")
    logging.info("Face Detection: {:.5f} sec".format(inf_time[0] / inf_time[4]))
    logging.info("Facial Landmarks: {:.5f} sec".format(inf_time[1] / inf_time[4]))
    logging.info("Head Pose Estimation: {:.5f} sec".format(inf_time[2] / inf_time[4]))
    logging.info("Gaze Estimation: {:.5f} sec".format(inf_time[3] / inf_time[4]))

    input_feed.close()
    cv2.destroyAllWindows()
class Computer_Pointer_Controller:

    def __init__(self, args):
        self.logger = logging.getLogger()
        self.show_face = args.show_face

        # Load the objects corresponding to the models
        self.face_detection = Face_Detection(args.face_detection_model,
                                             args.device, args.extensions,
                                             args.perf_counts)
        self.gaze_estimation = Gaze_Estimation(args.gaze_estimation_model,
                                               args.device, args.extensions,
                                               args.perf_counts)
        self.head_pose_estimation = Head_Pose_Estimation(
            args.head_pose_estimation_model, args.device, args.extensions,
            args.perf_counts)
        self.facial_landmarks_detection = Facial_Landmarks_Detection(
            args.facial_landmarks_detection_model, args.device,
            args.extensions, args.perf_counts)

        start_models_load_time = time.time()
        self.face_detection.load_model()
        self.gaze_estimation.load_model()
        self.head_pose_estimation.load_model()
        self.facial_landmarks_detection.load_model()

        input_T = args.input_type
        input_F = args.input_file
        if input_T.lower() == 'cam':
            # Open the webcam feed
            self.feed = InputFeeder(args.input_type, args.input_file)
            self.feed.load_data()
        else:
            if not os.path.isfile(input_F):
                self.logger.error('Unable to find specified video file')
                exit(1)
            file_extension = input_F.split(".")[-1]
            if file_extension in ['jpg', 'jpeg', 'bmp', 'avi', 'mp4']:
                self.feed = InputFeeder(args.input_type, args.input_file)
                self.feed.load_data()
            else:
                self.logger.error(
                    "Unsupported file extension. Allowed: "
                    "['jpg', 'jpeg', 'bmp', 'avi', 'mp4']")
                exit(1)

        print("Models total loading time:", time.time() - start_models_load_time)

        # Init mouse controller
        self.mouse_controller = MouseController('low', 'fast')

    def run(self):
        inferences_times = []
        face_detections_times = []
        for batch in self.feed.next_batch():
            if batch is None:
                break
            # As we want the webcam to act as a mirror, flip the frame
            batch = cv2.flip(batch, 1)
            inference_time = time.time()

            face = self.face_detection.predict(batch)
            if face is None:
                self.logger.error('Unable to detect the face.')
                continue
            face_detections_times.append(time.time() - inference_time)

            left_eye_image, right_eye_image = \
                self.facial_landmarks_detection.predict(face)
            if left_eye_image is None or right_eye_image is None:
                continue
            head_pose_angles = self.head_pose_estimation.predict(face)
            if head_pose_angles is None:
                continue
            vector = self.gaze_estimation.predict(left_eye_image,
                                                  right_eye_image,
                                                  head_pose_angles)
            inferences_times.append(time.time() - inference_time)

            if self.show_face == "True":
                cv2.imshow("Detected face", face)
                cv2.waitKey(1)

            self.mouse_controller.move(vector[0], vector[1])

        self.feed.close()
        cv2.destroyAllWindows()
        print("Average face detection inference time:",
              sum(face_detections_times) / len(face_detections_times))
        print("Average total inferences time:",
              sum(inferences_times) / len(inferences_times))
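# Every pipeline in this section ends by feeding the gaze vector's x/y into
# MouseController.move(). A minimal sketch, assuming the controller maps the
# vector onto a relative pyautogui move; the precision/speed tables below are
# assumptions, not the authors' actual values:
import pyautogui


class MouseController:
    def __init__(self, precision, speed):
        precision_dict = {'high': 100, 'low': 1000, 'medium': 500}
        speed_dict = {'fast': 1, 'slow': 10, 'medium': 5}
        self.precision = precision_dict[precision]
        self.speed = speed_dict[speed]

    def move(self, x, y):
        # Screen y grows downward while gaze y grows upward, hence the sign flip
        pyautogui.moveRel(x * self.precision, -1 * y * self.precision,
                          duration=self.speed)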
def infer_on_stream(args):
    network_fd = Face_Detection(args.face_detection_model, args.device)
    network_hp = Head_Pose_Estimation(args.head_pose_model, args.device)
    network_fl = Facial_Landmarks_Detection(args.facial_landmarks_model,
                                            args.device)
    network_ge = Gaze_Estimation(args.gaze_estimation_model, args.device)

    mouse_cont = MouseController(args.mouse_precision, args.mouse_speed)

    starting_loading = time.time()
    network_fd.load_model()
    network_hp.load_model()
    network_fl.load_model()
    network_ge.load_model()
    duration_loading = time.time() - starting_loading

    input_type = handle_input(args.input)
    feed = InputFeeder(input_type=input_type, input_file=args.input)
    feed.load_data()

    starting_inference = time.time()
    for flag, frame in feed.next_batch():
        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        out_frame, face, face_coords = network_fd.predict(
            frame, args.prob_threshold, args.display)
        if len(face_coords) == 0:
            log.error("There is no face in the stream!")
            continue

        out_frame, head_angle = network_hp.predict(out_frame, face,
                                                   face_coords, args.display)
        out_frame, eye_left, eye_right, eye_center = network_fl.predict(
            out_frame, face, face_coords, args.display)
        out_frame, gaze = network_ge.predict(out_frame, eye_left, eye_right,
                                             eye_center, head_angle,
                                             args.display)
        mouse_cont.move(gaze[0], gaze[1])

        if key_pressed == 27:
            break
        cv2.imshow('Visualization', cv2.resize(out_frame, (600, 400)))

    duration_inference = time.time() - starting_inference
    print("Total loading time is: {}\nTotal inference time is: {}".format(
        duration_loading, duration_inference))

    feed.close()
    cv2.destroyAllWindows()
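# infer_on_stream() above relies on a handle_input() helper to classify the
# input source before constructing the InputFeeder. A plausible sketch,
# assuming classification by file extension; the exact rules are an assumption:
import os


def handle_input(input_path):
    if input_path.lower() == 'cam':
        return 'cam'
    ext = os.path.splitext(input_path)[-1].lower()
    if ext in ('.jpg', '.jpeg', '.bmp', '.png'):
        return 'image'
    return 'video'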
def main(args):
    ## Load models
    try:
        input_file = args.input
        mode_visualization = args.mode_visualization

        if input_file == "CAM":
            input_feeder = InputFeeder("cam")
        else:
            if not os.path.isfile(input_file):
                log.error("ERROR: INPUT PATH IS NOT VALID")
                exit(1)
            input_feeder = InputFeeder("video", input_file)

        face_detection_class = Face_Detection(model=args.face_detection,
                                              device=args.device,
                                              extensions=args.cpu_extension)
        face_landmarks_class = Landmarks_Detection(model=args.face_landmark,
                                                   device=args.device,
                                                   extensions=args.cpu_extension)
        head_pose_class = Head_Pose(model=args.head_pose,
                                    device=args.device,
                                    extensions=args.cpu_extension)
        gaze_estimation_class = Gaze_Estimation(model=args.gaze_estimation,
                                                device=args.device,
                                                extensions=args.cpu_extension)

        mouse_control = MouseController('medium', 'fast')

        start_time = time.time()

        ## Load the models one by one, timing each
        face_det_time = time.time()
        face_detection_class.load_model()
        print("Face Detection load time: {:.3f} ms".format(
            (time.time() - face_det_time) * 1000))

        face_land_time = time.time()
        face_landmarks_class.load_model()
        print("Facial Landmarks load time: {:.3f} ms".format(
            (time.time() - face_land_time) * 1000))

        head_po_time = time.time()
        head_pose_class.load_model()
        print("Head Pose load time: {:.3f} ms".format(
            (time.time() - head_po_time) * 1000))

        gaze_est_time = time.time()
        gaze_estimation_class.load_model()
        print("Gaze Estimation load time: {:.3f} ms".format(
            (time.time() - gaze_est_time) * 1000))

        total_time = time.time() - start_time
        print("Total loading time: {:.3f} ms".format(total_time * 1000))
        print("All models are loaded successfully.")

        input_feeder.load_data()
        print("Feeder is loaded")
    except Exception:
        log.error("Error occurred while loading models in app", exc_info=True)
        exit(1)

    ## Perform inference
    try:
        start_inference_time = time.time()
        frame_count = 0
        for flag, frame in input_feeder.next_batch():
            if not flag:
                break
            frame_count += 1

            # Show the raw feed every fifth frame
            if frame_count % 5 == 0:
                cv2.imshow('video', cv2.resize(frame, (700, 700)))
            key = cv2.waitKey(60)

            crop_face, face_coords = face_detection_class.predict(
                frame.copy(), args.conf_threshold)
            if type(crop_face) == int:
                log.error("Unable to detect the face.")
                if key == 27:
                    break
                continue

            head_angle = head_pose_class.predict(crop_face.copy())
            left_eye, right_eye, eye_coords = face_landmarks_class.predict(
                crop_face.copy())
            mouse_position, gaze_vector = gaze_estimation_class.predict(
                left_eye, right_eye, head_angle)

            ## Optional visualizations
            if len(mode_visualization) != 0:
                p_frame = frame.copy()
                if 'fd' in mode_visualization:
                    p_frame = crop_face
                if 'fl' in mode_visualization:
                    cv2.rectangle(crop_face,
                                  (eye_coords[0][0] - 10, eye_coords[0][1] - 10),
                                  (eye_coords[0][2] + 10, eye_coords[0][3] + 10),
                                  (0, 255, 0), 1)
                    cv2.rectangle(crop_face,
                                  (eye_coords[1][0] - 10, eye_coords[1][1] - 10),
                                  (eye_coords[1][2] + 10, eye_coords[1][3] + 10),
                                  (0, 255, 0), 1)
                if 'hp' in mode_visualization:
                    cv2.putText(
                        p_frame,
                        "Head Positions: :{:.2f} :{:.2f} :{:.2f}".format(
                            head_angle[0], head_angle[1], head_angle[2]),
                        (10, 20), cv2.FONT_HERSHEY_COMPLEX, 0.25, (0, 255, 0), 1)
                if 'ge' in mode_visualization:
                    # Draw a cross on each eye crop indicating the gaze direction
                    i, j, k = int(gaze_vector[0] * 12), int(gaze_vector[1] * 12), 160
                    l_eye = cv2.line(left_eye.copy(), (i - k, j - k),
                                     (i + k, j + k), (0, 255, 255), 2)
                    cv2.line(l_eye, (i - k, j + k), (i + k, j - k),
                             (255, 0, 255), 2)
                    r_eye = cv2.line(right_eye.copy(), (i - k, j - k),
                                     (i + k, j + k), (0, 255, 255), 2)
                    cv2.line(r_eye, (i - k, j + k), (i + k, j - k),
                             (0, 255, 255), 2)
                    # Paste the annotated eye crops back into the face image
                    crop_face[eye_coords[0][1]:eye_coords[0][3],
                              eye_coords[0][0]:eye_coords[0][2]] = l_eye
                    crop_face[eye_coords[1][1]:eye_coords[1][3],
                              eye_coords[1][0]:eye_coords[1][2]] = r_eye

                cv2.imshow("visual for client", cv2.resize(p_frame, (700, 700)))

            mouse_control.move(mouse_position[0], mouse_position[1])
            if key == 27:
                break

        ## Inference time and frames per second
        total_infer_time = time.time() - start_inference_time
        frames_per_sec = frame_count / total_infer_time
        print("Frame count: {}".format(frame_count))
        print("Total inference time: {:.3f} seconds".format(total_infer_time))
        print("FPS: {:.3f}".format(frames_per_sec))
    except Exception:
        log.error("Error while performing inference in app file", exc_info=True)

    print("All Done...")
    cv2.destroyAllWindows()
    input_feeder.close()
def main():
    try:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s [%(levelname)s] %(message)s",
            handlers=[
                logging.FileHandler("Computer_Pointer_Controller.log"),
                logging.StreamHandler()
            ])
    except Exception:
        print("Log file cannot be created")

    args = build_argparser()
    video_path = args.i
    visualize = args.flags
    count = 0
    fd_inference_time = 0
    fld_inference_time = 0
    hp_inference_time = 0
    ge_inference_time = 0

    MC = MouseController('medium', 'fast')

    logging.info("############## Model Load Time #############")
    start_time = time.time()

    first_model_time = start_time
    FD = Face_Detection(device=args.d, threshold=args.prob, extensions=args.l)
    FD.load_model(model_path=args.f)
    logging.info("Face Detection Model: {:.3f}ms".format(
        1000 * (time.time() - first_model_time)))

    second_model_time = time.time()
    FLD = Facial_Landmarks_Detection(device=args.d, extensions=args.l)
    FLD.load_model(model_path=args.fl)
    logging.info("Facial Landmarks Detection Model: {:.3f}ms".format(
        1000 * (time.time() - second_model_time)))

    third_model_time = time.time()
    HPE = Head_Pose_Estimation(device=args.d, extensions=args.l)
    HPE.load_model(model_path=args.hp)
    logging.info("Head Pose Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - third_model_time)))

    fourth_model_time = time.time()
    GE = Gaze_Estimation(device=args.d, extensions=args.l)
    GE.load_model(model_path=args.g)
    logging.info("Gaze Estimation Model: {:.3f}ms".format(
        1000 * (time.time() - fourth_model_time)))

    logging.info("############## End #########################")
    Total_Model_Load_Time = 1000 * (time.time() - start_time)

    ##### LOADING VIDEO FILE #####
    if video_path == "cam":
        IF = InputFeeder("cam")
    else:
        IF = InputFeeder("video", video_path)
    IF.load_data()

    ##### MODEL INFERENCE #####
    start_inf_time = time.time()
    for flag, frame in IF.next_batch():
        if not flag:
            break
        if count % 5 == 0:
            cv2.imshow('frame', cv2.resize(frame, (500, 500)))
        key = cv2.waitKey(60)
        count = count + 1

        start_time_1 = time.time()
        face, face_coordinates = FD.predict(frame, args.it)
        fd_inference_time += time.time() - start_time_1

        start_time_2 = time.time()
        left_eye_image, right_eye_image, eye_coordinates = FLD.predict(
            face, args.it)
        fld_inference_time += time.time() - start_time_2

        start_time_3 = time.time()
        head_pose_angles = HPE.predict(face, args.it)
        hp_inference_time += time.time() - start_time_3

        start_time_4 = time.time()
        mouse_coordinates, gaze_vector = GE.predict(left_eye_image,
                                                    right_eye_image,
                                                    head_pose_angles, args.it)
        ge_inference_time += time.time() - start_time_4

        if len(visualize) != 0:
            frame_visualize = frame.copy()
            if "fd" in visualize:
                if len(visualize) == 1:
                    cv2.rectangle(frame_visualize,
                                  (face_coordinates[0], face_coordinates[1]),
                                  (face_coordinates[2], face_coordinates[3]),
                                  (255, 0, 255), 2)
                else:
                    frame_visualize = face.copy()
            if "fld" in visualize:
                if "fd" not in visualize:
                    frame_visualize = face.copy()
                cv2.circle(frame_visualize,
                           (eye_coordinates['left_eye'][0],
                            eye_coordinates['left_eye'][1]), 25, (0, 0, 255), 2)
                cv2.circle(frame_visualize,
                           (eye_coordinates['right_eye'][0],
                            eye_coordinates['right_eye'][1]), 25, (0, 0, 255), 2)
            if "hp" in visualize:
                cv2.putText(
                    frame_visualize,
                    "Pose Angles: yaw:{:.2f} | pitch:{:.2f} | roll:{:.2f}".format(
                        head_pose_angles[0], head_pose_angles[1],
                        head_pose_angles[2]), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.255, (0, 255, 0), 1)
            if "ge" in visualize:
                # Scale the gaze arrow relative to the face height
                h = face.shape[0]
                arrow = h * 0.7
                arrow_X = gaze_vector[0] * arrow
                arrow_Y = -gaze_vector[1] * arrow
                cv2.arrowedLine(frame_visualize,
                                (eye_coordinates['left_eye'][0],
                                 eye_coordinates['left_eye'][1]),
                                (int(eye_coordinates['left_eye'][0] + arrow_X),
                                 int(eye_coordinates['left_eye'][1] + arrow_Y)),
                                (255, 0, 0), 2)
                cv2.arrowedLine(frame_visualize,
                                (eye_coordinates['right_eye'][0],
                                 eye_coordinates['right_eye'][1]),
                                (int(eye_coordinates['right_eye'][0] + arrow_X),
                                 int(eye_coordinates['right_eye'][1] + arrow_Y)),
                                (255, 0, 0), 2)
            if count % 5 == 0:
                cv2.imshow('Visualization',
                           cv2.resize(frame_visualize, (500, 500)))

        if count % 5 == 0:
            MC.move(mouse_coordinates[0], mouse_coordinates[1])
        if key == 27:
            break

    Total_Inference_Time = time.time() - start_inf_time
    if count > 0:
        logging.info("############## Models Inference time #######")
        logging.info("Face Detection: {:.3f}ms".format(
            1000 * fd_inference_time / count))
        logging.info("Facial Landmarks Detection: {:.3f}ms".format(
            1000 * fld_inference_time / count))
        logging.info("Head Pose Estimation: {:.3f}ms".format(
            1000 * hp_inference_time / count))
        logging.info("Gaze Estimation: {:.3f}ms".format(
            1000 * ge_inference_time / count))
        logging.info("############## End #########################")

    logging.info("############## Summarized Results ##########")
    logging.info("Total Model Load Time: {:.3f}ms".format(Total_Model_Load_Time))
    logging.info("Total Inference Time: {:.3f}s".format(Total_Inference_Time))
    logging.info("FPS: {:.3f}".format(count / Total_Inference_Time))
    logging.info("############## End #########################")

    cv2.destroyAllWindows()
    IF.close()
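# The last main() above calls build_argparser() and then reads args.f, args.fl,
# args.hp, args.g, args.i, args.d, args.l, args.prob, args.flags and args.it.
# A sketch of a matching parser; the help texts and defaults are assumptions:
import argparse


def build_argparser():
    parser = argparse.ArgumentParser(description="Computer Pointer Controller")
    parser.add_argument('-f', required=True,
                        help='Path to the Face Detection model')
    parser.add_argument('-fl', required=True,
                        help='Path to the Facial Landmarks Detection model')
    parser.add_argument('-hp', required=True,
                        help='Path to the Head Pose Estimation model')
    parser.add_argument('-g', required=True,
                        help='Path to the Gaze Estimation model')
    parser.add_argument('-i', required=True,
                        help='Path to the input video, or "cam" for webcam')
    parser.add_argument('-d', default='CPU',
                        help='Target device: CPU, GPU, FPGA or MYRIAD')
    parser.add_argument('-l', default=None,
                        help='Path to a CPU extension library, if required')
    parser.add_argument('-prob', type=float, default=0.6,
                        help='Probability threshold for face detection')
    parser.add_argument('-flags', nargs='*', default=[],
                        help='Visualizations to draw: fd fld hp ge')
    parser.add_argument('-it', type=float, default=0.6,
                        help='Per-model inference threshold passed to predict()')
    return parser.parse_args()


if __name__ == '__main__':
    main()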