# NOTE(review): this module-level `__init__` is a byte-for-byte duplicate of
# FrameProcessor.__init__ below and sits outside any class (it takes `self`
# but is not a method).  It looks like a stray paste; confirm and remove.
def __init__(self, args):
    """Load inference plugins and models per *args* and deploy the detectors.

    ``args`` is an argparse-style namespace: device names (``d_fd``, ``d_lm``,
    ``d_hp``, ``d_reid``), model paths (``m_fd``, ``m_lm``, ``m_hp``), plugin
    libraries (``cpu_lib``, ``gpu_lib``), thresholds (``t_fd``, ``exp_r_fd``)
    and the ``perf_stats`` flag.
    """
    # One plugin per distinct device (duplicates collapse in the set).
    used_devices = set([args.d_fd, args.d_lm, args.d_hp, args.d_reid])
    self.context = InferenceContext()
    context = self.context
    context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
    for d in used_devices:
        # Enable per-layer performance counters only when requested.
        context.get_plugin(d).set_config(
            {"PERF_COUNT": "YES" if args.perf_stats else "NO"})
    log.info("Loading models")
    face_detector_net = self.load_model(args.m_fd)
    landmarks_net = self.load_model(args.m_lm)
    head_pose_net = self.load_model(args.m_hp)
    # face_reid_net = self.load_model(args.m_reid)
    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.head_pose_detector = HeadPoseDetector(head_pose_net)
    self.face_detector.deploy(args.d_fd, context)
    self.landmarks_detector.deploy(args.d_lm, context,
                                   queue_size=self.QUEUE_SIZE)
    self.head_pose_detector.deploy(args.d_hp, context,
                                   queue_size=self.QUEUE_SIZE)
    log.info("Models are loaded")
class FrameProcessor:
    """Runs face detection, facial-landmark and head-pose estimation on
    single frames through OpenVINO inference plugins.

    A face re-identification stage exists in the surrounding code but is
    disabled here (its model load is commented out), so no
    ``face_identifier`` attribute is created.
    """

    # Max faces processed per frame; also the async request queue depth.
    QUEUE_SIZE = 16

    def __init__(self, args):
        """Load plugins and models per *args* and deploy the detectors.

        ``args`` is an argparse-style namespace: device names (``d_fd``,
        ``d_lm``, ``d_hp``, ``d_reid``), model paths (``m_fd``, ``m_lm``,
        ``m_hp``), plugin libraries (``cpu_lib``, ``gpu_lib``), detector
        tuning (``t_fd``, ``exp_r_fd``) and the ``perf_stats`` flag.
        """
        # One plugin per distinct device (duplicates collapse in the set).
        used_devices = {args.d_fd, args.d_lm, args.d_hp, args.d_reid}
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            # Enable per-layer performance counters only when requested.
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        head_pose_net = self.load_model(args.m_hp)
        # face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.head_pose_detector = HeadPoseDetector(head_pose_net)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm, context,
                                       queue_size=self.QUEUE_SIZE)
        self.head_pose_detector.deploy(args.d_hp, context,
                                       queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

    def load_model(self, model_path):
        """Load an IR model and return the ``IENetwork``.

        *model_path* is the model description (.xml); the weights (.bin)
        file must sit next to it with the same stem.  Both files are
        checked with ``assert`` before loading, so a missing file raises
        ``AssertionError`` (note: stripped under ``python -O``).
        """
        model_path = osp.abspath(model_path)
        model_description_path = model_path
        model_weights_path = osp.splitext(model_path)[0] + ".bin"
        log.info("Loading the model from '%s'" % (model_description_path))
        assert osp.isfile(model_description_path), \
            "Model description is not found at '%s'" % (model_description_path)
        assert osp.isfile(model_weights_path), \
            "Model weights are not found at '%s'" % (model_weights_path)
        model = IENetwork(model_description_path, model_weights_path)
        log.info("Model is loaded")
        return model

    def process(self, frame):
        """Run the full pipeline on one frame.

        *frame* must be an (H, W, C) array with 3 (BGR) or 4 (BGRA)
        channels.  Returns ``[rois, landmarks, head_pose, 'test']``
        (the trailing string is a placeholder kept for caller
        compatibility).  At most ``QUEUE_SIZE`` faces are processed.
        """
        assert len(frame.shape) == 3, \
            "Expected input frame in (H, W, C) format"
        assert frame.shape[2] in [3, 4], \
            "Expected BGR or BGRA input"

        # HWC -> CHW, then add a leading batch dimension for the networks.
        # (Removed a dead `orig_image = frame.copy()` that duplicated the
        # whole frame and was never used.)
        frame = frame.transpose((2, 0, 1))
        frame = np.expand_dims(frame, axis=0)

        # Reset any results left over from the previous frame.
        self.face_detector.clear()
        self.landmarks_detector.clear()
        self.head_pose_detector.clear()
        # self.face_identifier.clear()

        self.face_detector.start_async(frame)
        rois = self.face_detector.get_roi_proposals(frame)
        if self.QUEUE_SIZE < len(rois):
            # Clamp to the queue depth rather than overrun the requests.
            log.warning("Too many faces for processing." \
                        " Will be processed only %s of %s." % \
                        (self.QUEUE_SIZE, len(rois)))
            rois = rois[:self.QUEUE_SIZE]

        self.landmarks_detector.start_async(frame, rois)
        self.head_pose_detector.start_async(frame, rois)
        landmarks = self.landmarks_detector.get_landmarks()
        head_pose = self.head_pose_detector.get_head_pose()

        return [rois, landmarks, head_pose, 'test']

    def get_performance_stats(self):
        """Return per-component performance counters as a dict.

        Bug fix: the original unconditionally read ``self.face_identifier``,
        which ``__init__`` never creates (the re-id model load is commented
        out), so every call raised ``AttributeError``.  Head-pose stats are
        now reported, and the ``'face_identifier'`` entry is included only
        when that component actually exists.
        """
        stats = {
            'face_detector': self.face_detector.get_performance_stats(),
            'landmarks': self.landmarks_detector.get_performance_stats(),
            'head_pose': self.head_pose_detector.get_performance_stats(),
        }
        if hasattr(self, 'face_identifier'):
            stats['face_identifier'] = \
                self.face_identifier.get_performance_stats()
        return stats