def __init__(self, args):
    self.gpu_ext = args.gpu_lib
    self.allow_grow = args.allow_grow and not args.no_show

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()
    if args.cpu_lib and 'CPU' in {args.d_fd, args.d_lm, args.d_reid}:
        core.add_extension(args.cpu_lib, 'CPU')

    self.face_detector = FaceDetector(core, args.m_fd,
                                      args.fd_input_size,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(core, args.m_lm)
    self.face_identifier = FaceIdentifier(core, args.m_reid,
                                          match_threshold=args.t_id,
                                          match_algo=args.match_algo)

    self.face_detector.deploy(args.d_fd, self.get_config(args.d_fd))
    self.landmarks_detector.deploy(args.d_lm, self.get_config(args.d_lm), self.QUEUE_SIZE)
    self.face_identifier.deploy(args.d_reid, self.get_config(args.d_reid), self.QUEUE_SIZE)

    log.debug('Building faces database using images from {}'.format(args.fg))
    self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if args.run_detector else None,
                                        args.no_show)
    self.face_identifier.set_faces_database(self.faces_database)
    log.info('Database is built, registered {} identities'.format(len(self.faces_database)))
def __init__(self, args):
    used_devices = set([args.d_fd, args.d_lm, args.d_reid])
    self.context = InferenceContext()
    context = self.context
    context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
    for d in used_devices:
        context.get_plugin(d).set_config(
            {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

    log.info("Loading models")
    face_detector_net = self.load_model(args.m_fd)
    landmarks_net = self.load_model(args.m_lm)
    face_reid_net = self.load_model(args.m_reid)

    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=args.t_id)

    self.face_detector.deploy(args.d_fd, context)
    self.landmarks_detector.deploy(args.d_lm, context,
                                   queue_size=self.QUEUE_SIZE)
    self.face_identifier.deploy(args.d_reid, context,
                                queue_size=self.QUEUE_SIZE)
    log.info("Models are loaded")

    if args.fc == "LOAD":
        # Load a previously saved faces database instead of rebuilding it
        with open(args.fpl, "rb") as f:
            self.faces_database = pickle.load(f)
        log.info("Face database loaded from {}.".format(args.fpl))
    else:
        log.info("Building faces database using images from '%s'" % (args.fg))
        self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                            self.landmarks_detector,
                                            self.face_detector if args.run_detector else None,
                                            args.no_show)
        if args.fc == "SAVE":
            with open(args.fps, "wb") as f:
                pickle.dump(self.faces_database, f)
            log.info("Face database {} saved".format(args.fps))

    self.face_identifier.set_faces_database(self.faces_database)
    log.info("Database is built, registered %s identities" %
             (len(self.faces_database)))

    self.allow_grow = args.allow_grow and not args.no_show
def __init__(self, varsd):
    used_devices = set([varsd["d_fd"], varsd["d_lm"], varsd["d_reid"]])
    self.context = InferenceContext(used_devices, varsd["cpu_lib"],
                                    varsd["gpu_lib"], varsd["perf_stats"])
    context = self.context

    log.info("Loading models")
    face_detector_net = self.load_model(varsd["m_fd"])

    assert (varsd["fd_input_height"] and varsd["fd_input_width"]) or \
           (varsd["fd_input_height"] == 0 and varsd["fd_input_width"] == 0), \
        "Both -fd_iw and -fd_ih parameters should be specified for reshape"

    if varsd["fd_input_height"] and varsd["fd_input_width"]:
        face_detector_net.reshape({
            "data": [1, 3, varsd["fd_input_height"], varsd["fd_input_width"]]})

    landmarks_net = self.load_model(varsd["m_lm"])
    face_reid_net = self.load_model(varsd["m_reid"])

    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=varsd["t_fd"],
                                      roi_scale_factor=varsd["exp_r_fd"])
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=varsd["t_id"],
                                          match_algo=varsd["match_algo"])

    self.face_detector.deploy(varsd["d_fd"], context)
    self.landmarks_detector.deploy(varsd["d_lm"], context,
                                   queue_size=self.QUEUE_SIZE)
    self.face_identifier.deploy(varsd["d_reid"], context,
                                queue_size=self.QUEUE_SIZE)
    log.info("Models are loaded")

    log.info("Building faces database using images from '%s'" % (varsd["fg"]))
    self.faces_database = FacesDatabase(varsd["fg"], self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if varsd["run_detector"] else None,
                                        varsd["no_show"])
    self.face_identifier.set_faces_database(self.faces_database)
    log.info("Database is built, registered %s identities" %
             (len(self.faces_database)))

    self.allow_grow = varsd["allow_grow"] and not varsd["no_show"]
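# Usage sketch for the dict-based variant above; illustrative only, not part of the
# original demo. Because this __init__ indexes a plain dict (varsd["d_fd"],
# varsd["m_fd"], ...), an argparse.Namespace can be turned into the expected dict
# with the built-in vars(). The option names and default values below, as well as
# the "frame_processor" module name, are assumptions made for the example.
import argparse

from frame_processor import FrameProcessor  # hypothetical module containing the class above


def build_varsd():
    parser = argparse.ArgumentParser(description='Face recognition demo (dict-based config)')
    parser.add_argument('-m_fd', required=True, help='Path to the face detection model (.xml)')
    parser.add_argument('-m_lm', required=True, help='Path to the landmarks regression model (.xml)')
    parser.add_argument('-m_reid', required=True, help='Path to the face re-identification model (.xml)')
    parser.add_argument('-fg', default='', help='Path to the face gallery folder')
    parser.add_argument('-d_fd', default='CPU')
    parser.add_argument('-d_lm', default='CPU')
    parser.add_argument('-d_reid', default='CPU')
    parser.add_argument('-t_fd', type=float, default=0.6, help='Face detection confidence threshold')
    parser.add_argument('-t_id', type=float, default=0.3, help='Re-identification match threshold')
    parser.add_argument('-exp_r_fd', type=float, default=1.15, help='ROI scale factor')
    parser.add_argument('-fd_iw', '--fd_input_width', type=int, default=0)
    parser.add_argument('-fd_ih', '--fd_input_height', type=int, default=0)
    parser.add_argument('--match_algo', default='HUNGARIAN')
    parser.add_argument('--run_detector', action='store_true')
    parser.add_argument('--allow_grow', action='store_true')
    parser.add_argument('--no_show', action='store_true')
    parser.add_argument('--perf_stats', action='store_true')
    parser.add_argument('-l', '--cpu_lib', default='')
    parser.add_argument('-c', '--gpu_lib', default='')
    # vars() converts the Namespace into the dict of keys the constructor reads
    return vars(parser.parse_args())


if __name__ == '__main__':
    frame_processor = FrameProcessor(build_varsd())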
def __init__(self, args):
    used_devices = set([args.d_fd, args.d_lm, args.d_reid])
    self.context = InferenceContext()
    context = self.context
    context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
    for d in used_devices:
        context.get_plugin(d).set_config(
            {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

    log.info("Loading models")
    face_detector_net = self.load_model(args.m_fd)

    assert (args.fd_input_height and args.fd_input_width) or \
           (args.fd_input_height == 0 and args.fd_input_width == 0), \
        "Both -fd_iw and -fd_ih parameters should be specified for reshape"

    if args.fd_input_height and args.fd_input_width:
        face_detector_net.reshape(
            {"data": [1, 3, args.fd_input_height, args.fd_input_width]})

    landmarks_net = self.load_model(args.m_lm)
    face_reid_net = self.load_model(args.m_reid)

    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=args.t_id,
                                          match_algo=args.match_algo)

    self.face_detector.deploy(args.d_fd, context)
    self.landmarks_detector.deploy(args.d_lm, context,
                                   queue_size=self.QUEUE_SIZE)
    self.face_identifier.deploy(args.d_reid, context,
                                queue_size=self.QUEUE_SIZE)
    log.info("Models are loaded")

    log.info("Building faces database using images from '%s'" % (args.fg))
    self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if args.run_detector else None,
                                        args.no_show)
    self.face_identifier.set_faces_database(self.faces_database)
    log.info("Database is built, registered %s identities" %
             (len(self.faces_database)))

    self.allow_grow = args.allow_grow and not args.no_show
def __init__(self, args):
    used_devices = set([args.d_fd, args.d_lm, args.d_reid])
    print(used_devices)
    self.context = InferenceContext()
    context = self.context

    log.info("Loading models")
    # Create the networks
    face_detector_net = self.load_model(args.m_fd)
    landmarks_net = self.load_model(args.m_lm)
    face_reid_net = self.load_model(args.m_reid)

    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=args.t_id,
                                          match_algo=args.match_algo)

    # Load the networks onto their devices
    self.face_detector.deploy(args.d_fd, context, enable_multi=False)
    self.landmarks_detector.deploy(args.d_lm, context,
                                   queue_size=self.QUEUE_SIZE,
                                   enable_multi=args.multi)
    self.face_identifier.deploy(args.d_reid, context,
                                queue_size=self.QUEUE_SIZE,
                                enable_multi=args.multi)
    log.info("Models are loaded")

    log.info("Building faces database using images from '%s'" % (args.fg))
    self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if args.run_detector else None,
                                        args.no_show)
    self.face_identifier.set_faces_database(self.faces_database)
    log.info("Database is built, registered %s identities" %
             (len(self.faces_database)))

    self.allow_grow = args.allow_grow and not args.no_show
def __init__(self, args):
    used_devices = set([args.d_fd, args.d_lm, args.d_reid])
    self.context = InferenceContext()
    context = self.context
    context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
    for d in used_devices:
        context.get_plugin(d).set_config(
            {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

    log.info("Loading models")
    face_detector_net = self.load_model(args.m_fd)
    landmarks_net = self.load_model(args.m_lm)
    face_reid_net = self.load_model(args.m_reid)

    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=args.t_id)

    self.face_detector.deploy(args.d_fd, context)
    self.landmarks_detector.deploy(args.d_lm, context,
                                   queue_size=self.QUEUE_SIZE)
    self.face_identifier.deploy(args.d_reid, context,
                                queue_size=self.QUEUE_SIZE)
    log.info("Models are loaded")

    log.info("Building faces database using images from '%s'" % (args.fg))
    self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if args.run_detector else None,
                                        args.no_show)
    self.face_identifier.set_faces_database(self.faces_database)
    log.info("Database is built, registered %s identities" %
             (len(self.faces_database)))
    # [print(s.label) for s in self.faces_database]

    self.allow_grow = args.allow_grow and not args.no_show
def __init__(self, args):
    self.gpu_ext = args.gpu_lib
    self.perf_count = args.perf_stats
    self.allow_grow = args.allow_grow and not args.no_show

    log.info('Initializing Inference Engine...')
    ie = IECore()
    if args.cpu_lib and 'CPU' in {args.d_fd, args.d_lm, args.d_reid}:
        log.info('Using CPU extensions library "{}"'.format(args.cpu_lib))
        ie.add_extension(args.cpu_lib, 'CPU')

    log.info('Loading networks...')
    self.face_detector = FaceDetector(ie, args.m_fd,
                                      args.fd_input_size,
                                      confidence_threshold=args.t_fd,
                                      roi_scale_factor=args.exp_r_fd)
    self.landmarks_detector = LandmarksDetector(ie, args.m_lm)
    self.face_identifier = FaceIdentifier(ie, args.m_reid,
                                          match_threshold=args.t_id,
                                          match_algo=args.match_algo)

    self.face_detector.deploy(args.d_fd, self.get_config(args.d_fd))
    self.landmarks_detector.deploy(args.d_lm, self.get_config(args.d_lm), self.QUEUE_SIZE)
    self.face_identifier.deploy(args.d_reid, self.get_config(args.d_reid), self.QUEUE_SIZE)

    log.info('Building faces database using images from "{}"'.format(args.fg))
    self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                        self.landmarks_detector,
                                        self.face_detector if args.run_detector else None,
                                        args.no_show)
    self.face_identifier.set_faces_database(self.faces_database)
    log.info('Database is built, registered {} identities'.format(len(self.faces_database)))
class FrameProcessor:
    QUEUE_SIZE = 16

    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_reid])
        self.context = InferenceContext(used_devices, args.cpu_lib,
                                        args.gpu_lib, args.perf_stats)
        context = self.context

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)

        assert (args.fd_input_height and args.fd_input_width) or \
               (args.fd_input_height == 0 and args.fd_input_width == 0), \
            "Both -fd_iw and -fd_ih parameters should be specified for reshape"

        if args.fd_input_height and args.fd_input_width:
            face_detector_net.reshape(
                {"data": [1, 3, args.fd_input_height, args.fd_input_width]})

        landmarks_net = self.load_model(args.m_lm)
        face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=args.t_id,
                                              match_algo=args.match_algo)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm, context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid, context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        log.info("Building faces database using images from '%s'" % (args.fg))
        self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                            self.landmarks_detector,
                                            self.face_detector if args.run_detector else None,
                                            args.no_show)
        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" %
                 (len(self.faces_database)))

        self.allow_grow = args.allow_grow and not args.no_show

    def load_model(self, model_path):
        model_path = osp.abspath(model_path)
        model_weights_path = osp.splitext(model_path)[0] + ".bin"
        log.info("Loading the model from '%s'" % (model_path))
        assert osp.isfile(model_path), \
            "Model description is not found at '%s'" % (model_path)
        assert osp.isfile(model_weights_path), \
            "Model weights are not found at '%s'" % (model_weights_path)
        model = self.context.ie_core.read_network(model_path, model_weights_path)
        log.info("Model is loaded")
        return model

    def process(self, frame):
        assert len(frame.shape) == 3, \
            "Expected input frame in (H, W, C) format"
        assert frame.shape[2] in [3, 4], \
            "Expected BGR or BGRA input"

        orig_image = frame.copy()
        frame = frame.transpose((2, 0, 1))  # HWC to CHW
        frame = np.expand_dims(frame, axis=0)

        self.face_detector.clear()
        self.landmarks_detector.clear()
        self.face_identifier.clear()

        self.face_detector.start_async(frame)
        rois = self.face_detector.get_roi_proposals(frame)
        if self.QUEUE_SIZE < len(rois):
            log.warning("Too many faces for processing."
                        " Will be processed only %s of %s." %
                        (self.QUEUE_SIZE, len(rois)))
            rois = rois[:self.QUEUE_SIZE]

        self.landmarks_detector.start_async(frame, rois)
        landmarks = self.landmarks_detector.get_landmarks()

        self.face_identifier.start_async(frame, rois, landmarks)
        face_identities, unknowns = self.face_identifier.get_matches()
        if self.allow_grow and len(unknowns) > 0:
            for i in unknowns:
                # Skip faces touching the image boundary so that partial crops are never offered for saving
                if rois[i].position[0] == 0.0 or rois[i].position[1] == 0.0 or \
                        (rois[i].position[0] + rois[i].size[0] > orig_image.shape[1]) or \
                        (rois[i].position[1] + rois[i].size[1] > orig_image.shape[0]):
                    continue
                crop = orig_image[int(rois[i].position[1]):int(rois[i].position[1] + rois[i].size[1]),
                                  int(rois[i].position[0]):int(rois[i].position[0] + rois[i].size[0])]
                name = self.faces_database.ask_to_save(crop)
                if name:
                    id = self.faces_database.dump_faces(
                        crop, face_identities[i].descriptor, name)
                    face_identities[i].id = id

        outputs = [rois, landmarks, face_identities]
        return outputs

    def get_performance_stats(self):
        stats = {
            'face_detector': self.face_detector.get_performance_stats(),
            'landmarks': self.landmarks_detector.get_performance_stats(),
            'face_identifier': self.face_identifier.get_performance_stats(),
        }
        return stats
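# Usage sketch for the class above; illustrative only. It assumes OpenCV for frame
# capture and an argparse Namespace ("args") carrying the attributes the constructor
# reads (m_fd, m_lm, m_reid, d_fd, d_lm, d_reid, t_fd, t_id, exp_r_fd, fg, ...).
# process() consumes one BGR frame in (H, W, C) layout and returns
# [rois, landmarks, face_identities], as defined above. The "source" default is an
# assumption (webcam index 0).
import logging as log

import cv2


def run(args, source=0):
    frame_processor = FrameProcessor(args)
    capture = cv2.VideoCapture(source)
    try:
        while True:
            has_frame, frame = capture.read()
            if not has_frame:
                break
            rois, landmarks, identities = frame_processor.process(frame)
            log.debug("Detected %d face(s) in the current frame" % len(rois))
            if args.perf_stats:
                log.info("Performance stats: %s" % frame_processor.get_performance_stats())
    finally:
        capture.release()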
class FrameProcessor:
    QUEUE_SIZE = 16

    def __init__(self, args):
        self.gpu_ext = args.gpu_lib
        self.allow_grow = args.allow_grow and not args.no_show

        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()
        if args.cpu_lib and 'CPU' in {args.d_fd, args.d_lm, args.d_reid}:
            core.add_extension(args.cpu_lib, 'CPU')

        self.face_detector = FaceDetector(core, args.m_fd,
                                          args.fd_input_size,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(core, args.m_lm)
        self.face_identifier = FaceIdentifier(core, args.m_reid,
                                              match_threshold=args.t_id,
                                              match_algo=args.match_algo)

        self.face_detector.deploy(args.d_fd, self.get_config(args.d_fd))
        self.landmarks_detector.deploy(args.d_lm, self.get_config(args.d_lm), self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid, self.get_config(args.d_reid), self.QUEUE_SIZE)

        log.debug('Building faces database using images from {}'.format(args.fg))
        self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                            self.landmarks_detector,
                                            self.face_detector if args.run_detector else None,
                                            args.no_show)
        self.face_identifier.set_faces_database(self.faces_database)
        log.info('Database is built, registered {} identities'.format(len(self.faces_database)))

    def get_config(self, device):
        config = {}
        if device == 'GPU' and self.gpu_ext:
            config['CONFIG_FILE'] = self.gpu_ext
        return config

    def process(self, frame):
        orig_image = frame.copy()

        rois = self.face_detector.infer((frame,))
        if self.QUEUE_SIZE < len(rois):
            log.warning('Too many faces for processing. Will be processed only {} of {}'
                        .format(self.QUEUE_SIZE, len(rois)))
            rois = rois[:self.QUEUE_SIZE]

        landmarks = self.landmarks_detector.infer((frame, rois))
        face_identities, unknowns = self.face_identifier.infer((frame, rois, landmarks))
        if self.allow_grow and len(unknowns) > 0:
            for i in unknowns:
                # Skip faces touching the image boundary so that partial crops are never offered for saving
                if rois[i].position[0] == 0.0 or rois[i].position[1] == 0.0 or \
                        (rois[i].position[0] + rois[i].size[0] > orig_image.shape[1]) or \
                        (rois[i].position[1] + rois[i].size[1] > orig_image.shape[0]):
                    continue
                crop_image = crop(orig_image, rois[i])
                name = self.faces_database.ask_to_save(crop_image)
                if name:
                    id = self.faces_database.dump_faces(
                        crop_image, face_identities[i].descriptor, name)
                    face_identities[i].id = id

        return [rois, landmarks, face_identities]
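# Visualization sketch for the outputs of FrameProcessor.process() above; illustrative
# only. It uses only attributes the class itself relies on: roi.position and roi.size
# are pixel (x, y) / (w, h) pairs, and identity.id is the identifier assigned by the
# face identifier. Label lookup in the faces database is not shown because its exact
# API is not part of this snippet; the raw id is drawn instead.
import cv2


def draw_detections(frame, rois, face_identities):
    for roi, identity in zip(rois, face_identities):
        xmin, ymin = int(roi.position[0]), int(roi.position[1])
        xmax, ymax = int(roi.position[0] + roi.size[0]), int(roi.position[1] + roi.size[1])
        # Bounding box around the detected face
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 220, 0), 2)
        # Identity id above the box
        cv2.putText(frame, 'id: {}'.format(identity.id), (xmin, ymin - 7),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 220, 0), 1)
    return frame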
class FrameProcessor:
    QUEUE_SIZE = 16

    def __init__(self, args):
        used_devices = set([args.d_fd, args.d_lm, args.d_reid])
        self.context = InferenceContext()
        context = self.context
        context.load_plugins(used_devices, args.cpu_lib, args.gpu_lib)
        for d in used_devices:
            context.get_plugin(d).set_config(
                {"PERF_COUNT": "YES" if args.perf_stats else "NO"})

        log.info("Loading models")
        face_detector_net = self.load_model(args.m_fd)
        landmarks_net = self.load_model(args.m_lm)
        face_reid_net = self.load_model(args.m_reid)

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=args.t_fd,
                                          roi_scale_factor=args.exp_r_fd)
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=args.t_id)

        self.face_detector.deploy(args.d_fd, context)
        self.landmarks_detector.deploy(args.d_lm, context,
                                       queue_size=self.QUEUE_SIZE)
        self.face_identifier.deploy(args.d_reid, context,
                                    queue_size=self.QUEUE_SIZE)
        log.info("Models are loaded")

        if args.fc == "LOAD":
            # Load a previously saved faces database instead of rebuilding it
            with open(args.fpl, "rb") as f:
                self.faces_database = pickle.load(f)
            log.info("Face database loaded from {}.".format(args.fpl))
        else:
            log.info("Building faces database using images from '%s'" % (args.fg))
            self.faces_database = FacesDatabase(args.fg, self.face_identifier,
                                                self.landmarks_detector,
                                                self.face_detector if args.run_detector else None,
                                                args.no_show)
            if args.fc == "SAVE":
                with open(args.fps, "wb") as f:
                    pickle.dump(self.faces_database, f)
                log.info("Face database {} saved".format(args.fps))

        self.face_identifier.set_faces_database(self.faces_database)
        log.info("Database is built, registered %s identities" %
                 (len(self.faces_database)))

        self.allow_grow = args.allow_grow and not args.no_show

    def load_model(self, model_path):
        model_path = osp.abspath(model_path)
        model_description_path = model_path
        model_weights_path = osp.splitext(model_path)[0] + ".bin"
        log.info("Loading the model from '%s'" % (model_description_path))
        assert osp.isfile(model_description_path), \
            "Model description is not found at '%s'" % (model_description_path)
        assert osp.isfile(model_weights_path), \
            "Model weights are not found at '%s'" % (model_weights_path)
        model = IENetwork(model_description_path, model_weights_path)
        log.info("Model is loaded")
        return model

    def process(self, frame):
        assert len(frame.shape) == 3, \
            "Expected input frame in (H, W, C) format"
        assert frame.shape[2] in [3, 4], \
            "Expected BGR or BGRA input"

        orig_image = frame.copy()
        frame = frame.transpose((2, 0, 1))  # HWC to CHW
        frame = np.expand_dims(frame, axis=0)

        self.face_detector.clear()
        self.landmarks_detector.clear()
        self.face_identifier.clear()

        self.face_detector.start_async(frame)
        rois = self.face_detector.get_roi_proposals(frame)
        if self.QUEUE_SIZE < len(rois):
            log.warning("Too many faces for processing."
                        " Will be processed only %s of %s." %
                        (self.QUEUE_SIZE, len(rois)))
            rois = rois[:self.QUEUE_SIZE]

        self.landmarks_detector.start_async(frame, rois)
        landmarks = self.landmarks_detector.get_landmarks()

        self.face_identifier.start_async(frame, rois, landmarks)
        face_identities, unknowns = self.face_identifier.get_matches()
        if self.allow_grow and len(unknowns) > 0:
            for i in unknowns:
                # Skip faces touching the image boundary so that partial crops are never offered for saving
                if (rois[i].position[0] == 0.0 or rois[i].position[1] == 0.0 or
                        (rois[i].position[0] + rois[i].size[0] > orig_image.shape[1]) or
                        (rois[i].position[1] + rois[i].size[1] > orig_image.shape[0])):
                    continue
                crop = orig_image[int(rois[i].position[1]):int(rois[i].position[1] + rois[i].size[1]),
                                  int(rois[i].position[0]):int(rois[i].position[0] + rois[i].size[0])]
                name = self.faces_database.ask_to_save(crop)
                if name:
                    id = self.faces_database.dump_faces(
                        crop, face_identities[i].descriptor, name)
                    face_identities[i].id = id

        outputs = [rois, landmarks, face_identities]
        return outputs

    def get_performance_stats(self):
        stats = {
            "face_detector": self.face_detector.get_performance_stats(),
            "landmarks": self.landmarks_detector.get_performance_stats(),
            "face_identifier": self.face_identifier.get_performance_stats(),
        }
        return stats
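# Sketch of the faces-database round trip used by the variant above; illustrative only.
# "LOAD" reads a previously pickled FacesDatabase from args.fpl, any other value rebuilds
# the database from the gallery folder, and "SAVE" pickles the freshly built database to
# args.fps. The flag names (fc, fpl, fps) come from the constructor above; the helper name
# and its build_database callable are assumptions made for the example.
import pickle


def load_or_build_database(args, build_database):
    """build_database is a zero-argument callable returning a FacesDatabase."""
    if args.fc == "LOAD":
        # Reuse a previously saved database
        with open(args.fpl, "rb") as f:
            return pickle.load(f)
    database = build_database()
    if args.fc == "SAVE":
        # Persist the freshly built database for later runs
        with open(args.fps, "wb") as f:
            pickle.dump(database, f)
    return database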