def load_models(self):
    """ Loads the detection, landmarks and reidentification models.

    Reads the model paths from the genisysai configuration, builds the
    three pipeline stages and deploys each of them onto the configured
    device via the shared inference context.
    """
    conf = self.Helpers.confs["genisysai"]
    runas = conf["runas"]

    # Read the three networks from their configured paths.
    face_detector_net = self.load_model(conf["detection"])
    # Fix the detector input resolution expected by the pipeline.
    face_detector_net.reshape({"data": [1, 3, 384, 672]})
    landmarks_net = self.load_model(conf["landmarks"])
    face_reid_net = self.load_model(conf["reidentification"])

    # Wrap the raw networks in their pipeline-stage helpers.
    self.face_detector = FaceDetector(face_detector_net,
                                      confidence_threshold=0.6,
                                      roi_scale_factor=1.15)
    self.landmarks_detector = LandmarksDetector(landmarks_net)
    self.face_identifier = FaceIdentifier(face_reid_net,
                                          match_threshold=0.3,
                                          match_algo='HUNGARIAN')

    # Deploy each stage; the two downstream stages are queued so several
    # faces from one frame can be processed asynchronously.
    self.face_detector.deploy(runas, self.context)
    self.landmarks_detector.deploy(runas, self.context, queue_size=self.qs)
    self.face_identifier.deploy(runas, self.context, queue_size=self.qs)

    self.Helpers.logger.info("Models loaded")
class GeniSysAI():
    """ GeniSysAI Class

    GeniSysAI functions for the Hospital Intelligent Automation System.
    Wraps face detection, landmark detection and face reidentification
    into a single frame-processing pipeline, plus helpers for drawing
    the results onto frames.
    """

    def __init__(self):
        """ Sets up the helper, queue size and inference context. """
        self.Helpers = Helpers("GeniSysAI", False)
        # Maximum number of faces processed per frame (async queue size).
        self.qs = 16
        # One device entry per pipeline stage:
        # detection, landmarks, reidentification.
        runas = self.Helpers.confs["genisysai"]["runas"]
        self.context = InferenceContext([runas, runas, runas], "", "", "")
        self.Helpers.logger.info("GeniSysAI Helper Class initialization complete.")

    def connect(self):
        """ Connects to the local GeniSysAI camera. """
        self.lcv = cv2.VideoCapture(self.Helpers.confs["genisysai"]["vid"])
        self.Helpers.logger.info("Connected to GeniSysAI")

    def load_models(self):
        """ Loads all models.

        Reads the three model paths from configuration, builds the
        pipeline stages and deploys them onto the configured device.
        """
        conf = self.Helpers.confs["genisysai"]
        face_detector_net = self.load_model(conf["detection"])
        # Fix the detector input resolution expected by the pipeline.
        face_detector_net.reshape({"data": [1, 3, 384, 672]})
        landmarks_net = self.load_model(conf["landmarks"])
        face_reid_net = self.load_model(conf["reidentification"])

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=0.6,
                                          roi_scale_factor=1.15)
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                              match_threshold=0.3,
                                              match_algo='HUNGARIAN')

        # Downstream stages are queued so several faces from a single
        # frame can be processed asynchronously.
        self.face_detector.deploy(conf["runas"], self.context)
        self.landmarks_detector.deploy(conf["runas"], self.context,
                                       queue_size=self.qs)
        self.face_identifier.deploy(conf["runas"], self.context,
                                    queue_size=self.qs)
        self.Helpers.logger.info("Models loaded")

    def load_model(self, model_path):
        """ Loads a model from path.

        Args:
            model_path: Path to the model's .xml file; the .bin weights
                file is assumed to live alongside it.

        Returns:
            The network object read by the inference engine core.
        """
        model_path = osp.abspath(model_path)
        model_weights_path = osp.splitext(model_path)[0] + ".bin"
        self.Helpers.logger.info("Loading the model from '%s'" % (model_path))
        model = self.context.ie_core.read_network(model_path, model_weights_path)
        self.Helpers.logger.info("Model loaded")
        return model

    def load_known(self):
        """ Loads known data.

        Builds the faces database from the configured data directory and
        registers it with the face identifier for matching.
        """
        self.faces_database = FacesDatabase(
            self.Helpers.confs["genisysai"]["data"], self.face_identifier,
            self.landmarks_detector, self.face_detector, True)
        self.face_identifier.set_faces_database(self.faces_database)
        self.Helpers.logger.info("Database is built, registered %s identities" % (
            len(self.faces_database)))

    def process(self, frame):
        """ Processes a frame.

        Runs the detect -> landmarks -> identify pipeline on one frame.

        Args:
            frame: BGR image in (H, W, C) layout.

        Returns:
            [rois, landmarks, face_identities] for the processed frame.
        """
        orig_image = frame.copy()
        # HWC -> CHW, then add the batch dimension expected by the nets.
        frame = frame.transpose((2, 0, 1))
        frame = np.expand_dims(frame, axis=0)

        # Reset any in-flight requests from the previous frame.
        self.face_detector.clear()
        self.landmarks_detector.clear()
        self.face_identifier.clear()

        self.face_detector.start_async(frame)
        rois = self.face_detector.get_roi_proposals(frame)
        # Cap the number of faces at the async queue size.
        if self.qs < len(rois):
            self.Helpers.logger.info("Too many faces for processing." \
                                     " Will be processed only %s of %s." % \
                                     (self.qs, len(rois)))
            rois = rois[:self.qs]

        self.landmarks_detector.start_async(frame, rois)
        landmarks = self.landmarks_detector.get_landmarks()

        self.face_identifier.start_async(frame, rois, landmarks)
        face_identities, unknowns = self.face_identifier.get_matches()

        outputs = [rois, landmarks, face_identities]
        return outputs

    def draw_text_with_background(self, frame, text, origin,
                                  font=cv2.FONT_HERSHEY_SIMPLEX, scale=1.0,
                                  color=(0, 0, 0), thickness=1,
                                  bgcolor=(255, 255, 255)):
        """ Draws text onto frame over a filled background rectangle.

        Returns:
            (text_size, baseline) as reported by cv2.getTextSize.
        """
        text_size, baseline = cv2.getTextSize(text, font, scale, thickness)
        cv2.rectangle(frame,
                      tuple((origin + (0, baseline)).astype(int)),
                      tuple((origin + (text_size[0], -text_size[1])).astype(int)),
                      bgcolor, cv2.FILLED)
        cv2.putText(frame, text,
                    tuple(origin.astype(int)),
                    font, scale, color, thickness)
        return text_size, baseline

    def draw_detection_roi(self, frame, roi, identity):
        """ Draws the face bounding box and identity label for one ROI.

        Returns:
            (frame, label) where label is the identity label string.
        """
        label = self.face_identifier.get_identity_label(identity.id)

        # Draw face ROI border
        cv2.rectangle(frame,
                      tuple(roi.position), tuple(roi.position + roi.size),
                      (0, 220, 0), 2)

        # Draw identity label
        text_scale = 0.5
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Measure a sample string so the label sits above the box.
        text_size = cv2.getTextSize("H1", font, text_scale, 1)
        line_height = np.array([0, text_size[0][1]])
        # BUGFIX: was `label is "Unknown"` — identity comparison against a
        # string literal only works by CPython interning accident and is a
        # SyntaxWarning on Python 3.8+; use equality.
        if label == "Unknown":
            text = label
        else:
            text = "User #" + label
            # Append match confidence for recognized identities.
            if identity.id != FaceIdentifier.UNKNOWN_ID:
                text += ' %.2f%%' % (100.0 * (1 - identity.distance))
        self.draw_text_with_background(frame, text,
                                       roi.position - line_height * 0.5,
                                       font, scale=text_scale)
        return frame, label

    def draw_detection_keypoints(self, frame, roi, landmarks):
        """ Draws the five facial landmark keypoints for one ROI. """
        # BUGFIX: right_lip_corner was listed twice, drawing the same
        # circle twice; the deduplicated list renders identically.
        keypoints = [landmarks.left_eye,
                     landmarks.right_eye,
                     landmarks.nose_tip,
                     landmarks.left_lip_corner,
                     landmarks.right_lip_corner]
        for point in keypoints:
            # Landmarks are relative to the ROI; scale into frame coords.
            center = roi.position + roi.size * point
            cv2.circle(frame, tuple(center.astype(int)), 2, (0, 255, 255), 2)
        return frame
def __init__(self, path, face_identifier, landmarks_detector, face_detector=None, no_show=False):
    """ Builds the gallery of known faces from a directory of images.

    Each image file in `path` becomes one (or more) gallery entries; the
    file's base name is used as the identity label. When a face detector
    is supplied, faces are located per image and previously-unseen faces
    are offered for saving; otherwise each whole image is treated as a
    single pre-cropped face.

    Args:
        path: Directory containing the face images.
        face_identifier: Produces face descriptors (project type).
        landmarks_detector: Produces facial landmarks (project type).
        face_detector: Optional face detector; None means images are
            already cropped to a single face.
        no_show: Stored flag — presumably suppresses interactive
            display elsewhere in this class; TODO confirm.
    """
    path = osp.abspath(path)
    self.fg_path = path
    self.no_show = no_show
    paths = []
    if osp.isdir(path):
        # Keep only files whose extension is a known image extension.
        # NOTE(review): self.IMAGE_EXTENSIONS is declared outside this view.
        paths = [osp.join(path, f) for f in os.listdir(path)
                 if f.split('.')[-1] in self.IMAGE_EXTENSIONS]
    else:
        log.error("Wrong face images database path. Expected a "
                  "path to the directory containing %s files, "
                  "but got '%s'" %
                  (" or ".join(self.IMAGE_EXTENSIONS), path))
    if len(paths) == 0:
        log.error("The images database folder has no images.")
    self.database = []
    for num, path in enumerate(paths):
        # Identity label is the file name without its extension.
        label = osp.splitext(osp.basename(path))[0]
        image = cv2.imread(path, flags=cv2.IMREAD_COLOR)
        assert len(image.shape) == 3, \
            "Expected an input image in (H, W, C) format"
        assert image.shape[2] in [3, 4], \
            "Expected BGR or BGRA input"
        orig_image = image.copy()
        image = image.transpose((2, 0, 1)) # HWC to CHW
        # Add the batch dimension expected by the networks.
        image = np.expand_dims(image, axis=0)
        if face_detector:
            face_detector.start_async(image)
            rois = face_detector.get_roi_proposals(image)
            if len(rois) < 1:
                log.warning("Not found faces on the image '%s'" % (path))
        else:
            # No detector: treat the whole image as one face ROI.
            w, h = image.shape[-1], image.shape[-2]
            rois = [FaceDetector.Result([0, 0, 0, 0, 0, w, h])]
        for i, roi in enumerate(rois):
            r = [roi]
            # Landmarks feed the identifier's face alignment step.
            landmarks_detector.start_async(image, r)
            landmarks = landmarks_detector.get_landmarks()
            face_identifier.start_async(image, r, landmarks)
            descriptor = face_identifier.get_descriptors()[0]
            if face_detector:
                # Negative result presumably means "no existing match";
                # check_if_face_exist is declared outside this view.
                mm = self.check_if_face_exist(
                    descriptor, face_identifier.get_threshold())
                if mm < 0:
                    # Crop the detected face region from the original
                    # (untransposed) image for saving.
                    crop = orig_image[
                        int(roi.position[1]):int(roi.position[1]+roi.size[1]),
                        int(roi.position[0]):int(roi.position[0]+roi.size[0])]
                    name = self.ask_to_save(crop)
                    if name is None:
                        # Fall back to the file-derived label.
                        name = label
                    self.dump_faces(crop, descriptor, name)
            else:
                log.debug("Adding label {} to the gallery.".format(label))
                self.add_item(descriptor, label)