def update_calibrate(self, face: m.Face):
    """Store the current face and accumulate calibration data.

    Vectors are only collected when both eyes and both outer eye
    corners were detected; once NUMBER_CALLIBRATE_DATA samples exist,
    the mean eye vector is computed and cached.
    """
    self.face = face
    # Same short-circuit order as the original nested checks:
    # right eye / right corner first, then left eye / left corner.
    if (face.get_right_eye() and face.get_pos_outer_right_eye_corner()
            and face.get_left_eye() and face.get_pos_outer_left_eye_corner()):
        self.find_vectors()
        if self.get_number_of_data() == cons.NUMBER_CALLIBRATE_DATA:
            self.mean_eye_vector = self.calculate_mean(self.eye_vector)
def main(_):
    """Entry point: parse CLI arguments and run training when requested."""
    cli_args = parser.parse_args()
    with tf.Session() as session:
        face_model = Face(session, cli_args)
        if cli_args.phase == "train":
            face_model.train(cli_args)
            print('train mode')
def _read_face(self):
    """Parse one face record from the binary stream.

    Record layout (little-endian): 4 x uint16 vertex indices,
    1 x uint16 texture id, then 2 unknown bytes that are skipped.
    """
    normal = self._read_normal()
    # four little-endian uint16 vertex indices (8 bytes total)
    vertex_indices = struct.unpack("<4H", self.data.read(8))
    # single little-endian uint16 texture id
    (texture_id,) = struct.unpack("<H", self.data.read(2))
    self.data.read(2)  # skip two bytes of unknown purpose
    return Face(vertex_indices, normal, normal.group, texture_id)
def draw_eye_region(img: np.ndarray, face: c.Face):
    """Draw a rectangle around each eye region on *img* (in place).

    The original re-evaluated the same getter chains
    (``face.get_*_eye().get_pos_eye()...``) up to four times per eye;
    here each chain is evaluated once and the identical left/right
    drawing code is folded into one loop (left first, then right,
    preserving the original draw order).
    """
    for eye in (face.get_left_eye(), face.get_right_eye()):
        region = eye.get_pos_eye()
        corner = region.get_upper_left_corner()
        cv2.rectangle(
            img,
            (corner.x, corner.y),
            (corner.x + region.width, corner.y + region.height),
            cons.COLOR_EYE_IND)
def detect(self, image):
    """Detect the first face in *image*.

    Returns a Face, or None when no face is found and the recovery
    budget is exhausted. While detection is temporarily lost, the last
    known face keeps being returned for up to ``max_recovery`` calls.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    detections = self.face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5)
    if len(detections) == 0:
        # Nothing found this frame: give up on the cached face once the
        # recovery budget is spent, otherwise keep serving it.
        if self.num_recovery >= self.max_recovery:
            self.last_face = None
            self.num_recovery = 0
        if self.last_face is not None:
            self.num_recovery += 1
        return self.last_face
    x, y, w, h = detections[0]
    detected = Face(x, y, w, h)
    self.last_face = detected
    self.num_recovery = 0
    return detected
def main(_):
    """Entry point: dispatch on ``args.phase``.

    Supported phases: train, inference, lightcnn, faceparsing, net.
    The ``net`` phase runs an interactive command loop over a Net
    connection on ports 5010/5011 until 'q' or an unknown command.
    """
    args = parser.parse_args()
    log.init("FaceNeural", logging.DEBUG, log_path="output/log.txt")
    with tf.Session() as sess:
        if args.phase == "train":
            model = Face(sess, args)
            model.train(args)
            log.info('train mode')
        elif args.phase == "inference":
            log.info("inference")
            model = Face(sess, args)
            model.inference(args)
        elif args.phase == "lightcnn":
            log.info("light cnn test")
        elif args.phase == "faceparsing":
            log.info("faceparsing")
        elif args.phase == "net":
            log.info("net start with ports (%d, %d)", 5010, 5011)
            net = Net(5010, 5011)
            while True:
                # Was `raw_input` (Python 2-only, NameError on Python 3);
                # this codebase uses Python 3 syntax (function annotations),
                # so `input` is the correct builtin.
                command = input("command: \n")
                if command == "s":
                    msg = input("input: ")
                    net.only_send(msg)
                elif command == 'r':
                    msg = input("input: ")
                    net.send_recv(msg)
                elif command == "q":
                    net.only_send("quit")
                    net.close()
                    break
                else:
                    log.error("unknown code, quit")
                    net.close()
                    break
def draw_eye_pupils(img: np.ndarray, face: c.Face):
    """Mark both pupil centers on *img* with small circles (in place).

    Fix: the left eye previously used a camelCase accessor chain
    (``getPupil().getGlobalPosition()``) inconsistent with the
    snake_case API used for the right eye and throughout the rest of
    the drawing helpers; both eyes now use the same accessors.
    """
    c_l = face.get_left_eye().get_pupil().get_global_position_center()
    cv2.circle(img, (c_l.x, c_l.y), cons.RADIUS_PUPIL_IND, cons.COLOR_PUPIL_IND)
    c_r = face.get_right_eye().get_pupil().get_global_position_center()
    cv2.circle(img, (c_r.x, c_r.y), cons.RADIUS_PUPIL_IND, cons.COLOR_PUPIL_IND)
def draw_eye_corners(img: np.ndarray, face: c.Face):
    """Mark the outer corner of each eye on *img* with a small circle."""
    left_corner = face.get_pos_outer_left_eye_corner()
    right_corner = face.get_pos_outer_right_eye_corner()
    cv2.circle(img, (left_corner.x, left_corner.y),
               cons.RADIUS_EYECORNER_IND, cons.COLOR_EYECORNER_IND)
    cv2.circle(img, (right_corner.x, right_corner.y),
               cons.RADIUS_EYECORNER_IND, cons.COLOR_EYECORNER_IND)
def recognize_face(self, img: np.ndarray, face_box: Box) -> Face:
    """Compute facial landmarks and a descriptor for the face in *face_box*.

    Runs the dlib shape predictor on the box, feeds the resulting
    landmarks to the recognition model, and bundles box, landmarks and
    descriptor into a Face.
    """
    landmarks: dlib.full_object_detection = self.face_shape_predictor(
        img, face_box.to_dlib_rect())
    embedding = np.asarray(
        self.face_recognition_model.compute_face_descriptor(img, landmarks))
    return Face(box=face_box, shape=landmarks, descriptor=embedding)