This function is only for this Face Recognition System; if you want to import it
into other modules, please modify it first.
---------Created by ZOU Zijie :)
"""
import time

import dlib
from imutils import face_utils

# /------ Initial logger ------/
log_timee = time.strftime("_%d-%b-%Y_%H:%M:%S", time.localtime())

# /--- Dlib initialization ---/
model = './models/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(model)
fa = face_utils.FaceAligner(predictor, desiredFaceWidth=250)


def log_time():
    # /-- 1 for info; 2 for warning; 3 for error --/
    log_time = time.strftime("%d-%b-%Y_%H:%M:%S", time.localtime())
    return log_time


def align_unknown():
    user = "******"
    path = './Users/people_ori/Unkonwn'
    un_vec = './Users/people_vectors/Unkonwn'
    count = 0
# Random colours for drawing tracked points
color = np.random.randint(0, 255, (100, 3))

cap = cv2.VideoCapture("stable.mp4")
cap.set(3, 1280)  # frame width
cap.set(4, 720)   # frame height

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./68Marks.dat")

p0 = []
Face_Counter = 0
old_gray = None
aligner = face_utils.FaceAligner(predictor, desiredFaceWidth=512,
                                 desiredFaceHeight=512,
                                 desiredLeftEye=(0.30, 0.30))

while True:
    ret, frame = cap.read()
    if not ret:  # end of the video or a failed read
        break

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    old_gray = frame_gray
    print(old_gray.shape)

    dets = detector(frame_gray, 0)
    if len(dets) != 0:
        # Draw a box around the first detected face
        face = dets[0]
        Face_Counter = Face_Counter + 1
        left = face.left()
        right = face.right()
        bottom = face.bottom()
        top = face.top()
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
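
# --- Hedged sketch, not part of the original script --------------------------
# The colour table, p0 and old_gray set up above are the usual ingredients of
# Lucas-Kanade point tracking, but the loop shown here stops before using them.
# The standalone helper below (the name track_landmarks is hypothetical, and
# numpy/cv2 are assumed imported as above) only illustrates how the 68 landmark
# points could seed cv2.calcOpticalFlowPyrLK between two consecutive grey frames.
def track_landmarks(prev_gray, next_gray, landmarks):
    # landmarks: (68, 2) array, e.g. from face_utils.shape_to_np(predictor(...))
    p_prev = landmarks.astype(np.float32).reshape(-1, 1, 2)
    p_next, status, _err = cv2.calcOpticalFlowPyrLK(
        prev_gray, next_gray, p_prev, None, winSize=(15, 15), maxLevel=2)
    ok = status.ravel() == 1
    # Return matched (new, old) point pairs that were tracked successfully
    return p_next[ok], p_prev[ok]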
def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
    self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    self.fa = face_utils.FaceAligner(self.predictor, desiredFaceWidth=256)
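
# --- Hedged usage note, not part of the original class -----------------------
# The surrounding class definition is not shown, so the call sequence below uses
# a hypothetical name (FacePreprocessor) purely to illustrate how the detector,
# predictor and FaceAligner wired up in this __init__ are normally combined:
#
#     pre = FacePreprocessor()
#     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#     rects = pre.detector(gray, 0)
#     aligned = [pre.fa.align(image, gray, r) for r in rects]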
def main():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Unable to connect to camera.")
        return

    counter = 0
    frame_count = 0
    record = False

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(face_landmark_path)
    fa = face_utils.FaceAligner(predictor, desiredFaceWidth=256)

    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break

        # Mirror the frame so it behaves like a selfie preview
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        res = img.copy()

        # Detect faces in the frame
        face_rects = detector(img, 0)
        for i in range(len(face_rects)):
            mask = np.zeros(img.shape[:2], dtype=np.uint8)

            # Get the 68 face landmarks and estimate the head pose
            face_rect = face_rects[i]
            shape = predictor(img, face_rect)
            shape = face_utils.shape_to_np(shape)
            reprojectdst, euler_angle = get_head_pose(shape)

            # Build a convex-hull mask used to crop out the face region
            hull = cv2.convexHull(shape)
            cv2.fillPoly(mask, pts=[hull], color=1)

            # Visualise the hull, the landmarks and the reprojected pose box
            cv2.polylines(res, [hull], True, (255, 0, 0))
            for (x, y) in shape:
                cv2.circle(res, (x, y), 1, (0, 255, 0), -1)
            for start, end in line_pairs:
                cv2.line(res, reprojectdst[start], reprojectdst[end], (0, 0, 255))

            # Clamp the bounding box to the image borders
            (x, y, w, h) = face_utils.rect_to_bb(face_rect)
            x = np.clip(x, 0, img.shape[1])
            y = np.clip(y, 0, img.shape[0])
            w = np.clip(w, 1, img.shape[1] - x)
            h = np.clip(h, 1, img.shape[0] - y)

            # Mask out everything but the face, then crop and align it
            img_masked = cv2.bitwise_and(img, img, mask=mask)
            faceOrig = cv2.resize(img_masked[y:y + h, x:x + w], (256, 256))
            faceAligned = cv2.resize(fa.align(img_masked, gray, face_rect),
                                     (img.shape[1], img.shape[0]))

            # While recording, save every second aligned face to the database
            if record:
                if frame_count % 2 == 0:
                    path = './face_database/' + person_name + '/' + person_name + str(counter) + '.jpg'
                    counter += 1
                    cv2.imwrite(path, faceAligned)

            res = np.hstack((res, faceAligned))
            frame_count += 1

        cv2.imshow("demo", res)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('r'):
            if not record:
                record = True
                print("recording...")
            else:
                record = False
                print("stop recording")
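
# --- Hedged sketch, not part of the original script --------------------------
# main() relies on module-level names defined elsewhere in this file
# (face_landmark_path, person_name, get_head_pose, line_pairs). The guard below
# only illustrates how the capture loop is typically launched; 'r' toggles
# recording into ./face_database/<person_name>/ and 'q' quits.
if __name__ == '__main__':
    main()
    cv2.destroyAllWindows()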
def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
    # Resolve the landmark model relative to this file, not the working directory
    datapath = pathlib.Path(__file__).parent.absolute()
    datapath = os.path.join(datapath, "shape_predictor_68_face_landmarks.dat")
    self.predictor = dlib.shape_predictor(datapath)
    self.fa = face_utils.FaceAligner(self.predictor, desiredFaceWidth=256)
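
# Note (added): resolving the .dat file relative to __file__ (above) instead of
# a bare relative path means the model is still found when this module is
# imported from a different working directory, unlike the earlier __init__ that
# hard-codes "shape_predictor_68_face_landmarks.dat".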