def test_batched_face_locations(self):
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
    images = [img, img, img]

    batched_detected_faces = api.batch_face_locations(images, number_of_times_to_upsample=0)

    for detected_faces in batched_detected_faces:
        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (154, 611, 390, 375))
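# save_keypoints_video below relies on two helpers, move_path and scale_frame,
# that are defined elsewhere in the project. The versions here are minimal
# sketches of the assumed behavior (re-rooting a path under a new folder and
# downscaling a frame), not the original implementations; max_width is a
# hypothetical parameter.
import cv2
import face_recognition
import numpy as np
from pathlib import Path


def move_path(path, input_folder, output_folder):
    """Re-root `path` from `input_folder` into `output_folder` (assumed behavior)."""
    return Path(output_folder) / Path(path).relative_to(input_folder)


def scale_frame(frame, max_width=450):
    """Downscale `frame` so its width is at most `max_width` pixels (assumed cap)."""
    h, w = frame.shape[:2]
    if w <= max_width:
        return frame
    scale = max_width / w
    return cv2.resize(frame, (0, 0), fx=scale, fy=scale)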
def save_keypoints_video(video_path, input_folder, output_folder, tolerance,
                         batch_size, upsample=0):
    video = cv2.VideoCapture(str(video_path))
    out_path = move_path(video_path, input_folder, output_folder)
    out_path = out_path.with_suffix('')
    known_faces = []
    frame_count = 0
    frames = []

    while video.isOpened():
        ret, frame = video.read()
        if ret:
            # face_recognition expects RGB images; OpenCV decodes frames as BGR.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = scale_frame(frame)
            frames.append(frame)

        # Process a full batch, or whatever is left once the video ends.
        if (len(frames) == batch_size) or not ret:
            batch_of_face_locations = face_recognition.batch_face_locations(
                frames, number_of_times_to_upsample=upsample, batch_size=batch_size)

            # Now let's list all the faces we found in this batch of frames.
            for i, face_locations in enumerate(batch_of_face_locations):
                frame_count += 1
                encodings, landmarks = face_recognition.face_encodings_and_landmarks(
                    frames[i], known_face_locations=face_locations)
                num_faces = landmarks.shape[0]
                if num_faces == 0:
                    continue

                for encoding, landmark_array in zip(encodings, landmarks):
                    # Match this encoding against every identity seen so far.
                    distances = face_recognition.face_distance(known_faces, encoding)
                    if len(distances) == 0 or distances.min() > tolerance:
                        # No match: register a new identity.
                        known_idx = len(known_faces)
                        known_faces.append(encoding)
                    else:
                        # Match: refine the stored encoding with a running average.
                        known_idx = int(np.argmin(distances))
                        known_faces[known_idx] = np.mean(
                            [known_faces[known_idx], encoding], axis=0)

                    # Save landmarks as <out_path>/<identity>/<frame>.npy
                    file_path = Path(out_path, f"{known_idx}", f"{frame_count}.npy")
                    file_path.parent.mkdir(parents=True, exist_ok=True)
                    np.save(file_path, landmark_array)

            # Clear the frames array to start the next batch
            frames = []

        if not ret:
            break

    video.release()
    print(f"finished video {out_path}")
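# A minimal usage sketch. The folder names, glob pattern, and parameter values
# below are assumptions for illustration, not taken from the original script;
# tolerance=0.6 and batch_size=128 are the face_recognition library defaults.
if __name__ == "__main__":
    input_folder = Path("videos/raw")
    output_folder = Path("videos/keypoints")
    for video_path in sorted(input_folder.rglob("*.mp4")):
        save_keypoints_video(video_path, input_folder, output_folder,
                             tolerance=0.6, batch_size=128)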