def __init__(self, blaze_weight, anchors, scale: float = 1.0):
    super().__init__()
    # Build the BlazeFace detector and put it in evaluation mode.
    face_detector = BlazeFace().to(device)
    face_detector.load_weights(blaze_weight)
    face_detector.load_anchors(anchors)
    _ = face_detector.train(False)
    # scale=1.0 means margin=0, i.e. no extra border around detected faces.
    self.extractor = FaceExtractor(face_detector, margin=scale - 1)
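
# Hedged usage sketch: the source does not show which class this __init__
# belongs to, so the name Extractor below is hypothetical; `device`,
# `BlazeFace`, and `FaceExtractor` must already be in scope.
#
#   extractor_module = Extractor("blazeface.pth", "anchors.npy", scale=1.2)
#   # scale=1.2 gives margin=0.2, i.e. a 20% border around each detected face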
import cv2
import face_recognition
import torch

from blazeface import BlazeFace
from helpers_read_video_1 import VideoReader
from helpers_face_extract_1 import FaceExtractor


def process_video(video_path, filename, image_path, original):
    gpu = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    facedet = BlazeFace().to(gpu)
    facedet.load_weights("blazeface.pth")
    facedet.load_anchors("anchors.npy")
    _ = facedet.train(False)

    # Sample a fixed number of random frames from the video.
    frames_per_video = 10
    video_reader = VideoReader()
    video_read_fn = lambda x: video_reader.read_random_frames(
        x, num_frames=frames_per_video)
    face_extractor = FaceExtractor(video_read_fn, facedet)
    faces = face_extractor.process_video(video_path)

    # Only look at one face per frame.
    face_extractor.keep_only_best_face(faces)

    n = 0
    for frame_data in faces:
        for face in frame_data["faces"]:
            # Re-detect the face inside the extracted crop to get a tight box.
            face_locations = face_recognition.face_locations(face)
            for face_location in face_locations:
                top, right, bottom, left = face_location
                face_image = face[top:bottom, left:right]
                resized_face = cv2.resize(face_image, (224, 224),
                                          interpolation=cv2.INTER_AREA)
                # The crops are RGB; OpenCV writes BGR, so convert before saving.
                resized_face = cv2.cvtColor(resized_face, cv2.COLOR_RGB2BGR)
                cv2.imwrite(
                    image_path + "/" + filename[:-4] + original + "_" + str(n) + ".jpg",
                    resized_face,
                    [int(cv2.IMWRITE_JPEG_QUALITY), 85])
                n += 1
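
# Example driver for process_video above; the file and directory names are
# placeholders (assumptions, not from the source). With original="_REAL" a
# crop is saved as e.g. "clip_REAL_0.jpg".
if __name__ == "__main__":
    import os
    os.makedirs("extracted_faces", exist_ok=True)
    process_video("videos/clip.mp4", "clip.mp4", "extracted_faces", "_REAL")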
def _load_face_extractor(self):
    """
    Init and return the face extractor object (implemented in
    deepfakes-inference-demo/helpers/face_extract_1), which combines a
    video reader function with a face detector.
    """
    import sys
    sys.path.insert(0, os.path.join(self.root_path, "blazeface-pytorch"))
    sys.path.insert(
        0, os.path.join(self.root_path, "deepfakes-inference-demo"))

    # Load the face detection model BlazeFace,
    # based on https://github.com/tkat0/PyTorch_BlazeFace/
    from blazeface import BlazeFace
    facedet = BlazeFace().to(self.gpu)
    # Load the pretrained weights and anchor boxes
    facedet.load_weights(
        os.path.join(self.root_path, "blazeface-pytorch/blazeface.pth"))
    facedet.load_anchors(
        os.path.join(self.root_path, "blazeface-pytorch/anchors.npy"))
    # Set the module in evaluation mode
    _ = facedet.train(False)

    from helpers.read_video_1 import VideoReader
    from helpers.face_extract_1 import FaceExtractor

    # Number of frames to read from the video, taken at regular intervals
    # from the beginning to the end of the video
    self.frames_per_video = 17
    # Init the video reader and a lambda that reads the frames,
    # where x is the video path
    video_reader = VideoReader()
    video_read_fn = lambda x: video_reader.read_frames(
        x, num_frames=self.frames_per_video)
    # Init the face extractor with the video reader function and the face detector
    face_extractor = FaceExtractor(video_read_fn, facedet)
    return face_extractor
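
# Hedged usage sketch for the extractor returned above; the owning class and
# the video path are not shown in the source:
#
#   face_extractor = self._load_face_extractor()
#   faces = face_extractor.process_video("path/to/video.mp4")
#   # `faces` is a list of per-frame dicts whose "faces" entry holds the
#   # crops, matching how the other snippets in this file consume it.
#   face_extractor.keep_only_best_face(faces)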
import os
import random
import sys

import torch
from torchvision import transforms
# MTCNN with a select_largest argument comes from facenet_pytorch.
from facenet_pytorch import MTCNN

sys.path.insert(1, 'helpers')
sys.path.insert(1, 'model')
sys.path.insert(1, 'weight')

from cvit import CViT
from helpers_read_video_1 import VideoReader
from helpers_face_extract_1 import FaceExtractor
from blazeface import BlazeFace

device = 'cuda' if torch.cuda.is_available() else 'cpu'

facedet = BlazeFace().to(device)
facedet.load_weights("helpers/blazeface.pth")
facedet.load_anchors("helpers/anchors.npy")
_ = facedet.train(False)

# ImageNet normalization statistics.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = transforms.Compose([transforms.Normalize(mean, std)])

tresh = 50  # prediction threshold (percent)
sample = 'sample__prediction_data/'
ran = random.randint(0, 400)
ran_min = abs(ran - 1)
filenames = sorted([x for x in os.listdir(sample)
                    if x[-4:] == ".mp4"])  # [ran_min, ran] - select video randomly

# NOTE: the MTCNN call was cut off in the source; device=device is an
# assumed completion of the argument list.
mtcnn = MTCNN(select_largest=False,
              device=device)
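
# Sketch: preparing one face crop for the CViT model. The function name and
# the 224x224 input size are assumptions; 224x224 matches the face size used
# elsewhere in this file, and the crop is expected as an RGB uint8 array.
def preprocess_face(face_rgb_uint8):
    # HWC uint8 in [0, 255] -> CHW float in [0, 1], then ImageNet-normalize.
    face = torch.from_numpy(face_rgb_uint8).permute(2, 0, 1).float() / 255.0
    return normalize_transform(face).unsqueeze(0)  # add a batch dimension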