def im_test(im):
    face_info = lib.align(im[:, :, (2, 1, 0)], front_face_detector, lmark_predictor)
    # Samples
    if len(face_info) == 0:
        logging.warning('No faces are detected.')
        prob = -1  # we ignore this case
    else:
        # Report how many faces are in the image
        logging.info('{} faces are detected.'.format(len(face_info)))
        max_prob = -1
        # If one face is fake, the image is fake
        for _, point in face_info:
            rois = []
            for i in range(sample_num):
                roi, _ = lib.cut_head([im], point, i)
                rois.append(cv2.resize(roi[0], tuple(cfg.IMG_SIZE[:2])))
            vis_im(rois, 'tmp/vis.jpg')
            prob = solver.test(rois)
            # Average the upper half of the sorted per-ROI scores for this face
            prob = np.mean(
                np.sort(prob[:, 0])[np.round(sample_num / 2).astype(int):])
            if prob >= max_prob:
                max_prob = prob
        prob = max_prob
    return prob
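# --- Illustrative sketch (not part of the original code) --------------------
# The aggregation above keeps only the upper half of the sorted per-ROI scores,
# so a few badly cropped ROIs cannot drag the image-level score down. The
# helper below isolates that step; `aggregate_roi_probs` is a hypothetical name.
import numpy as np

def aggregate_roi_probs(probs, sample_num):
    # probs: (sample_num, num_classes) array of per-ROI scores; column 0 is
    # whichever class the solver reports first.
    keep_from = int(np.round(sample_num / 2))
    return float(np.mean(np.sort(probs[:, 0])[keep_from:]))

# Example: with sample_num = 4, only the two largest per-ROI scores are kept:
# aggregate_roi_probs(np.array([[0.1], [0.9], [0.8], [0.2]]), 4)  ->  0.85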
def im_test(net, im, args):
    face_info = lib.align(im[:, :, (2, 1, 0)], front_face_detector, lmark_predictor)
    # Samples
    if len(face_info) != 1:
        prob = -1
    else:
        _, point = face_info[0]
        rois = []
        for i in range(sample_num):
            roi, _ = lib.cut_head([im], point, i)
            rois.append(cv2.resize(roi[0], (args.input_size, args.input_size)))

        # vis_ = np.concatenate(rois, 1)
        # cv2.imwrite('vis.jpg', vis_)

        # Subtract the per-channel BGR mean and convert the ROIs to NCHW tensors
        bgr_mean = np.array([103.939, 116.779, 123.68])
        bgr_mean = bgr_mean[np.newaxis, :, np.newaxis, np.newaxis]
        bgr_mean = torch.from_numpy(bgr_mean).float().cuda()

        rois = torch.from_numpy(np.array(rois)).float().cuda()
        rois = rois.permute((0, 3, 1, 2))
        prob = net(rois - bgr_mean)
        prob = F.softmax(prob, dim=1)
        prob = prob.data.cpu().numpy()
        # Average the upper half of the sorted class-0 probabilities, then flip
        prob = 1 - np.mean(
            np.sort(prob[:, 0])[np.round(sample_num / 2).astype(int):])
    return prob, face_info
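# --- Usage sketch (assumed interface, not part of the original file) --------
# A minimal way to call the PyTorch im_test above on a single BGR frame,
# assuming a trained `net` on GPU, an `args` namespace with `input_size`, and
# the module-level `front_face_detector` / `lmark_predictor` / `sample_num`
# already initialized. `score_frame`, `threshold`, and treating the returned
# score as the fake probability are assumptions, not the repo's stated API.
import torch

@torch.no_grad()
def score_frame(frame_bgr, net, args, threshold=0.5):
    prob, face_info = im_test(net, frame_bgr, args)
    if prob < 0:
        return None  # no single face detected; the frame is skipped
    return {'fake_prob': prob, 'is_fake': prob > threshold}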
def get_batch(self, batch_idx, resize=None):
    if batch_idx >= self.batch_num:
        raise ValueError("Batch idx must be in range [0, {}].".format(self.batch_num - 1))

    imgs = []
    im_path = self.face_img_paths[batch_idx]
    im = cv2.imread(str(im_path))
    im_name = os.path.basename(im_path).split('.')[0]
    _, points = self.face_caches[im_name]
    if points is None:
        return None

    for _ in range(self.sample_num):
        # Cut out head region
        im_cut, _ = lib.cut_head([im.copy()], points)
        im_cut = cv2.resize(im_cut[0], (resize[0], resize[1]))
        imgs.append(im_cut)

    data = {}
    data['images'] = imgs
    data['name_list'] = im_name
    return data
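# --- Consumption sketch (assumed loader interface, not original code) -------
# How the test-time get_batch above might be driven, assuming a loader object
# exposing `batch_num` and this method; `iter_test_batches` and `input_size`
# are hypothetical names introduced here for illustration.
import numpy as np

def iter_test_batches(loader, input_size):
    for batch_idx in range(loader.batch_num):
        data = loader.get_batch(batch_idx, resize=(input_size, input_size))
        if data is None:
            continue  # landmark cache had no points for this image
        rois = np.asarray(data['images'], dtype=np.float32)  # (sample_num, H, W, 3)
        yield data['name_list'], rois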
def get_batch(self, batch_idx, resize=None):
    if batch_idx >= self.batch_num:
        raise ValueError("Batch idx must be in range [0, {}].".format(self.batch_num - 1))

    # Get the image indices for this batch (counting from 0), wrapping around
    # the end of the dataset if necessary
    start_idx = batch_idx * self.batch_size
    idx_range = []
    for i in range(self.batch_size):
        idx_range.append((start_idx + i) % self.data_num)
    print('batch index: {}, counting from 0'.format(batch_idx))

    imgs = []
    labels = []
    names = []
    for i in idx_range:
        im = cv2.imread(str(self.face_img_paths[i]))
        im_name = os.path.basename(self.face_img_paths[i]).split('.')[0]
        if im_name not in self.face_caches:
            continue
        trans_matrix, point = self.face_caches[im_name]
        if point is None:
            continue

        label = self.annos[im_name]
        # A label of 1 means this is an authentic image; with probability 0.5
        # we turn it into a synthesized negative by blurring the face region
        if label == 1:
            rnd = np.random.uniform()
            if rnd < 0.5:
                # Affine warp a blurred face area back onto the image
                size = np.arange(64, 128, dtype=np.int32)
                c = np.random.randint(0, len(size))
                new_im = self._face_blur(im, trans_matrix, size=size[c])
                rnd2 = np.random.uniform()
                if rnd2 < 0.5:
                    # Only retain the blurred pixels inside a minimal polygon mask
                    part_mask = lib.get_face_mask(im.shape[:2], point)
                    # Select the specific blurred part
                    new_im = self._select_part_to_blur(im, new_im, part_mask)
                im = new_im
                label = 0
        else:
            # Skip images that are not labeled as authentic
            continue

        # Cut out head region
        ims, _ = lib.cut_head([im], point)
        # Augmentation
        if self.is_aug:
            im = proc_img.aug(ims, random_transform_args=None, color_rng=[0.8, 1.2])[0]
        else:
            im = ims[0]
        im = cv2.resize(im, (resize[0], resize[1]))
        imgs.append(im)
        labels.append(label)
        names.append(im_name)

    # Reshuffle the image list after the last batch of an epoch
    if batch_idx == self.batch_num - 1:
        if self.is_shuffle:
            idx = np.random.permutation(self.data_num)
            self.face_img_paths = [self.face_img_paths[j] for j in idx]

    data = {}
    data['images'] = imgs
    data['images_label'] = labels
    data['name_list'] = names
    return data
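# --- Illustrative sketch of the negative-example idea (NOT the repo's code) --
# Above, self._face_blur warps a blurred face region back via trans_matrix and
# _select_part_to_blur composites it under a landmark polygon mask. The
# simplified stand-in below only uses a bounding box: it down/up-samples the
# face crop to `size` pixels and pastes it back, mimicking the resolution
# mismatch the classifier is trained to detect. All names here are assumptions.
import cv2

def simple_face_blur(im, face_box, size=64):
    x0, y0, x1, y1 = face_box                     # (left, top, right, bottom)
    face = im[y0:y1, x0:x1]
    small = cv2.resize(face, (size, size), interpolation=cv2.INTER_AREA)
    blurred = cv2.resize(small, (x1 - x0, y1 - y0), interpolation=cv2.INTER_LINEAR)
    out = im.copy()
    out[y0:y1, x0:x1] = blurred                   # such a sample would be labeled 0 (fake)
    return out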