# Shared imports assumed by the scripts below. The project-local module paths
# (modules.models, modules.utils, modules.evaluations) follow the usual
# arcface-tf2 layout and may need adjusting; expand2square is a local helper
# defined elsewhere in the project.
import logging
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix

from modules.models import ArcFaceModel
from modules.utils import load_yaml, set_memory_growth
from modules.evaluations import get_val_pair, calculate_accuracy


def main():
    cfg = load_yaml('./configs/arc_res50_mask.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # Binary mask that keeps the top 62 rows and zeros the bottom 50 rows
    # (112 rows total), simulating a face-mask occlusion of the lower face.
    temp1 = np.ones((62, 112, 3))
    temp2 = np.zeros((50, 112, 3))
    masked_img = np.concatenate([temp1, temp2], axis=0)

    path_img1 = '/home/anhdq23/Desktop/nguyen/data/AR/test2/M-002-12.bmp'
    path_img2 = '/home/anhdq23/Desktop/nguyen/data/AR/test2/M-003-01.bmp'
    img1 = Image.open(path_img1)
    img1 = img1.resize((112, 112))
    img1 = np.array(img1) / 255.0

    img2 = Image.open(path_img2)
    img2 = img2.resize((112, 112))
    img2 = np.array(img2) / 255.0

    # Embed the masked and unmasked versions of img2, then L2-normalize
    # the 512-D embeddings.
    mask_img2 = np.multiply(img2, masked_img)
    fc1 = model.predict(mask_img2.reshape((1, 112, 112, 3)))
    norm_fc1 = preprocessing.normalize(fc1.reshape((1, 512)), norm='l2', axis=1)
    fc2 = model.predict(img2.reshape((1, 112, 112, 3)))
    norm_fc2 = preprocessing.normalize(fc2.reshape((1, 512)), norm='l2', axis=1)

    # Normalized Euclidean distance between the two embeddings.
    diff = np.subtract(norm_fc1, norm_fc2)
    dist = np.sqrt(np.sum(np.square(diff), 1)) / 2
    print(dist)

    # Distance contributed by each 25-dimensional chunk of the embedding
    # (first 500 of the 512 dimensions).
    for i in np.arange(20):
        print(np.sqrt(np.sum(np.square(diff[0][i * 25:i * 25 + 25]), 0)) / 2)

    # Top subplot: both normalized embeddings; bottom subplot: their difference.
    fig = plt.figure()
    ax = fig.add_subplot(2, 1, 1)
    ax.plot(np.arange(512), norm_fc1[0])
    # ax = fig.add_subplot(2,1,2)
    ax.plot(np.arange(512), norm_fc2[0])
    ax = fig.add_subplot(2, 1, 2)
    ax.plot(np.arange(512), diff[0])
    plt.show()
def main(args):
    ijbc_meta = np.load(args.meta_path)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    # cfg = load_yaml('configs/arc_res50.yaml')
    cfg = load_yaml(args.config_path)
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    # Map the image names listed in the IJB-C meta file to their location on disk.
    img_names = [
        os.path.join(args.input_path, img_name.split('/')[-1])
        for img_name in ijbc_meta['img_names']
    ]

    embedding_size = cfg['embd_shape']
    batch_size = cfg['batch_size']
    img_size = cfg['input_size']

    def read_img(filename):
        raw = tf.io.read_file(filename)
        img = tf.image.decode_jpeg(raw, channels=3)
        img = tf.cast(img, tf.float32)
        img = img / 255
        return img

    # Input pipeline: decode, batch, and prefetch the images.
    dataset = tf.data.Dataset.from_tensor_slices(img_names)
    dataset = dataset.map(read_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    # Compute embeddings for the whole dataset and save them for later evaluation.
    embeddings = model.predict(dataset, batch_size=batch_size, verbose=1)
    print('embeddings', embeddings.shape)
    np.save(args.output_path, embeddings)
def main():
    # with open('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.json', 'r') as f:
    #     model_json = json.load(f)
    # model = model_from_json(model_json)
    # model.load_weights('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.h5')
    # model.summary()

    # Model trained on unmasked faces.
    cfg = load_yaml('./configs/arc_res50_new.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # Second model trained on masked faces.
    model_mask = ArcFaceModel(size=cfg['input_size'],
                              backbone_type=cfg['backbone_type'],
                              num_classes=cfg['num_classes'],
                              head_type=cfg['head_type'],
                              embd_shape=cfg['embd_shape'],
                              w_decay=cfg['w_decay'],
                              training=False)
    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + 'arc_res50_mask')
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model_mask.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # Mask detector: decides which embedding model to use for each image.
    import sys
    sys.path.append('/home/anhdq23/Desktop/nguyen/VT_simulation/')
    from detector import get_detector
    predictor = get_detector()

    # Embed every probe image in the ICPR dataset, skipping extreme poses
    # (yaw/pitch of 60, 75, or 90 degrees encoded in the file name).
    ICPR_dict = dict()
    path_ICPR = '/home/anhdq23/Desktop/nguyen/data/ICPR_cropped_face'
    for name_fold in os.listdir(path_ICPR):
        print(name_fold)
        path_fold = os.path.join(path_ICPR, name_fold)
        if name_fold not in ICPR_dict.keys():
            ICPR_dict[name_fold] = []
        for name_image in os.listdir(path_fold):
            path_image = os.path.join(path_fold, name_image)
            if '60' not in name_image[-10:-4] and '90' not in name_image[-10:-4] \
                    and '75' not in name_image[-10:]:
                image = Image.open(path_image)
                image = expand2square(image, (255, 255, 255))
                image = image.resize((112, 112))
                image = np.array(image) / 255.0

                _, labels, _ = predictor.predict(image, 1500 / 2, 0.6)
                if labels.numpy()[0] == 1:  # masked face -> masked model
                    fc1 = model_mask.predict(image.reshape((1, 112, 112, 3)))
                else:                       # unmasked face -> base model
                    fc1 = model.predict(image.reshape((1, 112, 112, 3)))
                norm_fc1 = preprocessing.normalize(fc1.reshape((1, cfg['embd_shape'])),
                                                   norm='l2', axis=1)
                ICPR_dict[name_fold].append(norm_fc1)

    # Build the gallery of anchor embeddings from near-frontal views only.
    path_ICPR = '/home/anhdq23/Desktop/nguyen/data/ICPR_cropped_face'
    anchor_list = []
    name_list = []
    for name_fold in os.listdir(path_ICPR):
        print(name_fold)
        path_fold = os.path.join(path_ICPR, name_fold)
        for name_image in os.listdir(path_fold):
            path_image = os.path.join(path_fold, name_image)
            if '+0+0' in name_image[-10:] or '+0-15' in name_image[-10:] or \
                    '+0+15' in name_image[-10:] or '+15+0' in name_image[-10:] or \
                    '-15+0' in name_image[-10:]:
                print(name_image)
                image = Image.open(path_image)
                image = expand2square(image, (255, 255, 255))
                image = image.resize((112, 112))
                image = np.array(image) / 255.0

                _, labels, _ = predictor.predict(image, 1500 / 2, 0.6)
                if labels.numpy()[0] == 1:
                    fc1 = model_mask.predict(image.reshape((1, 112, 112, 3)))
                else:
                    fc1 = model.predict(image.reshape((1, 112, 112, 3)))
                norm_fc1 = preprocessing.normalize(fc1.reshape((1, cfg['embd_shape'])),
                                                   norm='l2', axis=1)
                anchor_list.append(norm_fc1)
                name_list.append(name_fold)

    # Identification with a faiss L2 index: each probe embedding is matched
    # to its nearest anchor, and top-1 accuracy is accumulated.
    import faiss
    count_true = 0
    count_all = 0
    res = faiss.StandardGpuResources()  # use a single GPU
    index_flat = faiss.IndexFlatL2(512)
    # gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
    gpu_index_flat = index_flat
    gpu_index_flat.add(np.array(anchor_list).reshape((-1, 512)))

    for key in list(ICPR_dict.keys()):
        for feature in ICPR_dict[key]:
            D, I = gpu_index_flat.search(feature, k=1)  # actual search
            print(key, name_list[I[0][0]])
            if key == name_list[I[0][0]]:
                count_true += 1
            count_all += 1
    print(count_true, count_all)
def main():
    # with open('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.json', 'r') as f:
    #     model_json = json.load(f)
    # model = model_from_json(model_json)
    # model.load_weights('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.h5')
    # model.summary()

    cfg = load_yaml('./configs/arc_res50_mix.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # # serialize model to JSON
    # model_json = model.to_json()
    # with open("/home/anhdq23/Desktop/nguyen/image-segmentation-keras/weights/arc_res50_new.json", "w") as json_file:
    #     json.dump(model_json, json_file)
    # model_mask.save_weights("/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arc_res50_mask.h5")

    # LFW verification pairs: images are stored as CHW in [-1, 1];
    # convert to HWC in [0, 1] before feeding the model.
    data_path = '/home/anhdq23/Desktop/nguyen/arcface-tf2/data'
    lfw, lfw_issame = get_val_pair(data_path, 'lfw_align_112/lfw')
    lfw = np.transpose(lfw, [0, 2, 3, 1]) * 0.5 + 0.5
    image_1 = lfw[0::2]
    image_2 = lfw[1::2]

    # Normalized Euclidean distance between the embeddings of each pair.
    dist_all = []
    for idx in range(len(lfw_issame)):
        print(idx)
        fc1 = model.predict(image_1[idx].reshape((1, 112, 112, 3)))
        norm_fc1 = preprocessing.normalize(fc1.reshape((1, cfg['embd_shape'])),
                                           norm='l2', axis=1)
        fc2 = model.predict(image_2[idx].reshape((1, 112, 112, 3)))
        norm_fc2 = preprocessing.normalize(fc2.reshape((1, cfg['embd_shape'])),
                                           norm='l2', axis=1)

        # dist = tf.keras.losses.cosine_similarity(fc1.reshape((1,512)), fc2.reshape((1,512)))
        diff = np.subtract(norm_fc1, norm_fc2)
        dist = np.sqrt(np.sum(np.square(diff), 1)) / 2
        dist_all.extend(dist)

    plt.plot(dist_all)
    plt.show()

    # Sweep thresholds to compute TPR/FPR, accuracy, and F1 on the pairs.
    thresholds = np.arange(0, 1, 0.01)
    tpr_all = []
    fpr_all = []
    for thr in thresholds:
        tpr, fpr, acc, f1 = calculate_accuracy(thr, np.array(dist_all), lfw_issame)
        top_left = np.sqrt((1 - tpr) ** 2 + fpr ** 2)
        print('thr %.4f' % thr, 'tpr %.4f' % tpr, 'fpr %.4f' % fpr,
              'top left %.4f' % top_left, 'acc %.4f' % acc, 'f1_score %.4f' % f1)
        # top_left_batch.append(top_left)
        tpr_all.append(tpr)
        fpr_all.append(fpr)

    # Confusion matrix at each threshold.
    for threshold in thresholds:
        predict_issame = np.less(np.array(dist_all), threshold)
        conf_matrix = confusion_matrix(lfw_issame, predict_issame)
        print(conf_matrix)

    # ROC curve.
    plt.figure()
    lw = 2
    plt.plot(fpr_all, tpr_all, color='darkorange', lw=lw, label='ROC curve')
    plt.xlim([0.0, 1.])
    plt.ylim([0.0, 1.])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.show()