def main(_argv):
    model = ArcFaceModel(size=FLAGS.input_size,
                         training=False,
                         use_pretrain=False)
    model.load_weights(FLAGS.weights).expect_partial()
    model.summary()

    # Saved path will be 'output_dir/model_name/version'
    saved_path = os.path.join(FLAGS.output_dir, 'arcface', str(FLAGS.version))
    tf.saved_model.save(model, saved_path)
    logging.info("model saved to: {}".format(saved_path))

    model = tf.saved_model.load(saved_path)
    infer = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info(infer.structured_outputs)

    if not FLAGS.image:
        return

    img = tf.image.decode_image(open(FLAGS.image, 'rb').read(), channels=3)
    img = tf.image.resize(img, (FLAGS.input_size, FLAGS.input_size))
    img = img / 255.
    img = tf.expand_dims(img, 0)

    t1 = time.time()
    outputs = infer(img)
    embeddings = outputs['OutputLayer']
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))
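# --- Hedged sketch (not part of the original source): main(_argv) above reads
# FLAGS.input_size, FLAGS.weights, FLAGS.output_dir, FLAGS.version and FLAGS.image,
# which suggests an absl-flags entry point roughly like the one below. The flag
# names are taken from the usage above; defaults and help strings are assumptions.
from absl import app, flags

flags.DEFINE_string('weights', None, 'path to the trained ArcFace checkpoint')
flags.DEFINE_string('output_dir', './exported_models', 'SavedModel output directory')
flags.DEFINE_integer('version', 1, 'version folder created under output_dir/arcface')
flags.DEFINE_integer('input_size', 112, 'input image size expected by the model')
flags.DEFINE_string('image', '', 'optional test image to run through the exported model')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    app.run(main)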
def main():
    cfg = load_yaml('./configs/arc_res50_mask.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # Binary mask: keep the top 62 rows and zero the bottom 50 rows of a
    # 112x112 image, simulating a face mask covering the lower face.
    temp1 = np.ones((62, 112, 3))
    temp2 = np.zeros((50, 112, 3))
    masked_img = np.concatenate([temp1, temp2], axis=0)

    path_img1 = '/home/anhdq23/Desktop/nguyen/data/AR/test2/M-002-12.bmp'
    path_img2 = '/home/anhdq23/Desktop/nguyen/data/AR/test2/M-003-01.bmp'
    img1 = Image.open(path_img1)
    img1 = img1.resize((112, 112))
    img1 = np.array(img1) / 255.0

    img2 = Image.open(path_img2)
    img2 = img2.resize((112, 112))
    img2 = np.array(img2) / 255.0
    mask_img2 = np.multiply(img2, masked_img)

    # Compare the embedding of the occluded image against the clean one.
    fc1 = model.predict(mask_img2.reshape((1, 112, 112, 3)))
    norm_fc1 = preprocessing.normalize(fc1.reshape((1, 512)), norm='l2', axis=1)
    fc2 = model.predict(img2.reshape((1, 112, 112, 3)))
    norm_fc2 = preprocessing.normalize(fc2.reshape((1, 512)), norm='l2', axis=1)

    diff = np.subtract(norm_fc1, norm_fc2)
    dist = np.sqrt(np.sum(np.square(diff), 1)) / 2
    print(dist)
    # Per-chunk distance over consecutive 25-dimensional slices of the embedding.
    for i in np.arange(20):
        print(np.sqrt(np.sum(np.square(diff[0][i * 25:i * 25 + 25]), 0)) / 2)

    fig = plt.figure()
    ax = fig.add_subplot(2, 1, 1)
    ax.plot(np.arange(512), norm_fc1[0])
    # ax = fig.add_subplot(2, 1, 2)
    ax.plot(np.arange(512), norm_fc2[0])
    ax = fig.add_subplot(2, 1, 2)
    ax.plot(np.arange(512), diff[0])
    plt.show()
def main():
    # with open('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.json', 'r') as f:
    #     model_json = json.load(f)
    # model = model_from_json(model_json)
    # model.load_weights('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.h5')
    # model.summary()

    cfg = load_yaml('./configs/arc_res50_new.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # Second model, loaded from the checkpoint fine-tuned on masked faces.
    model_mask = ArcFaceModel(size=cfg['input_size'],
                              backbone_type=cfg['backbone_type'],
                              num_classes=cfg['num_classes'],
                              head_type=cfg['head_type'],
                              embd_shape=cfg['embd_shape'],
                              w_decay=cfg['w_decay'],
                              training=False)
    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + 'arc_res50_mask')
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model_mask.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    import sys
    sys.path.append('/home/anhdq23/Desktop/nguyen/VT_simulation/')
    from detector import get_detector
    predictor = get_detector()

    # Build probe embeddings per identity; masked faces (detector label 1)
    # are routed to model_mask, unmasked faces to the base model.
    ICPR_dict = dict()
    path_ICPR = '/home/anhdq23/Desktop/nguyen/data/ICPR_cropped_face'
    for name_fold in os.listdir(path_ICPR):
        print(name_fold)
        path_fold = os.path.join(path_ICPR, name_fold)
        if name_fold not in ICPR_dict.keys():
            ICPR_dict[name_fold] = []
        for name_image in os.listdir(path_fold):
            path_image = os.path.join(path_fold, name_image)
            if '60' not in name_image[-10:-4] and '90' not in name_image[-10:-4] \
                    and '75' not in name_image[-10:]:
                image = Image.open(path_image)
                image = expand2square(image, (255, 255, 255))
                image = image.resize((112, 112))
                image = np.array(image) / 255.0
                _, labels, _ = predictor.predict(image, 1500 / 2, 0.6)
                if labels.numpy()[0] == 1:
                    fc1 = model_mask.predict(image.reshape((1, 112, 112, 3)))
                    norm_fc1 = preprocessing.normalize(
                        fc1.reshape((1, cfg['embd_shape'])), norm='l2', axis=1)
                else:
                    fc1 = model.predict(image.reshape((1, 112, 112, 3)))
                    norm_fc1 = preprocessing.normalize(
                        fc1.reshape((1, cfg['embd_shape'])), norm='l2', axis=1)
                ICPR_dict[name_fold].append(norm_fc1)

    # Build the anchor (gallery) embeddings from near-frontal poses only.
    path_ICPR = '/home/anhdq23/Desktop/nguyen/data/ICPR_cropped_face'
    anchor_list = []
    name_list = []
    for name_fold in os.listdir(path_ICPR):
        print(name_fold)
        path_fold = os.path.join(path_ICPR, name_fold)
        for name_image in os.listdir(path_fold):
            path_image = os.path.join(path_fold, name_image)
            if '+0+0' in name_image[-10:] or '+0-15' in name_image[-10:] or \
                    '+0+15' in name_image[-10:] or '+15+0' in name_image[-10:] or \
                    '-15+0' in name_image[-10:]:
                print(name_image)
                image = Image.open(path_image)
                image = expand2square(image, (255, 255, 255))
                image = image.resize((112, 112))
                image = np.array(image) / 255.0
                _, labels, _ = predictor.predict(image, 1500 / 2, 0.6)
                if labels.numpy()[0] == 1:
                    fc1 = model_mask.predict(image.reshape((1, 112, 112, 3)))
                    norm_fc1 = preprocessing.normalize(
                        fc1.reshape((1, cfg['embd_shape'])), norm='l2', axis=1)
                else:
                    fc1 = model.predict(image.reshape((1, 112, 112, 3)))
                    norm_fc1 = preprocessing.normalize(
                        fc1.reshape((1, cfg['embd_shape'])), norm='l2', axis=1)
                anchor_list.append(norm_fc1)
                name_list.append(name_fold)

    # Init faiss
    import faiss
    count_true = 0
    count_all = 0
    res = faiss.StandardGpuResources()  # use a single GPU
    index_flat = faiss.IndexFlatL2(512)
    # gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
    gpu_index_flat = index_flat
    gpu_index_flat.add(np.array(anchor_list).reshape((-1, 512)))

    # 1-NN identification: count how often the nearest anchor has the same identity.
    for key in list(ICPR_dict.keys()):
        for feature in ICPR_dict[key]:
            D, I = gpu_index_flat.search(feature, k=1)  # actual search
            print(key, name_list[I[0][0]])
            if key == name_list[I[0][0]]:
                count_true += 1
            count_all += 1
    print(count_true, count_all)
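# --- Hedged note (added for clarity, not in the original source): the embeddings
# added to the faiss index are L2-normalized, so the squared L2 distance D returned
# by IndexFlatL2 relates to cosine similarity via ||a - b||^2 = 2 - 2*cos(a, b).
# The "dist" used by the verification scripts in this repo is ||a - b|| / 2, which
# lies in [0, 1] for unit vectors. A minimal sketch of both quantities:
import numpy as np

def l2_to_cosine(squared_l2):
    """Convert squared L2 distance between unit vectors to cosine similarity."""
    return 1.0 - squared_l2 / 2.0

def halved_l2_dist(a, b):
    """The [0, 1] distance used by the evaluation code: ||a - b|| / 2 per row."""
    return np.sqrt(np.sum(np.square(a - b), axis=1)) / 2.0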
def main(_):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=True)
    model.summary(line_length=80)

    if cfg['train_dataset']:
        logging.info("load ms1m dataset.")
        dataset_len = cfg['num_samples']
        steps_per_epoch = dataset_len // cfg['batch_size']
        train_dataset = dataset.load_tfrecord_dataset(
            cfg['train_dataset'], cfg['batch_size'], cfg['binary_img'],
            is_ccrop=cfg['is_ccrop'])
    else:
        logging.info("load fake dataset.")
        dataset_len = 1
        steps_per_epoch = 1
        train_dataset = dataset.load_fake_dataset(cfg['input_size'])

    learning_rate = tf.constant(cfg['base_lr'])
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=0.9, nesterov=True)
    loss_fn = SoftmaxLoss()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
        epochs, steps = get_ckpt_inf(ckpt_path, steps_per_epoch)
    else:
        print("[*] training from scratch.")
        epochs, steps = 1, 1

    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        summary_writer = tf.summary.create_file_writer('./logs/' + cfg['sub_name'])

        train_dataset = iter(train_dataset)

        while epochs <= cfg['epochs']:
            inputs, labels = next(train_dataset)

            with tf.GradientTape() as tape:
                logits = model(inputs, training=True)
                reg_loss = tf.reduce_sum(model.losses)
                pred_loss = loss_fn(labels, logits)
                total_loss = pred_loss + reg_loss

            grads = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if steps % 5 == 0:
                verb_str = "Epoch {}/{}: {}/{}, loss={:.2f}, lr={:.4f}"
                print(verb_str.format(epochs, cfg['epochs'],
                                      steps % steps_per_epoch,
                                      steps_per_epoch,
                                      total_loss.numpy(),
                                      learning_rate.numpy()))

                with summary_writer.as_default():
                    tf.summary.scalar('loss/total loss', total_loss, step=steps)
                    tf.summary.scalar('loss/pred loss', pred_loss, step=steps)
                    tf.summary.scalar('loss/reg loss', reg_loss, step=steps)
                    tf.summary.scalar('learning rate', optimizer.lr, step=steps)

            if steps % cfg['save_steps'] == 0:
                print('[*] save ckpt file!')
                model.save_weights('checkpoints/{}/e_{}_b_{}.ckpt'.format(
                    cfg['sub_name'], epochs, steps % steps_per_epoch))

            steps += 1
            epochs = steps // steps_per_epoch + 1
    else:
        model.compile(optimizer=optimizer, loss=loss_fn,
                      run_eagerly=(FLAGS.mode == 'eager_fit'))

        mc_callback = ModelCheckpoint(
            'checkpoints/' + cfg['sub_name'] + '/e_{epoch}_b_{batch}.ckpt',
            save_freq=cfg['save_steps'] * cfg['batch_size'], verbose=1,
            save_weights_only=True)
        tb_callback = TensorBoard(log_dir='logs/',
                                  update_freq=cfg['batch_size'] * 5,
                                  profile_batch=0)
        tb_callback._total_batches_seen = steps
        tb_callback._samples_seen = steps * cfg['batch_size']
        callbacks = [mc_callback, tb_callback]

        history = model.fit(train_dataset,
                            epochs=cfg['epochs'],
                            steps_per_epoch=steps_per_epoch,
                            callbacks=callbacks,
                            initial_epoch=epochs - 1)

    print("[*] training done!")
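# --- Hedged sketch (illustrative only, not copied from this repo): the training
# script above expects a YAML config providing at least the keys it reads. A Python
# view of such a config might look like the dict below; every value is an assumption
# and should be taken from the actual ./configs/*.yaml files.
example_cfg = {
    'sub_name': 'arc_res50',        # checkpoint / log folder name
    'backbone_type': 'ResNet50',    # backbone passed to ArcFaceModel
    'head_type': 'ArcHead',         # classification head type
    'input_size': 112,
    'embd_shape': 512,
    'w_decay': 5e-4,
    'num_classes': 85742,           # identities in the training tfrecord
    'num_samples': 5822653,         # images in the training tfrecord
    'train_dataset': './data/ms1m_bin.tfrecord',
    'binary_img': True,
    'is_ccrop': False,
    'batch_size': 128,
    'base_lr': 0.01,
    'epochs': 5,
    'save_steps': 1000,
}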
cfg = load_yaml(FLAGS.cfg_path)
# Variant of the setup above, with the config overridden in code for the
# ms1m_asian tfrecord.
cfg['num_classes'] = 179721
cfg['num_samples'] = 8621403
cfg['train_dataset'] = 'data/popet/dtat/ms1m_asian.tfrecord'
cfg['sub_name'] = '200324_model'
print(cfg)

model = ArcFaceModel(size=cfg['input_size'],
                     backbone_type=cfg['backbone_type'],
                     num_classes=cfg['num_classes'],
                     head_type=cfg['head_type'],
                     embd_shape=cfg['embd_shape'],
                     w_decay=cfg['w_decay'],
                     training=True)
model.summary(line_length=80)

if cfg['train_dataset']:
    logging.info("load ms1m dataset.")
    dataset_len = cfg['num_samples']
    steps_per_epoch = dataset_len // cfg['batch_size']
    train_dataset = dataset.load_tfrecord_dataset(
        cfg['train_dataset'], cfg['batch_size'], cfg['binary_img'],
        is_ccrop=cfg['is_ccrop'])
else:
    logging.info("load fake dataset.")
    dataset_len = 1
    steps_per_epoch = 1
    train_dataset = dataset.load_fake_dataset(cfg['input_size'])

learning_rate = tf.constant(cfg['base_lr'])
def main():
    # with open('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.json', 'r') as f:
    #     model_json = json.load(f)
    # model = model_from_json(model_json)
    # model.load_weights('/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arcface_ret50.h5')
    # model.summary()

    cfg = load_yaml('./configs/arc_res50_mix.yaml')
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=False)
    model.summary()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    print(ckpt_path)
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    # # serialize model to JSON
    # model_json = model.to_json()
    # with open("/home/anhdq23/Desktop/nguyen/image-segmentation-keras/weights/arc_res50_new.json", "w") as json_file:
    #     json.dump(model_json, json_file)
    # model_mask.save_weights("/home/anhdq23/Desktop/nguyen/VT_simulation/weights/arc_res50_mask.h5")

    data_path = '/home/anhdq23/Desktop/nguyen/arcface-tf2/data'
    lfw, lfw_issame = get_val_pair(data_path, 'lfw_align_112/lfw')
    lfw = np.transpose(lfw, [0, 2, 3, 1]) * 0.5 + 0.5
    image_1 = lfw[0::2]
    image_2 = lfw[1::2]

    # Distance between the L2-normalized embeddings of each LFW verification pair.
    dist_all = []
    for idx in range(len(lfw_issame)):
        print(idx)
        fc1 = model.predict(image_1[idx].reshape((1, 112, 112, 3)))
        norm_fc1 = preprocessing.normalize(fc1.reshape((1, cfg['embd_shape'])),
                                           norm='l2', axis=1)
        fc2 = model.predict(image_2[idx].reshape((1, 112, 112, 3)))
        norm_fc2 = preprocessing.normalize(fc2.reshape((1, cfg['embd_shape'])),
                                           norm='l2', axis=1)

        # dist = tf.keras.losses.cosine_similarity(fc1.reshape((1,512)), fc2.reshape((1,512)))
        diff = np.subtract(norm_fc1, norm_fc2)
        dist = np.sqrt(np.sum(np.square(diff), 1)) / 2
        dist_all.extend(dist)

    plt.plot(dist_all)
    plt.show()

    # Sweep thresholds and report TPR / FPR / accuracy / F1 at each one.
    thresholds = np.arange(0, 1, 0.01)
    tpr_all = []
    fpr_all = []
    for thr in thresholds:
        tpr, fpr, acc, f1 = calculate_accuracy(thr, np.array(dist_all), lfw_issame)
        top_left = np.sqrt((1 - tpr)**2 + fpr**2)
        print('thr %.4f' % thr, 'tpr %.4f' % tpr, 'fpr %.4f' % fpr,
              'top left %.4f' % top_left, 'acc %.4f' % acc, 'f1_score %.4f' % f1)
        # top_left_batch.append(top_left)
        tpr_all.append(tpr)
        fpr_all.append(fpr)

    for threshold in thresholds:
        predict_issame = np.less(np.array(dist_all), threshold)
        conf_matrix = confusion_matrix(lfw_issame, predict_issame)
        print(conf_matrix)

    plt.figure()
    lw = 2
    plt.plot(fpr_all, tpr_all, color='darkorange', lw=lw, label='ROC curve')
    plt.xlim([0.0, 1.])
    plt.ylim([0.0, 1.])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.show()
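# --- Hedged sketch (the real helper lives elsewhere in this repo and may differ):
# calculate_accuracy is called above as calculate_accuracy(threshold, dist, issame)
# and is expected to return TPR, FPR, accuracy and F1 at that threshold, counting a
# pair as "same" when its embedding distance falls below the threshold.
import numpy as np

def calculate_accuracy(threshold, dist, actual_issame):
    predict_issame = np.less(dist, threshold)
    actual_issame = np.asarray(actual_issame, dtype=bool)

    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(np.logical_and(np.logical_not(predict_issame),
                               np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))

    tpr = 0.0 if (tp + fn) == 0 else float(tp) / float(tp + fn)
    fpr = 0.0 if (fp + tn) == 0 else float(fp) / float(fp + tn)
    acc = float(tp + tn) / dist.size
    precision = 0.0 if (tp + fp) == 0 else float(tp) / float(tp + fp)
    f1 = 0.0 if (precision + tpr) == 0 else 2 * precision * tpr / (precision + tpr)
    return tpr, fpr, acc, f1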
def main(_):
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         num_classes=cfg['num_classes'],
                         head_type=cfg['head_type'],
                         embd_shape=cfg['embd_shape'],
                         w_decay=cfg['w_decay'],
                         training=True)
    model.summary()

    learning_rate = tf.constant(cfg['base_lr'])
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=0.9, nesterov=True)
    loss_fn = SoftmaxLoss()

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] training from scratch.")

    model.compile(optimizer=optimizer, loss=loss_fn)

    # LFW verification pairs used as the validation set during training.
    data_path = 'data'
    lfw, lfw_issame = get_val_pair(data_path, 'lfw_align_112/lfw')
    lfw = np.transpose(lfw, [0, 2, 3, 1]) * 0.5 + 0.5
    image_1 = lfw[0::2]
    image_2 = lfw[1::2]

    augment = ImgAugTransform()

    if FLAGS.mode == 'eager_tf':
        top_left_all = [0.008807]
        for epochs in range(cfg['epochs']):
            logging.info("Shuffle ms1m dataset.")
            dataset_len = cfg['num_samples']
            steps_per_epoch = dataset_len // cfg['batch_size']
            train_dataset = dataset.load_tfrecord_dataset(
                cfg['train_dataset'], cfg['batch_size'], cfg['binary_img'],
                is_ccrop=cfg['is_ccrop'])

            for batch, (x, y) in enumerate(train_dataset):
                # Apply imgaug augmentation on the raw uint8 images, then rescale.
                x0_new = np.array(x[0], dtype=np.uint8)
                x1_new = np.array(x[1], dtype=np.float32)
                for i in np.arange(len(x0_new)):
                    x0_new[i] = augment(x0_new[i])
                temp = np.array(x0_new, dtype=np.float32) / 255.0

                loss = model.train_on_batch((temp, x1_new), x1_new)

                if batch % 50 == 0:
                    verb_str = "Epoch {}/{}: {}/{}, loss={:.6f}, lr={:.6f}"
                    print(verb_str.format(
                        epochs, cfg['epochs'], batch, steps_per_epoch, loss,
                        cfg['base_lr'] / (1.0 + cfg['w_decay'] * (epochs * 45489 + batch))))

                if batch % cfg['save_steps'] == 0:
                    # Evaluate on LFW using only the backbone and embedding head.
                    resnet_model = tf.keras.Model(
                        inputs=model.get_layer('resnet50').input,
                        outputs=model.get_layer('resnet50').output)
                    output_model = tf.keras.Model(
                        inputs=model.get_layer('OutputLayer').input,
                        outputs=model.get_layer('OutputLayer').output)

                    dist_all = []
                    top_left_batch = []
                    for idx in range(0, len(lfw_issame), cfg['batch_size']):
                        tem = resnet_model.predict(
                            image_1[idx:idx + cfg['batch_size']])
                        embeds_1 = output_model.predict(tem)
                        norm_embeds_1 = preprocessing.normalize(
                            embeds_1, norm='l2', axis=1)

                        tem = resnet_model.predict(
                            image_2[idx:idx + cfg['batch_size']])
                        embeds_2 = output_model.predict(tem)
                        norm_embeds_2 = preprocessing.normalize(
                            embeds_2, norm='l2', axis=1)

                        diff = np.subtract(norm_embeds_1, norm_embeds_2)
                        dist = np.sqrt(np.sum(np.square(diff), 1)) / 2
                        dist_all.extend(dist)

                    thresholds = np.arange(0, 1, 0.01)
                    for thr in thresholds:
                        tpr, fpr, _ = calculate_accuracy(
                            thr, np.array(dist_all), lfw_issame)
                        top_left = np.sqrt((1 - tpr)**2 + fpr**2)
                        top_left_batch.append(top_left)

                    print("The current top left: {:.6f} Threshold: {:.2f}".format(
                        np.min(top_left_batch), 0.01 * np.argmin(top_left_batch)))
                    if not len(top_left_all):
                        print("The best top left: {:.6f} Threshold: {:.2f}".format(
                            np.min(top_left_batch), 0.01 * np.argmin(top_left_batch)))
                    else:
                        print("The best top left: {:.6f}".format(top_left_all[-1]))

                    # Save a checkpoint only when the top-left distance improves.
                    if not len(top_left_all):
                        top_left_all.append(np.min(top_left_batch))
                        print('[*] save ckpt file!')
                        model.save_weights('checkpoints/{}/e_{}_b_{}.ckpt'.format(
                            cfg['sub_name'], epochs, batch % steps_per_epoch))
                    elif top_left_all[-1] > np.min(top_left_batch):
                        top_left_all.append(np.min(top_left_batch))
                        print('[*] save ckpt file!')
                        model.save_weights('checkpoints/{}/e_{}_b_{}.ckpt'.format(
                            cfg['sub_name'], epochs, batch % steps_per_epoch))

    # Final snapshot of the weights after training.
    model.save_weights('checkpoints/train_{}/{}.ckpt'.format(
        cfg['sub_name'], cfg['sub_name']))