Example #1
def get_extractor(args):
    # load model
    model_path = args.model_path
    images = tf.placeholder(
        name='img_inputs',
        shape=[None, args.image_size[0], args.image_size[1], 3],
        dtype=tf.float32)
    dropout_rate = tf.placeholder(name='dropout_rate', dtype=tf.float32)

    print('Building net structure')
    w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)

    # separate test net (reused weights) because the batch norm layer behaves differently at inference
    tl.layers.set_name_reuse(True)
    test_net = get_resnet(images,
                          args.net_depth,
                          type='ir',
                          w_init=w_init_method,
                          trainable=False,
                          reuse=tf.AUTO_REUSE,
                          keep_rate=dropout_rate)
    embedding_tensor = test_net.outputs
    # 3.10 define sess
    #sess = tf.Session()
    gpu_config = tf.ConfigProto(allow_soft_placement=True)
    gpu_config.gpu_options.allow_growth = True

    sess = tf.Session(config=gpu_config)
    # 3.13 init all variables
    sess.run(tf.global_variables_initializer())
    # restore weights
    saver = tf.train.Saver()
    saver.restore(sess, model_path)
    # lfw validate: the images entry is left as None here and is filled in
    # per batch by the extractor
    feed_dict = {images: None, dropout_rate: 1.0}

    #feed_dict_test.update(tl.utils.dict_to_one(net.all_drop))
    extractor = TensorflowExtractor(sess, embedding_tensor, args.batch_size,
                                    feed_dict, images)

    return extractor
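A minimal, hedged usage sketch (not from the source): the args fields below (model_path, image_size, net_depth, batch_size) are inferred from the attributes get_extractor reads, and the checkpoint path reuses the one that appears in Example #3.

import argparse

args = argparse.Namespace(
    model_path='./model/InsightFace_iter_best_710000.ckpt',  # assumed checkpoint path
    image_size=[112, 112],  # matches the placeholder shape above
    net_depth=50,           # ResNet depth passed to get_resnet
    batch_size=32)          # consumed by TensorflowExtractor
extractor = get_extractor(args)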
Example #2
 # 2.2 prepare validation datasets
 ver_list = []
 ver_name_list = []
 for db in args.eval_datasets:
     print('begin db %s convert.' % db)  # list each validation dataset path, one per loop iteration
     data_set = load_bin(
         db, args.image_size,
         args)  # defaults to lfw; image_size (set at line 27) is 112*112, yielding a BGR dataset [112, 112, 3]
     ver_list.append(data_set)  # append the validation set to ver_list
     ver_name_list.append(db)  # append the validation set's path to ver_name_list
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 net = get_resnet(images,
                  args.net_depth,
                  type='ir',
                  w_init=w_init_method,
                  trainable=True,
                  keep_rate=dropout_rate)
 # 3.2 get arcface loss
 logit = arcface_loss(embedding=net.outputs,
                      labels=labels,
                      w_init=w_init_method,
                      out_num=args.num_output)
 # separate test net (reused weights) because the batch norm layer behaves differently at inference
 tl.layers.set_name_reuse(True)
 test_net = get_resnet(images,
                       args.net_depth,
                       type='ir',
                       w_init=w_init_method,
                       trainable=False,
                       reuse=True,
                       keep_rate=dropout_rate)
 embedding_tensor = test_net.outputs
Example #3
        dropout_keep_rate = tf.placeholder(name='dropout_keep_rate',
                                           dtype=tf.float32)
        images_t = tf.placeholder(name='img_inputs_t',
                                  shape=[None, *args.image_size, 3],
                                  dtype=tf.float32)
        labels_t = tf.placeholder(name='img_labels_t',
                                  shape=[None, ],
                                  dtype=tf.int64)

        t_model_path = args.teacher_model_path
        w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
        net_t = get_resnet(images_t,
                           args.net_depth,
                           type='ir',
                           w_init=w_init_method,
                           trainable=False,
                           keep_rate=dropout_keep_rate)
        embedding_tensor_t = net_t.outputs

        logit_t = arcface_loss(embedding=embedding_tensor_t,
                               labels=labels_t,
                               var_scope='arcface_loss',
                               w_init=w_init_method,
                               out_num=args.num_output)
        t_tau = tf.scalar_mul(1.0 / args.tau, logit_t)
        pred_t = tf.nn.softmax(logit_t)
        acc_t = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(pred_t, axis=1), labels_t),
                    dtype=tf.float32))
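        # Hedged sketch (assumption, not in the source): t_tau above is the
        # temperature-softened teacher logit, which a distillation loss would
        # match against the student's logits scaled by the same 1/args.tau.
        # 'logit_s' is a hypothetical student logit tensor:
        # s_tau = tf.scalar_mul(1.0 / args.tau, logit_s)
        # soft_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        #     labels=tf.stop_gradient(tf.nn.softmax(t_tau)), logits=s_tau))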
# labels = sess.graph.get_tensor_by_name("img_labels:0")
# dropout_rate = sess.graph.get_tensor_by_name("dropout_rate:0")
# output = sess.graph.get_tensor_by_name("resnet_v1_50/E_BN2/Identity_2:0")
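# Hedged sketch (assumption): the commented lookups above belong to the
# alternative route of restoring a saved graph and fetching tensors by name,
# roughly:
#   saver = tf.train.import_meta_graph('./model/InsightFace_iter_best_710000.ckpt.meta')
#   saver.restore(sess, './model/InsightFace_iter_best_710000.ckpt')
#   images = sess.graph.get_tensor_by_name('img_inputs:0')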

images = tf.placeholder(name='img_inputs',
                        shape=[None, 112, 112, 3],
                        dtype=tf.float32)
labels = tf.placeholder(name='img_labels', shape=[None, ], dtype=tf.int64)
dropout_rate = tf.placeholder(name='dropout_rate', dtype=tf.float32)

w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
net = get_resnet(images,
                 50,
                 type='ir',
                 w_init=w_init_method,
                 trainable=False,
                 keep_rate=dropout_rate)
output = net.outputs

saver = tf.train.Saver()
saver.restore(sess, './model/InsightFace_iter_best_710000.ckpt')

embedding_array = np.zeros((278, 10, 512))
label_array = np.zeros((278, 1))
file_list = 'D:/Project/face/INSIGHT_FACE/datasets/CROP_0.95'

classes = [
    path for path in os.listdir(file_list)
    if os.path.isdir(os.path.join(file_list, path))
]
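A hedged sketch (not from the source) of how embedding_array and label_array might be filled from the class directories: read up to 10 crops per class, resize to 112x112, and run the embedding tensor. The use of cv2 and the cap of 10 images per class are assumptions inferred from the (278, 10, 512) array shape.

import cv2
for class_idx, cls in enumerate(classes):
    label_array[class_idx, 0] = class_idx  # assumed: label is the class index
    img_dir = os.path.join(file_list, cls)
    for img_idx, img_name in enumerate(sorted(os.listdir(img_dir))[:10]):
        img = cv2.imread(os.path.join(img_dir, img_name))
        img = cv2.resize(img, (112, 112)).astype(np.float32)
        emb = sess.run(output,
                       feed_dict={images: img[None, ...], dropout_rate: 1.0})
        embedding_array[class_idx, img_idx] = emb[0]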
 dataset = dataset.shuffle(buffer_size=args.buffer_size)
 dataset = dataset.batch(args.batch_size)
 iterator = dataset.make_initializable_iterator()
 next_element = iterator.get_next()
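 # Hedged aside (assumption, not in the source): an initializable iterator is
 # typically driven per epoch, e.g.:
 #   sess.run(iterator.initializer)
 #   images_train, labels_train = sess.run(next_element)  # inside the step loop,
 #   until tf.errors.OutOfRangeError signals the end of the epoch.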
 # 2.2 prepare validation datasets
 ver_list = []
 ver_name_list = []
 for db in args.eval_datasets:
     print('begin db %s convert.' % db)
     data_set = load_bin(db, args.image_size, args)
     ver_list.append(data_set)
     ver_name_list.append(db)
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 net = get_resnet(images, args.net_depth, type='ir', w_init=w_init_method, trainable=True, keep_rate=dropout_rate)
 # 3.2 get arcface loss
 logit = arcface_loss(embedding=net.outputs, labels=labels, w_init=w_init_method, out_num=args.num_output)
 # separate test net (reused weights) because the batch norm layer behaves differently at inference
 tl.layers.set_name_reuse(True)
 test_net = get_resnet(images, args.net_depth, type='ir', w_init=w_init_method, trainable=False, reuse=True, keep_rate=dropout_rate)
 embedding_tensor = test_net.outputs
 # 3.3 define the cross entropy
 inference_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=labels))
 # inference_loss_avg = tf.reduce_mean(inference_loss)
 # 3.4 define weight decay losses
 # for var in tf.trainable_variables():
 #     print(var.name)
 # print('##########'*30)
 wd_loss = 0
 for weights in tl.layers.get_variables_with_name('W_conv2d', True, True):
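     # Hedged completion (assumption, mirroring the common InsightFace-TF training
     # script): accumulate an L2 penalty on each conv kernel; 'args.weight_decay'
     # is an assumed parameter name.
     wd_loss += tf.contrib.layers.l2_regularizer(args.weight_decay)(weights)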