Example #1
 dataset = tf.data.TFRecordDataset(tfrecords_f)  ## reads the TFRecord file: each element of the dataset is one TFExample holding the filename, as a string. "A Dataset comprising records from one or more TFRecord files."
 dataset = dataset.map(parse_function)  ## apply parse_function to each item in the dataset via the tf.data.Dataset.map method: it processes each serialized string and converts it into tf.Tensor objects
 dataset = dataset.shuffle(buffer_size=args.buffer_size)  ## shuffle
 dataset = dataset.batch(args.batch_size)  ## set the batch size
 iterator = dataset.make_initializable_iterator()  ## build the iterator. An initializable iterator must be explicitly initialized by running the iterator.initializer op before use, which lets the dataset definition take parameters fed in through a tf.placeholder
 next_element = iterator.get_next()
 # 2.2 prepare validate datasets
 ver_list = []
 ver_name_list = []
 for db in args.eval_datasets:
     print('begin db %s convert.' % db)  # iterate over the path of each validation dataset, one per loop pass
     data_set = load_bin(db, args.image_size, args)  # default is lfw; image size (line 27) is 112*112, yielding a BGR dataset of shape [112, 112, 3]
     ver_list.append(data_set)  # append the validation set to ver_list
     ver_name_list.append(db)  # append the validation set path to ver_name_list
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 net = get_resnet(images,
                  args.net_depth,
                  type='ir',
                  w_init=w_init_method,
                  trainable=True,
                  keep_rate=dropout_rate)
 # 3.2 get arcface loss
 logit = arcface_loss(embedding=net.outputs,
                      labels=labels,
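parse_function is referenced in these snippets but never shown. A minimal sketch of what it might do, assuming each TFRecord example stores a JPEG-encoded image under an 'image_raw' key and an integer label under 'label' (both key names are assumptions, not taken from the repo); the 127.5 / 128 normalization and the random left-right flip follow the comments that appear in Example #3 below:

    import tensorflow as tf

    def parse_function(example_proto):
        # Assumed feature keys; the real TFRecord layout in the original repo may differ.
        features = {
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        }
        parsed = tf.parse_single_example(example_proto, features)
        img = tf.image.decode_jpeg(parsed['image_raw'], channels=3)
        img = tf.reshape(img, [112, 112, 3])
        img = tf.cast(img, tf.float32)
        img = tf.subtract(img, 127.5) * 0.0078125   # (x - 127.5) / 128
        img = tf.image.random_flip_left_right(img)  # random augmentation
        return img, parsed['label']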
Example #2
 tfrecords_f = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
 dataset = tf.data.TFRecordDataset(tfrecords_f)
 dataset = dataset.map(parse_function).shuffle(
     buffer_size=args.buffer_size).batch(args.batch_size).prefetch(
         args.batch_size)
 # dataset = dataset.map(parse_function)
 # dataset = dataset.shuffle(buffer_size=args.buffer_size)
 # dataset = dataset.batch(args.batch_size)
 iterator = dataset.make_initializable_iterator()
 next_element = iterator.get_next()
 # 2.2 prepare validate datasets
 ver_list = []
 ver_name_list = []
 for db in args.eval_datasets:
     print('begin db %s convert.' % db)
     data_set = load_bin(db, args.image_size, args)
     ver_list.append(data_set)
     ver_name_list.append(db)
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 net = get_resnet(images,
                  args.net_depth,
                  type='ir',
                  w_init=w_init_method,
                  trainable=True,
                  keep_rate=dropout_rate)
 # 3.2 get arcface loss
 logit = arcface_loss(embedding=net.outputs,
                      labels=labels,
                      w_init=w_init_method,
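All of these snippets build the input pipeline with make_initializable_iterator, which, as the comment in Example #1 notes, must be initialized explicitly by running iterator.initializer before get_next() can be evaluated. A minimal driving loop under that assumption (num_epochs is a hypothetical epoch count, not from the original code):

    import tensorflow as tf

    # Sketch only: 'iterator' and 'next_element' are the objects built above.
    with tf.Session() as sess:
        for epoch in range(num_epochs):
            sess.run(iterator.initializer)    # must be re-run before every pass over the data
            while True:
                try:
                    images_batch, labels_batch = sess.run(next_element)
                    # ... feed the batch to the training op here ...
                except tf.errors.OutOfRangeError:
                    break                     # dataset exhausted for this epoch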
Example #3
    # 2.1 train datasets
    # the image has 127.5 subtracted and is multiplied by 1/128.
    # random flip left right
    tfrecords_f = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
    dataset = tf.data.TFRecordDataset(tfrecords_f)
    dataset = dataset.map(parse_function)
    dataset = dataset.shuffle(buffer_size=args.buffer_size)
    dataset = dataset.batch(args.batch_size)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    # 2.2 prepare validate datasets
    ver_list = []
    ver_name_list = []
    for db in args.eval_datasets:
        print('begin db %s convert.' % db)
        data_set = load_bin(db, args.image_size, args)
        ver_list.append(data_set)
        ver_name_list.append(db)
    # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
    # 3.1 inference phase
    w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
    # 3.2 define the learning rate schedule
    p = int(512.0/args.batch_size)
    lr_steps = [p*val for val in args.lr_steps]
    print('learning rate steps: ', lr_steps)
    lr = tf.train.piecewise_constant(global_step, boundaries=lr_steps, values=[0.001, 0.0005, 0.0003, 0.0001],
                                     name='lr_schedule')
    # 3.3 define the optimize method
    opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=args.momentum)

    # Calculate the gradients for each model tower.
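The 512.0 / batch_size factor rescales the decay boundaries so the schedule switches after roughly the same number of training examples regardless of batch size; with four learning-rate values, args.lr_steps has to supply exactly three step counts. A small sketch with hypothetical numbers:

    import tensorflow as tf

    batch_size = 128                               # hypothetical value
    lr_steps_arg = [40000, 60000, 80000]           # hypothetical --lr_steps contents
    p = int(512.0 / batch_size)                    # 4: smaller batches give proportionally later decay steps
    lr_steps = [p * val for val in lr_steps_arg]   # [160000, 240000, 320000]

    global_step = tf.train.get_or_create_global_step()
    # len(values) must be len(boundaries) + 1.
    lr = tf.train.piecewise_constant(global_step,
                                     boundaries=lr_steps,
                                     values=[0.001, 0.0005, 0.0003, 0.0001],
                                     name='lr_schedule')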
Example #4
     )  # map: parse_function processes each image (BGR channel reordering, standardization, random data augmentation)
 else:
     dataset = dataset.map(folder_parse_function)
 dataset = dataset.shuffle(buffer_size=buffer_size)  # shuffle
 dataset = dataset.batch(batch_size)  # ((?, 112, 112, 3), (?,))
 iterator = dataset.make_initializable_iterator()  # ((?, 112, 112, 3), (?,))
 next_element = iterator.get_next()  # <class 'tuple'>: (<tf.Tensor 'IteratorGetNext:0' shape=(?, 112, 112, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(?,) dtype=int64>)
 # 2.2 prepare validate datasets
 ver_list = []
 ver_name_list = []
 for db in eval_datasets:
     print('begin db %s convert.' % db)
     data_set = load_bin(db, image_size, eval_db_path)  # [original (12000, 112, 112, 3), flipped (12000, 112, 112, 3)], 6000
     ver_list.append(data_set)
     ver_name_list.append(db)
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 net = get_resnet(images,
                  net_depth,
                  type='ir',
                  w_init=w_init_method,
                  trainable=True,
                  keep_rate=dropout_rate)
 # 3.2 get arcface loss
 logit = arcface_loss(embedding=net.outputs,
                      labels=labels,
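load_bin itself is not shown; according to the shape comments it returns a two-element data_list (the images plus, presumably, horizontally flipped copies, each of shape (12000, 112, 112, 3)) together with a 6000-entry issame_list of pair labels. A rough sketch of such a loader, assuming the .bin file holds a pickled (bins, issame_list) pair of encoded images and booleans (the file layout and path format are assumptions, not taken from the repo):

    import pickle
    import cv2
    import numpy as np

    def load_bin(db_name, image_size, db_path):
        # Hypothetical loader matching the shapes noted in the comments above.
        with open('%s/%s.bin' % (db_path, db_name), 'rb') as f:
            bins, issame_list = pickle.load(f, encoding='bytes')
        shape = (len(issame_list) * 2, image_size[0], image_size[1], 3)
        data_list = [np.empty(shape, dtype=np.float32) for _ in range(2)]   # [original, flipped]
        for i, raw in enumerate(bins):
            img = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)  # BGR, HxWx3
            img = cv2.resize(img, (image_size[1], image_size[0]))
            data_list[0][i] = img
            data_list[1][i] = np.fliplr(img)    # flipped copy for test-time augmentation
        return data_list, issame_list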
Example #5
    parser.add_argument('--ckpt_index_list',
                        default=['1950000.ckpt'],
                        help='ckpt file indexes')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = get_args()
    ver_list = []
    ver_name_list = []
    for db in args.eval_datasets:
        print('begin db %s convert.' % db)
        data_set = load_bin(
            db, args.image_size, args.eval_db_path
        )  # (data_list, issame_list),len(data_list)=2 ,data_list[0].shape=(12000, 112, 112, 3), len(issame_list) = 6000
        ver_list.append(data_set)
        ver_name_list.append(db)

    images = tf.placeholder(name='img_inputs',
                            shape=[None, *args.image_size, 3],
                            dtype=tf.float32)  # (?, 112, 112, 3)
    labels = tf.placeholder(name='img_labels', shape=[None, ], dtype=tf.int64)  # (?, )
    dropout_rate = tf.placeholder(name='dropout_rate', dtype=tf.float32)

    w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)  # randomly initialize the weights to set up the empty skeleton first; the trained weights are restored into it afterwards
    net = get_resnet(
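The comment on the initializer describes the evaluation flow: the graph is first built with random weights and the trained weights are restored into it afterwards. A minimal restore loop over args.ckpt_index_list, assuming the checkpoint files live under a hypothetical args.ckpt_path argument (the real flag name may differ):

    import os
    import tensorflow as tf

    # Sketch only: restore the trained weights into the graph built above.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())   # random weights: the empty "skeleton"
        for ckpt_file in args.ckpt_index_list:
            saver.restore(sess, os.path.join(args.ckpt_path, ckpt_file))
            # ... run verification over ver_list / ver_name_list with the restored weights ...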
Example #6
 ]:
     dataset = dataset.map(raw_parse_function)  # map: raw_parse_function processes each image (BGR channel reordering, standardization, random data augmentation)
 else:
     dataset = dataset.map(folder_parse_function)
 dataset = dataset.shuffle(buffer_size=buffer_size)
 dataset = dataset.batch(batch_size)
 iterator = dataset.make_initializable_iterator()
 next_element = iterator.get_next()
 # 2.2 prepare validate datasets
 ver_list = []
 ver_name_list = []
 for db in eval_datasets:
     print('begin db %s convert.' % db)
     data_set = load_bin(db, image_size, eval_db_path)
     ver_list.append(data_set)
     ver_name_list.append(db)
 # 3. define network, loss, optimize method, learning rate schedule, summary writer, saver
 # 3.1 inference phase
 w_init_method = tf.contrib.layers.xavier_initializer(uniform=False)
 # 3.2 define the learning rate schedule
 p = int(512.0 / batch_size)
 lr_steps = [p * val for val in lr_steps]
 print('learning rate steps: ', lr_steps)
 lr = tf.train.piecewise_constant(global_step,
                                  boundaries=lr_steps,
                                  values=lr_values,
                                  name='lr_schedule')
 # 3.3 define the optimize method
 opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=momentum)
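Example #3 notes that gradients are then calculated for each model tower; in the single-tower case the optimizer defined above would typically be wired up roughly as follows (a sketch, assuming a scalar total_loss tensor and a global_step variable already exist):

    import tensorflow as tf

    # Sketch only: total_loss would combine the arcface logits with the labels
    # (e.g. via tf.nn.sparse_softmax_cross_entropy_with_logits) plus regularization terms.
    grads_and_vars = opt.compute_gradients(total_loss)
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)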