Example #1
def quant_val_reader_batch():
    # Build LFW verification pairs and read them one image at a time,
    # to serve as calibration samples for post-training quantization.
    nl, nr, flods, flags = parse_filelist(args.test_data_dir)
    test_dataset = LFW(nl, nr)
    test_reader = fluid.io.batch(
        test_dataset.reader, batch_size=1, drop_last=False)
    shuffle_reader = fluid.io.shuffle(test_reader, 3)

    def _reader():
        # Loop over the shuffled samples indefinitely, yielding each image
        # with an explicit batch dimension.
        while True:
            for idx, data in enumerate(shuffle_reader()):
                yield np.expand_dims(data[0][0], axis=0)

    return _reader
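
A minimal sketch (assuming the reader above, its LFW data, and numpy are available in scope) of how such a calibration generator can be sanity-checked before it is passed as sample_generator to quant_post_static:

# Hypothetical check: pull a few calibration samples and confirm each one is
# a single NCHW image of shape (1, 3, 112, 96).
sample_generator = quant_val_reader_batch()
for idx, sample in enumerate(sample_generator()):
    print(idx, sample.shape)  # expected: (1, 3, 112, 96)
    if idx >= 3:
        break  # the inner reader loops forever, so stop explicitly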
Example #2
def main():
    global args
    parser = argparse.ArgumentParser(description='PaddlePaddle SlimFaceNet')
    parser.add_argument(
        '--action', default='train', type=str, help='train/test/quant')
    parser.add_argument(
        '--model',
        default='SlimFaceNet_B_x0_75',
        type=str,
        help='SlimFaceNet_B_x0_75/SlimFaceNet_C_x0_75/SlimFaceNet_A_x0_60')
    parser.add_argument(
        '--use_gpu', default=1, type=int, help='Use GPU or not, 0 is not used')
    parser.add_argument(
        '--lr_strategy',
        default='piecewise_decay',
        type=str,
        help='lr_strategy')
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument(
        '--lr_list',
        default='0.1,0.01,0.001,0.0001',
        type=str,
        help='learning rate list (piecewise_decay)')
    parser.add_argument(
        '--lr_steps',
        default='36,52,58',
        type=str,
        help='learning rate decay at which epochs')
    parser.add_argument(
        '--l2_decay', default=4e-5, type=float, help='base l2_decay')
    parser.add_argument(
        '--train_data_dir', default='./CASIA', type=str, help='train_data_dir')
    parser.add_argument(
        '--test_data_dir', default='./lfw', type=str, help='lfw_data_dir')
    parser.add_argument(
        '--train_batchsize', default=512, type=int, help='train_batchsize')
    parser.add_argument(
        '--test_batchsize', default=500, type=int, help='test_batchsize')
    parser.add_argument(
        '--img_shape', default='3,112,96', type=str, help='img_shape')
    parser.add_argument(
        '--start_epoch', default=0, type=int, help='start_epoch')
    parser.add_argument(
        '--total_epoch', default=80, type=int, help='total_epoch')
    parser.add_argument(
        '--save_frequency', default=1, type=int, help='save_frequency')
    parser.add_argument(
        '--save_ckpt', default='output', type=str, help='save_ckpt')
    parser.add_argument(
        '--feature_save_dir',
        default='result.mat',
        type=str,
        help='The path of the extract features save, must be .mat file')
    args = parser.parse_args()

    if args.use_gpu:
        num_trainers = fluid.core.get_cuda_device_count()
    else:
        num_trainers = int(os.environ.get('CPU_NUM', 1))
    print(args)
    print('num_trainers: {}'.format(num_trainers))
    if args.save_ckpt is None:
        args.save_ckpt = 'output'
    if not os.path.isdir(args.save_ckpt):
        os.makedirs(args.save_ckpt)
    with open(os.path.join(args.save_ckpt, 'log.txt'), 'w+') as f:
        f.write(str(args) + '\n')
        f.write('num_trainers: {}'.format(num_trainers) + '\n')

    # The training program is built only for the 'train' action; the test and
    # startup programs are always built.
    if args.action == 'train':
        train_program = fluid.Program()
    test_program = fluid.Program()
    startup_program = fluid.Program()

    if args.action == 'train':
        train_out = build_program(train_program, startup_program, args, True)
    test_out = build_program(test_program, startup_program, args, False)
    test_program = test_program.clone(for_test=True)
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_program)

    if args.action == 'train':
        train(exe, train_program, train_out, test_program, test_out, args)
    elif args.action == 'quant':
        # Post-training static quantization of the exported inference model;
        # the calibration batch size and batch count are randomized in this demo.
        quant_post_static(
            executor=exe,
            model_dir='./out_inference/',
            quantize_model_path='./quant_model/',
            sample_generator=quant_val_reader_batch(),
            model_filename=None,  #'model',
            params_filename=None,  #'params',
            save_model_filename=None,  #'model',
            save_params_filename=None,  #'params',
            batch_size=np.random.randint(80, 160),
            batch_nums=np.random.randint(4, 10))
    elif args.action == 'test':
        # Evaluate the quantized inference model on the LFW verification pairs.
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             dirname='./quant_model/',
             model_filename=None,
             params_filename=None,
             executor=exe)
        nl, nr, flods, flags = parse_filelist(args.test_data_dir)
        test_dataset = LFW(nl, nr)
        test_reader = fluid.io.batch(
            test_dataset.reader,
            batch_size=args.test_batchsize,
            drop_last=False)
        image_test = fluid.data(
            name='image_test', shape=[-1, 3, 112, 96], dtype='float32')
        image_test1 = fluid.data(
            name='image_test1', shape=[-1, 3, 112, 96], dtype='float32')
        image_test2 = fluid.data(
            name='image_test2', shape=[-1, 3, 112, 96], dtype='float32')
        image_test3 = fluid.data(
            name='image_test3', shape=[-1, 3, 112, 96], dtype='float32')
        image_test4 = fluid.data(
            name='image_test4', shape=[-1, 3, 112, 96], dtype='float32')
        reader = fluid.io.DataLoader.from_generator(
            feed_list=[image_test1, image_test2, image_test3, image_test4],
            capacity=64,
            iterable=True,
            return_list=False)
        reader.set_sample_list_generator(
            test_reader,
            places=fluid.cuda_places() if args.use_gpu else fluid.CPUPlace())
        test_out = (fetch_targets, reader, flods, flags)
        print('fetch_targets[0]: ', fetch_targets[0])
        print('feed_target_names: ', feed_target_names)
        test(exe, inference_program, test_out, args)
    else:
        print('Unknown action: {} (expected train/test/quant)'.format(args.action))
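
A minimal sketch of using main() as the script entry point; the file name in the comment is hypothetical:

# Hypothetical usage, assuming the code above sits in a script with its imports:
#   python slimfacenet_demo.py --action quant --use_gpu 1
if __name__ == '__main__':
    main()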
Example #3
def build_program(program, startup, args, is_train=True):
    if args.use_gpu:
        num_trainers = fluid.core.get_cuda_device_count()
    else:
        num_trainers = int(os.environ.get('CPU_NUM', 1))
    places = fluid.cuda_places() if args.use_gpu else fluid.CPUPlace()

    train_dataset = CASIA_Face(root=args.train_data_dir)
    trainset_scale = len(train_dataset)

    with fluid.program_guard(main_program=program, startup_program=startup):
        with fluid.unique_name.guard():
            # Model construction
            model = models.__dict__[args.model](
                class_dim=train_dataset.class_nums)

            if is_train:
                # Training branch: image/label feeds, classification loss and accuracy.
                image = fluid.data(
                    name='image', shape=[-1, 3, 112, 96], dtype='float32')
                label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
                train_reader = fluid.io.batch(
                    train_dataset.reader,
                    batch_size=args.train_batchsize // num_trainers,
                    drop_last=False)
                reader = fluid.io.DataLoader.from_generator(
                    feed_list=[image, label],
                    capacity=64,
                    iterable=True,
                    return_list=False)
                reader.set_sample_list_generator(train_reader, places=places)

                model.extract_feature = False
                loss, acc = model.net(image, label)
                optimizer = creat_optimizer(args, trainset_scale)
                optimizer.minimize(loss)
                global_lr = optimizer._global_learning_rate()
                out = (loss, acc, global_lr, reader)

            else:
                # Evaluation branch: LFW verification pairs, feature extraction only.
                nl, nr, flods, flags = parse_filelist(args.test_data_dir)
                test_dataset = LFW(nl, nr)
                test_reader = fluid.io.batch(
                    test_dataset.reader,
                    batch_size=args.test_batchsize,
                    drop_last=False)
                image_test = fluid.data(
                    name='image_test', shape=[-1, 3, 112, 96], dtype='float32')
                image_test1 = fluid.data(
                    name='image_test1',
                    shape=[-1, 3, 112, 96],
                    dtype='float32')
                image_test2 = fluid.data(
                    name='image_test2',
                    shape=[-1, 3, 112, 96],
                    dtype='float32')
                image_test3 = fluid.data(
                    name='image_test3',
                    shape=[-1, 3, 112, 96],
                    dtype='float32')
                image_test4 = fluid.data(
                    name='image_test4',
                    shape=[-1, 3, 112, 96],
                    dtype='float32')
                reader = fluid.io.DataLoader.from_generator(
                    feed_list=[
                        image_test1, image_test2, image_test3, image_test4
                    ],
                    capacity=64,
                    iterable=True,
                    return_list=False)
                reader.set_sample_list_generator(
                    test_reader,
                    places=fluid.cuda_places()
                    if args.use_gpu else fluid.CPUPlace())

                model.extract_feature = True
                feature = model.net(image_test)
                out = (feature, reader, flods, flags)

            return out
Example #4
    # Excerpt from an argparse-based entry point: earlier add_argument calls
    # (e.g. --model, --use_gpu, --train_data_dir, --test_batchsize) are omitted here.
    parser.add_argument(
        '--test_data_dir', default='./lfw', type=str, help='lfw_data_dir')
    parser.add_argument(
        '--resume', default='output/0', type=str, help='resume')
    parser.add_argument(
        '--feature_save_dir',
        default='result.mat',
        type=str,
        help='The path of the extract features save, must be .mat file')
    args = parser.parse_args()

    place = fluid.CPUPlace() if args.use_gpu == 0 else fluid.CUDAPlace(0)
    with fluid.dygraph.guard(place):
        train_dataset = CASIA_Face(root=args.train_data_dir)
        nl, nr, flods, flags = parse_filelist(args.test_data_dir)
        test_dataset = LFW(nl, nr)
        test_reader = paddle.batch(
            test_dataset.reader,
            batch_size=args.test_batchsize,
            drop_last=False)

        net = models.__dict__[args.model](class_dim=train_dataset.class_nums)
        if args.resume:
            # Restore pretrained weights from '<resume>.pdparams' before testing.
            assert os.path.exists(args.resume + ".pdparams"), \
                "Given checkpoint {}.pdparams does not exist.".format(args.resume)
            para_dict, opti_dict = fluid.dygraph.load_dygraph(args.resume)
            net.set_dict(para_dict)

        test(test_reader, flods, flags, net, args)
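
A minimal sketch (hypothetical paths and class count) of how a checkpoint compatible with the --resume flag above could have been produced in dygraph mode:

# Hypothetical: fluid.dygraph.save_dygraph writes 'output/0.pdparams', which
# load_dygraph('output/0') in the snippet above can then restore.
with fluid.dygraph.guard():
    net = models.__dict__['SlimFaceNet_B_x0_75'](class_dim=num_classes)  # num_classes is a placeholder
    fluid.dygraph.save_dygraph(net.state_dict(), 'output/0')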