def compress(args):
    if args.data == "mnist":
        import paddle.dataset.mnist as reader
        val_reader = reader.test()
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        val_reader = reader.val()
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
    val_program = fluid.default_main_program().clone(for_test=True)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(
                os.path.join(args.pretrained_model, var.name))

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)
    val_feeder = fluid.DataFeeder(
        [image, label], place, program=val_program)

    def test(program):
        batch_id = 0
        acc_top1_ns = []
        acc_top5_ns = []
        for data in val_reader():
            start_time = time.time()
            acc_top1_n, acc_top5_n = exe.run(
                program,
                feed=val_feeder.feed(data),
                fetch_list=[acc_top1.name, acc_top5.name])
            end_time = time.time()
            if batch_id % args.log_period == 0:
                _logger.info(
                    "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
                    format(batch_id,
                           np.mean(acc_top1_n),
                           np.mean(acc_top5_n), end_time - start_time))
            acc_top1_ns.append(np.mean(acc_top1_n))
            acc_top5_ns.append(np.mean(acc_top5_n))
            batch_id += 1
        _logger.info("Final eval - acc_top1: {}; acc_top5: {}".format(
            np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

    # Collect the depthwise-separable conv weights as pruning candidates.
    params = []
    for param in fluid.default_main_program().global_block().all_parameters():
        if "_sep_weights" in param.name:
            params.append(param.name)

    # Run the sensitivity analysis in two passes over disjoint ratio sets;
    # each pass writes its own file, so the passes could also run on
    # separate devices and be merged afterwards.
    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_1.data",
        pruned_ratios=[0.5, 0.6, 0.7])

    sens = merge_sensitive(
        ["./sensitivities_0.data", "./sensitivities_1.data"])

    ratios = get_ratios_by_loss(sens, 0.01)
    print(ratios)
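The sensitivities data produced above maps each parameter name to a dict of pruned ratio to accuracy loss. The following is a minimal, hypothetical sketch of the selection semantics behind `get_ratios_by_loss`, not PaddleSlim's actual implementation (the library may additionally interpolate between measured ratios):

# Illustrative reimplementation of ratio selection by loss threshold;
# the function name and the simplified "largest measured ratio" rule
# are assumptions for illustration only.
def get_ratios_by_loss_sketch(sensitivities, loss_threshold):
    """For each parameter, pick the largest pruned ratio whose
    measured accuracy loss stays within loss_threshold."""
    ratios = {}
    for param, ratio_loss in sensitivities.items():
        # ratio_loss maps pruned ratio -> accuracy loss, e.g. {0.1: 0.002}
        feasible = [r for r, loss in ratio_loss.items()
                    if loss <= loss_threshold]
        if feasible:
            ratios[param] = max(feasible)
    return ratios

# Example with a hypothetical sensitivities dict:
sens = {"conv1_weights": {0.1: 0.002, 0.2: 0.008, 0.3: 0.05},
        "conv2_weights": {0.1: 0.001, 0.2: 0.003, 0.3: 0.009}}
print(get_ratios_by_loss_sketch(sens, 0.01))
# -> {'conv1_weights': 0.2, 'conv2_weights': 0.3}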
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # check if use_gpu=True is set in a CPU-only PaddlePaddle build
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]

    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)

    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    eval_reader = reader_main(config=config, mode="eval")

    exe = fluid.Executor(place)
    exe.run(startup_program)

    init_model(config, train_program, exe)

    # Load precomputed sensitivities and drop parameters that must not be
    # pruned: everything in skip_list plus the early backbone stages.
    sen = load_sensitivities("sensitivities_0.data")
    for i in skip_list:
        if i in sen.keys():
            sen.pop(i)
    back_bone_list = ['conv' + str(x) for x in range(1, 5)]
    for i in back_bone_list:
        for key in list(sen.keys()):
            if i + '_' in key:
                sen.pop(key)
    ratios = get_ratios_by_loss(sen, 0.03)
    logger.info("FLOPs before pruning: {}".format(flops(eval_program)))
    pruner = Pruner(criterion='geometry_median')
    print("ratios: {}".format(ratios))
    # Prune the eval graph with only_graph=True: only shapes are rewritten,
    # since the parameter tensors are shared with the train program and
    # are rewritten by the second prune() call below.
    pruned_val_program, _, _ = pruner.prune(
        eval_program,
        fluid.global_scope(),
        params=ratios.keys(),
        ratios=ratios.values(),
        place=place,
        only_graph=True)
    pruned_program, _, _ = pruner.prune(
        train_program,
        fluid.global_scope(),
        params=ratios.keys(),
        ratios=ratios.values(),
        place=place)
    logger.info("FLOPs after pruning: {}".format(flops(pruned_val_program)))

    # compile program for multi-devices
    train_compile_program = program.create_multi_devices_program(
        pruned_program, train_opt_loss_name)

    train_info_dict = {'compile_program': train_compile_program,
                       'train_program': pruned_program,
                       'reader': train_loader,
                       'fetch_name_list': train_fetch_name_list,
                       'fetch_varname_list': train_fetch_varname_list}

    eval_info_dict = {'program': pruned_val_program,
                      'reader': eval_reader,
                      'fetch_name_list': eval_fetch_name_list,
                      'fetch_varname_list': eval_fetch_varname_list}

    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(
            config, exe, train_info_dict, eval_info_dict, is_slim="prune")
    else:
        program.train_eval_rec_run(config, exe, train_info_dict,
                                   eval_info_dict)
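The skip-list filtering above is inlined into main(); the same logic can be read as a small standalone helper. This is a hypothetical refactoring for clarity (the helper name and signature are not from the source), built only on the load_sensitivities and get_ratios_by_loss calls already used above:

# Hedged helper: build per-parameter prune ratios from a sensitivities
# file while excluding given name fragments (e.g. early backbone convs).
from paddleslim.prune import load_sensitivities, get_ratios_by_loss

def ratios_excluding(sens_file, skip_fragments, max_loss):
    sen = load_sensitivities(sens_file)
    for name in list(sen.keys()):
        if any(frag in name for frag in skip_fragments):
            sen.pop(name)
    return get_ratios_by_loss(sen, max_loss)

# Equivalent to the filtering in main(), keeping conv1_..conv4_ intact:
# ratios = ratios_excluding("sensitivities_0.data",
#                           ["conv1_", "conv2_", "conv3_", "conv4_"], 0.03)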
def compress(args):
    if args.data == "mnist":
        val_dataset = paddle.vision.datasets.MNIST(mode='test')
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        val_dataset = reader.ImageNetDataset(mode='val')
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = paddle.static.data(
        name='image', shape=[None] + image_shape, dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')

    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
    acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
    val_program = paddle.static.default_main_program().clone(for_test=True)

    places = paddle.static.cuda_places(
    ) if args.use_gpu else paddle.static.cpu_places()
    place = places[0]
    exe = paddle.static.Executor(place)
    exe.run(paddle.static.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(
                os.path.join(args.pretrained_model, var.name))

        paddle.fluid.io.load_vars(
            exe, args.pretrained_model, predicate=if_exist)

    valid_loader = paddle.io.DataLoader(
        val_dataset,
        places=place,
        feed_list=[image, label],
        drop_last=False,
        batch_size=args.batch_size,
        use_shared_memory=True,
        shuffle=False)

    def test(program):
        acc_top1_ns = []
        acc_top5_ns = []
        for batch_id, data in enumerate(valid_loader):
            start_time = time.time()
            acc_top1_n, acc_top5_n = exe.run(
                program, feed=data, fetch_list=[acc_top1.name, acc_top5.name])
            end_time = time.time()
            if batch_id % args.log_period == 0:
                _logger.info(
                    "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
                    format(batch_id,
                           np.mean(acc_top1_n),
                           np.mean(acc_top5_n), end_time - start_time))
            acc_top1_ns.append(np.mean(acc_top1_n))
            acc_top5_ns.append(np.mean(acc_top5_n))
        _logger.info("Final eval - acc_top1: {}; acc_top5: {}".format(
            np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

    params = []
    for param in paddle.static.default_main_program().global_block(
    ).all_parameters():
        if "weights" in param.name:
            print(param.name)
            params.append(param.name)

    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])
    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_1.data",
        pruned_ratios=[0.5, 0.6, 0.7])
    sens = merge_sensitive(
        ["./sensitivities_0.data", "./sensitivities_1.data"])
    ratios = get_ratios_by_loss(sens, 0.01)
    print(sens)
    print(ratios)
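This Paddle 2.x variant stops at computing the per-parameter ratios; applying them would use paddleslim.prune.Pruner, as the OCR script above does. A minimal sketch, assuming `val_program`, `place`, and `ratios` from the surrounding code; the 'l1_norm' criterion is PaddleSlim's default choice, not something this demo specifies:

# Hedged sketch: apply the ratios computed above with PaddleSlim's Pruner.
from paddleslim.prune import Pruner

pruner = Pruner(criterion='l1_norm')
pruned_val_program, _, _ = pruner.prune(
    val_program,
    paddle.static.global_scope(),      # scope holding the parameter tensors
    params=list(ratios.keys()),        # parameter names to prune
    ratios=list(ratios.values()),      # pruned ratio per parameter
    place=place,
    only_graph=False)                  # also rewrite the weight tensors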
def test_sensitivity(self):
    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program, startup_program):
        input = fluid.data(name="image", shape=[None, 1, 28, 28])
        label = fluid.data(name="label", shape=[None, 1], dtype="int64")
        conv1 = conv_bn_layer(input, 8, 3, "conv1")
        conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
        sum1 = conv1 + conv2
        conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
        conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
        sum2 = conv4 + sum1
        conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
        conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
        out = fluid.layers.fc(conv6, size=10, act='softmax')
        acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    eval_program = main_program.clone(for_test=True)
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(startup_program)
    val_reader = paddle.fluid.io.batch(
        paddle.dataset.mnist.test(), batch_size=128)

    def eval_func(program):
        feeder = fluid.DataFeeder(
            feed_list=['image', 'label'], place=place, program=program)
        acc_set = []
        for data in val_reader():
            acc_np = exe.run(program=program,
                             feed=feeder.feed(data),
                             fetch_list=[acc_top1])
            acc_set.append(float(acc_np[0]))
        acc_val_mean = numpy.array(acc_set).mean()
        print("acc_val_mean: {}".format(acc_val_mean))
        return acc_val_mean

    def eval_func_for_args(args):
        program = args[0]
        feeder = fluid.DataFeeder(
            feed_list=['image', 'label'], place=place, program=program)
        acc_set = []
        for data in val_reader():
            acc_np = exe.run(program=program,
                             feed=feeder.feed(data),
                             fetch_list=[acc_top1])
            acc_set.append(float(acc_np[0]))
        acc_val_mean = numpy.array(acc_set).mean()
        print("acc_val_mean: {}".format(acc_val_mean))
        return acc_val_mean

    # Two partial runs over disjoint ratio sets, to be merged below.
    sensitivity(
        eval_program,
        place,
        ["conv4_weights"],
        eval_func,
        sensitivities_file="./sensitivities_file_0",
        pruned_ratios=[0.1, 0.2])

    sensitivity(
        eval_program,
        place,
        ["conv4_weights"],
        eval_func,
        sensitivities_file="./sensitivities_file_1",
        pruned_ratios=[0.3, 0.4])

    params_sens = sensitivity(
        eval_program,
        place,
        ["conv4_weights"],
        eval_func_for_args,
        eval_args=[eval_program],
        sensitivities_file="./sensitivities_file_params",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    sens_0 = load_sensitivities('./sensitivities_file_0')
    sens_1 = load_sensitivities('./sensitivities_file_1')
    sens = merge_sensitive([sens_0, sens_1])

    origin_sens = sensitivity(
        eval_program,
        place,
        ["conv4_weights"],
        eval_func,
        sensitivities_file="./sensitivities_file_2",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    # Merging the two partial runs must reproduce a single full run,
    # and passing the program via eval_args must not change the result.
    self.assertTrue(params_sens == origin_sens)
    self.assertTrue(sens == origin_sens)

    loss = 0.0
    ratios = get_ratios_by_loss(sens, loss)
    self.assertTrue(len(ratios) == len(sens))

    loss = min(list(sens.get('conv4_weights').values())) - 0.01
    ratios = get_ratios_by_loss(sens, loss)
    self.assertTrue(len(ratios) == len(sens))
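Note that merge_sensitive is called with file paths in the demo scripts above but with dicts returned by load_sensitivities in this test; both forms appear in this section, which suggests the API accepts either. A sketch of the two call forms, reusing the file names from the test (the equivalence in the final comment is an assumption implied by the test's assertions, not a documented guarantee):

from paddleslim.prune import load_sensitivities, merge_sensitive

# 1) merge from file paths (as in the compress() demos above)
sens_from_paths = merge_sensitive(
    ["./sensitivities_file_0", "./sensitivities_file_1"])

# 2) merge from already-loaded dicts (as in this test)
sens_from_dicts = merge_sensitive([
    load_sensitivities("./sensitivities_file_0"),
    load_sensitivities("./sensitivities_file_1"),
])
# The two results should be equivalent.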