def main():
    """Build the train and eval static-graph programs, wire up data readers,
    and run the training + evaluation loop for det / rec / cls algorithms.

    NOTE(review): relies on module-level names not visible in this chunk
    (``config``, ``train_program``, ``startup_program``, ``place``,
    ``train_alg_type``, ``logger``) -- presumably set up by the surrounding
    script before this is called; confirm against the full file.
    """
    # build train program
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]
    # last build output is the model-average handle (may be None; forwarded
    # as-is into train_info_dict below)
    model_average = train_build_outputs[-1]
    # build eval program
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    # clone(for_test=True) freezes the eval graph for inference-only runs
    eval_program = eval_program.clone(for_test=True)
    # initialize train reader
    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)
    # initialize eval reader
    eval_reader = reader_main(config=config, mode="eval")
    exe = fluid.Executor(place)
    # run parameter initialization once before any training step
    exe.run(startup_program)
    # compile program for multi-devices
    train_compile_program = program.create_multi_devices_program(
        train_program, train_opt_loss_name)
    # dump model structure (debug only); attention-based rec models are not
    # supported by summary()
    if config['Global']['debug']:
        if train_alg_type == 'rec' and 'attention' in config['Global'][
                'loss_type']:
            logger.warning('Does not suport dump attention...')
        else:
            summary(train_program)
    # load pretrained weights / checkpoint into the train program
    init_model(config, train_program, exe)
    train_info_dict = {'compile_program':train_compile_program,\
        'train_program':train_program,\
        'reader':train_loader,\
        'fetch_name_list':train_fetch_name_list,\
        'fetch_varname_list':train_fetch_varname_list,\
        'model_average': model_average}
    eval_info_dict = {'program':eval_program,\
        'reader':eval_reader,\
        'fetch_name_list':eval_fetch_name_list,\
        'fetch_varname_list':eval_fetch_varname_list}
    # dispatch to the detection / recognition / classification loop
    if train_alg_type == 'det':
        program.train_eval_det_run(config, exe, train_info_dict, eval_info_dict)
    elif train_alg_type == 'rec':
        program.train_eval_rec_run(config, exe, train_info_dict, eval_info_dict)
    else:
        program.train_eval_cls_run(config, exe, train_info_dict, eval_info_dict)
def main():
    """Measure filter-pruning sensitivity of a trained model.

    Builds the eval program, loads trained weights, prints the baseline
    metric, then runs PaddleSlim's sensitivity analysis over the prunable
    parameters, writing the results to ``sensitivities_0.data``.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (``program``, ``FLAGS``, ``logger``, ``CharacterOps``,
    ``init_model``, ``reader_main``, ``eval_function``,
    ``get_pruned_params``, ``slim``) -- confirm against the full file.
    """
    # Run code with static graph mode. Older Paddle releases are
    # static-graph only and lack enable_static(); narrowed from a bare
    # `except:` so KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        paddle.enable_static()
    except Exception:
        pass
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        # recognition algorithms need a character-set mapper
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_prog = fluid.Program()
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_prog, mode='test')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    # freeze the graph for inference-only execution
    eval_program = eval_program.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # load the trained weights to be analyzed
    init_model(config, eval_program, exe)
    eval_reader = reader_main(config=config, mode="eval")
    eval_info_dict = {'program': eval_program,
                      'reader': eval_reader,
                      'fetch_name_list': eval_fetch_name_list,
                      'fetch_varname_list': eval_fetch_varname_list}
    # (dropped a dead `eval_args = dict()` that was immediately overwritten)
    eval_args = {
        'exe': exe,
        'config': config,
        'eval_info_dict': eval_info_dict
    }
    metrics = eval_function(eval_args)
    print("Baseline: {}".format(metrics))
    params = get_pruned_params(eval_program)
    print('Start to analyze')
    # evaluate the model at each pruned ratio per parameter; results are
    # cached in the sensitivities file so reruns can resume
    sens_0 = slim.prune.sensitivity(
        eval_program,
        place,
        params,
        eval_function,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
        eval_args=eval_args,
        criterion='geometry_median')
def main():
    """Load config, build train/eval programs, and run the det/rec
    training + evaluation loop.

    NOTE(review): relies on module-level names not visible in this chunk
    (``program``, ``FLAGS``, ``logger``, ``CharacterOps``, ``reader_main``,
    ``init_model``, ``summary``) -- confirm against the full file.
    """
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        # recognition algorithms need a character-set mapper
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    # build the training graph (also adds optimizer ops)
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]
    # build the evaluation graph and freeze it for inference-only runs
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)
    # wire data readers to the loader / eval loop
    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)
    eval_reader = reader_main(config=config, mode="eval")
    exe = fluid.Executor(place)
    # run parameter initialization once before training
    exe.run(startup_program)
    # compile program for multi-devices
    train_compile_program = program.create_multi_devices_program(
        train_program, train_opt_loss_name)
    # dump model structure (debug only); attention loss is not supported
    if config['Global']['debug']:
        if 'attention' in config['Global']['loss_type']:
            logger.warning('Does not suport dump attention...')
        else:
            summary(train_program)
    # load pretrained weights / checkpoint into the train program
    init_model(config, train_program, exe)
    train_info_dict = {'compile_program':train_compile_program,\
        'train_program':train_program,\
        'reader':train_loader,\
        'fetch_name_list':train_fetch_name_list,\
        'fetch_varname_list':train_fetch_varname_list}
    eval_info_dict = {'program':eval_program,\
        'reader':eval_reader,\
        'fetch_name_list':eval_fetch_name_list,\
        'fetch_varname_list':eval_fetch_varname_list}
    # EAST/DB are detection algorithms; everything else here is recognition
    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(config, exe, train_info_dict, eval_info_dict)
    else:
        program.train_eval_rec_run(config, exe, train_info_dict, eval_info_dict)
def main():
    """Build train/eval programs, insert fake-quant/dequant ops via
    PaddleSlim's quant_aware transform, and run quantization-aware
    training + evaluation.

    NOTE(review): relies on module-level names not visible in this chunk
    (``config``, ``train_program``, ``startup_program``, ``place``,
    ``train_alg_type``, ``pact``, ``get_optimizer``, ``quant_aware``) --
    presumably set up by the surrounding script; confirm.
    """
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]
    # last build output is the model-average handle (may be None)
    model_average = train_build_outputs[-1]
    # build the evaluation graph and freeze it for inference-only runs
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)
    # wire data readers
    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)
    eval_reader = reader_main(config=config, mode="eval")
    exe = fluid.Executor(place)
    # run parameter initialization once before training
    exe.run(startup_program)
    # 1. quantization configs
    quant_config = {
        # weight quantize type, default is 'channel_wise_abs_max'
        'weight_quantize_type': 'channel_wise_abs_max',
        # activation quantize type, default is 'moving_average_abs_max'
        'activation_quantize_type': 'moving_average_abs_max',
        # weight quantize bit num, default is 8
        'weight_bits': 8,
        # activation quantize bit num, default is 8
        'activation_bits': 8,
        # ops of name_scope in not_quant_pattern list, will not be quantized
        'not_quant_pattern': ['skip_quant'],
        # ops of type in quantize_op_types, will be quantized
        'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
        # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
        'dtype': 'int8',
        # window size for 'range_abs_max' quantization. defaulf is 10000
        'window_size': 10000,
        # The decay coefficient of moving average, default is 0.9
        'moving_rate': 0.9,
    }
    # 2. quantization transform programs (training aware)
    # Make some quantization transforms in the graph before training and testing.
    # According to the weight and activation quantization type, the graph will be added
    # some fake quantize operators and fake dequantize operators.
    # PACT activation preprocessing clips activations with a learnable bound
    act_preprocess_func = pact
    optimizer_func = get_optimizer
    executor = exe
    eval_program = quant_aware(
        eval_program,
        place,
        quant_config,
        scope=None,
        act_preprocess_func=act_preprocess_func,
        optimizer_func=optimizer_func,
        executor=executor,
        for_test=True)
    quant_train_program = quant_aware(
        train_program,
        place,
        quant_config,
        scope=None,
        act_preprocess_func=act_preprocess_func,
        optimizer_func=optimizer_func,
        executor=executor,
        for_test=False)
    # compile program for multi-devices
    train_compile_program = program.create_multi_devices_program(
        quant_train_program, train_opt_loss_name, for_quant=True)
    # load pretrained weights into the (pre-transform) train program; the
    # quantized graph shares its parameter scope
    init_model(config, train_program, exe)
    train_info_dict = {'compile_program':train_compile_program,\
        'train_program':quant_train_program,\
        'reader':train_loader,\
        'fetch_name_list':train_fetch_name_list,\
        'fetch_varname_list':train_fetch_varname_list,\
        'model_average': model_average}
    eval_info_dict = {'program':eval_program,\
        'reader':eval_reader,\
        'fetch_name_list':eval_fetch_name_list,\
        'fetch_varname_list':eval_fetch_varname_list}
    # dispatch to det / rec / cls loop in slim-quant mode
    if train_alg_type == 'det':
        program.train_eval_det_run(
            config, exe, train_info_dict, eval_info_dict, is_slim="quant")
    elif train_alg_type == 'rec':
        program.train_eval_rec_run(
            config, exe, train_info_dict, eval_info_dict, is_slim="quant")
    else:
        program.train_eval_cls_run(
            config, exe, train_info_dict, eval_info_dict, is_slim="quant")
def main():
    """Prune a trained model using precomputed sensitivities, then
    fine-tune: load ``sensitivities_0.data``, derive per-parameter prune
    ratios for a 3% loss budget, prune both train and eval graphs, and run
    the training + evaluation loop.

    NOTE(review): relies on module-level names not visible in this chunk
    (``program``, ``FLAGS``, ``logger``, ``CharacterOps``,
    ``load_sensitivities``, ``skip_list``, ``get_ratios_by_loss``,
    ``flops``, ``Pruner``) -- confirm against the full file.
    """
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        # recognition algorithms need a character-set mapper
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    # build the training graph (also adds optimizer ops)
    train_build_outputs = program.build(
        config, train_program, startup_program, mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]
    # build the evaluation graph and freeze it for inference-only runs
    eval_program = fluid.Program()
    eval_build_outputs = program.build(
        config, eval_program, startup_program, mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)
    # wire data readers
    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)
    eval_reader = reader_main(config=config, mode="eval")
    exe = fluid.Executor(place)
    exe.run(startup_program)
    # compile program for multi-devices
    # load the trained weights to prune (must happen before pruning so the
    # pruner sees real parameter shapes/values)
    init_model(config, train_program, exe)
    sen = load_sensitivities("sensitivities_0.data")
    # drop parameters that must not be pruned
    for i in skip_list:
        if i in sen.keys():
            sen.pop(i)
    # also skip early backbone conv layers (conv1_* .. conv4_*)
    back_bone_list = ['conv' + str(x) for x in range(1, 5)]
    for i in back_bone_list:
        for key in list(sen.keys()):
            if i + '_' in key:
                sen.pop(key)
    # pick per-parameter ratios so total metric loss stays within 3%
    ratios = get_ratios_by_loss(sen, 0.03)
    logger.info("FLOPs before pruning: {}".format(flops(eval_program)))
    pruner = Pruner(criterion='geometry_median')
    print("ratios: {}".format(ratios))
    # eval graph is pruned only_graph=True (structure only, no weight copy)
    pruned_val_program, _, _ = pruner.prune(
        eval_program,
        fluid.global_scope(),
        params=ratios.keys(),
        ratios=ratios.values(),
        place=place,
        only_graph=True)
    # train graph pruning also updates the parameter tensors in scope
    pruned_program, _, _ = pruner.prune(
        train_program,
        fluid.global_scope(),
        params=ratios.keys(),
        ratios=ratios.values(),
        place=place)
    logger.info("FLOPs after pruning: {}".format(flops(pruned_val_program)))
    train_compile_program = program.create_multi_devices_program(
        pruned_program, train_opt_loss_name)
    train_info_dict = {'compile_program':train_compile_program,\
        'train_program':pruned_program,\
        'reader':train_loader,\
        'fetch_name_list':train_fetch_name_list,\
        'fetch_varname_list':train_fetch_varname_list}
    eval_info_dict = {'program':pruned_val_program,\
        'reader':eval_reader,\
        'fetch_name_list':eval_fetch_name_list,\
        'fetch_varname_list':eval_fetch_varname_list}
    # EAST/DB are detection; everything else here is recognition
    # NOTE(review): only the det branch passes is_slim="prune" -- verify the
    # rec branch omission is intentional
    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(
            config, exe, train_info_dict, eval_info_dict, is_slim="prune")
    else:
        program.train_eval_rec_run(config, exe, train_info_dict, eval_info_dict)