Example #1
def main():
    # Run in static-graph mode; enable_static() only exists in Paddle >= 2.0,
    # so older versions fall through silently.
    try:
        paddle.enable_static()
    except AttributeError:
        pass

    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # Check whether use_gpu=True is set on a CPU-only PaddlePaddle build.
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_prog = fluid.Program()
    eval_program = fluid.Program()
    eval_build_outputs = program.build(config,
                                       eval_program,
                                       startup_prog,
                                       mode='test')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    init_model(config, eval_program, exe)

    eval_reader = reader_main(config=config, mode="eval")
    eval_info_dict = {
        'program': eval_program,
        'reader': eval_reader,
        'fetch_name_list': eval_fetch_name_list,
        'fetch_varname_list': eval_fetch_varname_list
    }
    eval_args = {
        'exe': exe,
        'config': config,
        'eval_info_dict': eval_info_dict
    }
    metrics = eval_function(eval_args)
    print("Baseline: {}".format(metrics))

    params = get_pruned_params(eval_program)
    print('Starting sensitivity analysis')
    sens_0 = slim.prune.sensitivity(
        eval_program,
        place,
        params,
        eval_function,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
        eval_args=eval_args,
        criterion='geometry_median')
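
The sensitivities file written above can be fed straight back into PaddleSlim to pick per-layer prune ratios. A minimal sketch, assuming the same paddleslim.prune helpers that Example #10 imports:

from paddleslim.prune import load_sensitivities, get_ratios_by_loss

sen = load_sensitivities("sensitivities_0.data")
# choose the largest per-parameter ratios whose estimated metric drop stays under 3%
ratios = get_ratios_by_loss(sen, 0.03)
print(ratios)  # {param_name: prune_ratio, ...}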
Example #2
def main():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])
    init_model(config, model, logger)
    model.eval()

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])

    infer_shape = [3, int(FLAGS.height), int(FLAGS.width)]
    if config['Architecture']['model_type'] == "rec":
        infer_shape = [3, 32, -1]

    model = to_static(
        model,
        input_spec=[
            paddle.static.InputSpec(
                shape=[None] + infer_shape, dtype='float32')
        ])
    paddle.jit.save(model, save_path)
    logger.info('inference model is saved to {}'.format(save_path))
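
A quick way to sanity-check the export is to reload it with paddle.jit.load and run a dummy batch. A hedged sketch; the path and input size below are illustrative:

import numpy as np
import paddle

loaded = paddle.jit.load('./rec_inference/inference')  # the save_path used above
loaded.eval()
dummy = paddle.to_tensor(np.random.rand(1, 3, 32, 320).astype('float32'))
out = loaded(dummy)
print(out.shape if hasattr(out, 'shape') else type(out))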
Example #3
def main():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])
    init_model(config, model, logger)
    model.eval()

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])

    if config['Architecture']['algorithm'] == "SRN":
        max_text_length = config['Architecture']['Head']['max_text_length']
        other_shape = [
            paddle.static.InputSpec(shape=[None, 1, 64, 256], dtype='float32'),
            [
                paddle.static.InputSpec(shape=[None, 256, 1], dtype="int64"),
                paddle.static.InputSpec(shape=[None, max_text_length, 1],
                                        dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64"),
                paddle.static.InputSpec(
                    shape=[None, 8, max_text_length, max_text_length],
                    dtype="int64")
            ]
        ]
        model = to_static(model, input_spec=other_shape)
    else:
        infer_shape = [3, -1, -1]
        if config['Architecture']['model_type'] == "rec":
            infer_shape = [3, 32, -1]  # for rec model, H must be 32
            if 'Transform' in config['Architecture'] and config[
                    'Architecture']['Transform'] is not None and config[
                        'Architecture']['Transform']['name'] == 'TPS':
                logger.info(
                    'TPS transforms do not support variable-length input; '
                    'the input size must match the one used during training.')
                infer_shape[-1] = 100
        model = to_static(model,
                          input_spec=[
                              paddle.static.InputSpec(shape=[None] +
                                                      infer_shape,
                                                      dtype='float32')
                          ])

    paddle.jit.save(model, save_path)
    logger.info('inference model is saved to {}'.format(save_path))
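
In PaddleOCR these export entry points are normally launched from the repository root; -c maps to FLAGS.config and -o to FLAGS.opt in the snippets above. An illustrative invocation (paths hypothetical):

python tools/export_model.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o Global.save_inference_dir=./rec_inference/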
Example #4
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # Check whether use_gpu=True is set on a CPU-only PaddlePaddle build.
    use_gpu = config['Global']['use_gpu']
    #     check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    init_model(config, eval_prog, exe)

    blobs = reader_main(config, 'test')()
    infer_img = config['Global']['infer_img']
    infer_list = get_image_file_list(infer_img)
    max_img_num = len(infer_list)
    if len(infer_list) == 0:
        logger.info("Cannot find any image in the infer_img directory.")
    for i in range(max_img_num):
        logger.info("infer_img:%s" % infer_list[i])
        img = next(blobs)
        predict = exe.run(program=eval_prog,
                          feed={"image": img},
                          fetch_list=fetch_varname_list,
                          return_numpy=False)
        scores = np.array(predict[0])
        label = np.array(predict[1])
        if len(label.shape) != 1:
            label, scores = scores, label
        logger.info('\t scores: {}'.format(scores))
        logger.info('\t label: {}'.format(label))
    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)

    fluid.io.save_inference_model("./output",
                                  feeded_var_names=['image'],
                                  target_vars=target_var,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
Example #5
def test_reader():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    train_reader = reader_main(config=config, mode="train")
    import time
    starttime = time.time()
    count = 0
    try:
        for data in train_reader():
            count += 1
            if count % 1 == 0:  # log every batch; raise the modulus to thin out logs
                batch_time = time.time() - starttime
                starttime = time.time()
                logger.info("reader: {}, {}, {}".format(count, len(data),
                                                        batch_time))
    except Exception as e:
        logger.info(e)
    logger.info("finish reader: {}, Success!".format(count))
Example #6
def main():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config["PostProcess"],
                                            config["Global"])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, "character"):
        char_num = len(getattr(post_process_class, "character"))
        if config["Architecture"]["algorithm"] in [
                "Distillation",
        ]:  # distillation model
            for key in config["Architecture"]["Models"]:
                config["Architecture"]["Models"][key]["Head"][
                    "out_channels"] = char_num
                # just one final tensor needs to be exported for inference
                config["Architecture"]["Models"][key][
                    "return_all_feats"] = False
        else:  # base rec model
            config["Architecture"]["Head"]["out_channels"] = char_num
    model = build_model(config["Architecture"])
    _ = load_dygraph_params(config, model, logger, None)
    model.eval()

    save_path = config["Global"]["save_inference_dir"]

    arch_config = config["Architecture"]

    if arch_config["algorithm"] in [
            "Distillation",
    ]:  # distillation model
        archs = list(arch_config["Models"].values())
        for idx, name in enumerate(model.model_name_list):
            sub_model_save_path = os.path.join(save_path, name, "inference")
            export_single_model(model.model_list[idx], archs[idx],
                                sub_model_save_path, logger)
    else:
        save_path = os.path.join(save_path, "inference")
        export_single_model(model, arch_config, save_path, logger)
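
In the distillation branch each sub-model is exported to its own save_path/<name>/inference prefix, so it can be reloaded independently. A hedged sketch; "Student" is a hypothetical sub-model name:

import os
import paddle

student = paddle.jit.load(os.path.join(save_path, "Student", "inference"))
student.eval()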
Example #7
def main():
    ############################################################################################################
    # 1. quantization configs
    ############################################################################################################
    quant_config = {
        # weight preprocess type, default is None and no preprocessing is performed.
        'weight_preprocess_type': None,
        # activation preprocess type, default is None and no preprocessing is performed.
        'activation_preprocess_type': None,
        # weight quantize type, default is 'channel_wise_abs_max'
        'weight_quantize_type': 'channel_wise_abs_max',
        # activation quantize type, default is 'moving_average_abs_max'
        'activation_quantize_type': 'moving_average_abs_max',
        # weight quantize bit num, default is 8
        'weight_bits': 8,
        # activation quantize bit num, default is 8
        'activation_bits': 8,
        # data type after quantization, e.g. 'uint8' or 'int8'; default is 'int8'
        'dtype': 'int8',
        # window size for 'range_abs_max' quantization. default is 10000
        'window_size': 10000,
        # The decay coefficient of moving average, default is 0.9
        'moving_rate': 0.9,
        # for dygraph quantization, layers of type in quantizable_layer_type will be quantized
        'quantizable_layer_type': ['Conv2D', 'Linear'],
    }
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        if config['Architecture']["algorithm"] in [
                "Distillation",
        ]:  # distillation model
            for key in config['Architecture']["Models"]:
                config['Architecture']["Models"][key]["Head"][
                    'out_channels'] = char_num
        else:  # base rec model
            config['Architecture']["Head"]['out_channels'] = char_num

    model = build_model(config['Architecture'])

    # get QAT model
    quanter = QAT(config=quant_config)
    quanter.quantize(model)

    init_model(config, model)
    model.eval()

    # build metric
    eval_class = build_metric(config['Metric'])

    # build dataloader
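    # NOTE: `device` is assumed to be defined at module level in the original
    # script (e.g. via paddle.set_device); it is not created in this function.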
    valid_dataloader = build_dataloader(config, 'Eval', device, logger)

    use_srn = config['Architecture']['algorithm'] == "SRN"
    model_type = config['Architecture']['model_type']
    # start eval
    metric = program.eval(model, valid_dataloader, post_process_class,
                          eval_class, model_type, use_srn)

    logger.info('metric eval ***************')
    for k, v in metric.items():
        logger.info('{}:{}'.format(k, v))

    if config['Architecture']['model_type'] == "det":
        infer_shape = [3, 640, 640]
    else:
        infer_shape = [3, 32, 100]

    save_path = config["Global"]["save_inference_dir"]

    arch_config = config["Architecture"]
    if arch_config["algorithm"] in [
            "Distillation",
    ]:  # distillation model
        for idx, name in enumerate(model.model_name_list):
            sub_model_save_path = os.path.join(save_path, name, "inference")
            export_single_model(quanter, model.model_list[idx], infer_shape,
                                sub_model_save_path, logger)
    else:
        save_path = os.path.join(save_path, "inference")
        export_single_model(quanter, model, infer_shape, save_path, logger)
Example #8
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    char_ops = CharacterOps(config['Global'])
    loss_type = config['Global']['loss_type']
    config['Global']['char_ops'] = char_ops

    # Check whether use_gpu=True is set on a CPU-only PaddlePaddle build.
    use_gpu = config['Global']['use_gpu']
    #     check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    init_model(config, eval_prog, exe)

    blobs = reader_main(config, 'test')()
    infer_img = config['Global']['infer_img']
    infer_list = get_image_file_list(infer_img)
    max_img_num = len(infer_list)
    if len(infer_list) == 0:
        logger.info("Cannot find any image in the infer_img directory.")
    for i in range(max_img_num):
        logger.info("infer_img:%s" % infer_list[i])
        img = next(blobs)
        if loss_type != "srn":
            predict = exe.run(program=eval_prog,
                              feed={"image": img},
                              fetch_list=fetch_varname_list,
                              return_numpy=False)
        else:
            encoder_word_pos_list = []
            gsrm_word_pos_list = []
            gsrm_slf_attn_bias1_list = []
            gsrm_slf_attn_bias2_list = []
            encoder_word_pos_list.append(img[1])
            gsrm_word_pos_list.append(img[2])
            gsrm_slf_attn_bias1_list.append(img[3])
            gsrm_slf_attn_bias2_list.append(img[4])

            encoder_word_pos_list = np.concatenate(encoder_word_pos_list,
                                                   axis=0).astype(np.int64)
            gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list,
                                                axis=0).astype(np.int64)
            gsrm_slf_attn_bias1_list = np.concatenate(gsrm_slf_attn_bias1_list,
                                                      axis=0).astype(
                                                          np.float32)
            gsrm_slf_attn_bias2_list = np.concatenate(gsrm_slf_attn_bias2_list,
                                                      axis=0).astype(
                                                          np.float32)

            predict = exe.run(program=eval_prog,
                              feed={
                                  'image': img[0],
                                  'encoder_word_pos': encoder_word_pos_list,
                                  'gsrm_word_pos': gsrm_word_pos_list,
                                  'gsrm_slf_attn_bias1': gsrm_slf_attn_bias1_list,
                                  'gsrm_slf_attn_bias2': gsrm_slf_attn_bias2_list
                              },
                              fetch_list=fetch_varname_list,
                              return_numpy=False)
        if loss_type == "ctc":
            preds = np.array(predict[0])
            preds = preds.reshape(-1)
            preds_lod = predict[0].lod()[0]
            preds_text = char_ops.decode(preds)
            probs = np.array(predict[1])
            ind = np.argmax(probs, axis=1)
            blank = probs.shape[1]
            valid_ind = np.where(ind != (blank - 1))[0]
            if len(valid_ind) == 0:
                continue
            score = np.mean(probs[valid_ind, ind[valid_ind]])
        elif loss_type == "attention":
            preds = np.array(predict[0])
            probs = np.array(predict[1])
            end_pos = np.where(preds[0, :] == 1)[0]
            if len(end_pos) <= 1:
                preds = preds[0, 1:]
                score = np.mean(probs[0, 1:])
            else:
                preds = preds[0, 1:end_pos[1]]
                score = np.mean(probs[0, 1:end_pos[1]])
            preds = preds.reshape(-1)
            preds_text = char_ops.decode(preds)
        elif loss_type == "srn":
            char_num = char_ops.get_char_num()
            preds = np.array(predict[0])
            preds = preds.reshape(-1)
            probs = np.array(predict[1])
            ind = np.argmax(probs, axis=1)
            valid_ind = np.where(preds != int(char_num - 1))[0]
            if len(valid_ind) == 0:
                continue
            score = np.mean(probs[valid_ind, ind[valid_ind]])
            preds = preds[:valid_ind[-1] + 1]
            preds_text = char_ops.decode(preds)
        logger.info("\t index: {}".format(preds))
        logger.info("\t word : {}".format(preds_text))
        logger.info("\t score: {}".format(score))

    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)

    fluid.io.save_inference_model("./output/",
                                  feeded_var_names=['image'],
                                  target_vars=target_var,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
Example #9
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # Check whether use_gpu=True is set on a CPU-only PaddlePaddle build.
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    train_build_outputs = program.build(config,
                                        train_program,
                                        startup_program,
                                        mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]

    eval_program = fluid.Program()
    eval_build_outputs = program.build(config,
                                       eval_program,
                                       startup_program,
                                       mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)

    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    eval_reader = reader_main(config=config, mode="eval")

    exe = fluid.Executor(place)
    exe.run(startup_program)

    # compile program for multi-devices
    train_compile_program = program.create_multi_devices_program(
        train_program, train_opt_loss_name)

    # dump model structure
    if config['Global']['debug']:
        if 'attention' in config['Global']['loss_type']:
            logger.warning('Dumping the model structure is not supported for attention models.')
        else:
            summary(train_program)

    init_model(config, train_program, exe)

    train_info_dict = {
        'compile_program': train_compile_program,
        'train_program': train_program,
        'reader': train_loader,
        'fetch_name_list': train_fetch_name_list,
        'fetch_varname_list': train_fetch_varname_list
    }

    eval_info_dict = {
        'program': eval_program,
        'reader': eval_reader,
        'fetch_name_list': eval_fetch_name_list,
        'fetch_varname_list': eval_fetch_varname_list
    }

    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(config, exe, train_info_dict,
                                   eval_info_dict)
    else:
        program.train_eval_rec_run(config, exe, train_info_dict,
                                   eval_info_dict)
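
These static-graph drivers are normally launched through the PaddleOCR CLI, where -c selects the YAML config and -o overrides individual keys. An illustrative invocation (config path hypothetical):

python tools/train.py -c configs/det/det_mv3_db.yml -o Global.use_gpu=true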
Example #10
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # Check whether use_gpu=True is set on a CPU-only PaddlePaddle build.
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    train_build_outputs = program.build(config,
                                        train_program,
                                        startup_program,
                                        mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]

    eval_program = fluid.Program()
    eval_build_outputs = program.build(config,
                                       eval_program,
                                       startup_program,
                                       mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)

    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    eval_reader = reader_main(config=config, mode="eval")

    exe = fluid.Executor(place)
    exe.run(startup_program)

    init_model(config, train_program, exe)

    sen = load_sensitivities("sensitivities_0.data")
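    # NOTE: `skip_list` is assumed to be defined at module level in the original
    # script: parameter names that must never be pruned.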
    for i in skip_list:
        if i in sen.keys():
            sen.pop(i)
    back_bone_list = ['conv' + str(x) for x in range(1, 5)]
    for i in back_bone_list:
        for key in list(sen.keys()):
            if i + '_' in key:
                sen.pop(key)
    ratios = get_ratios_by_loss(sen, 0.03)
    logger.info("FLOPs before pruning: {}".format(flops(eval_program)))
    pruner = Pruner(criterion='geometry_median')
    print("ratios: {}".format(ratios))
    pruned_val_program, _, _ = pruner.prune(eval_program,
                                            fluid.global_scope(),
                                            params=ratios.keys(),
                                            ratios=ratios.values(),
                                            place=place,
                                            only_graph=True)

    pruned_program, _, _ = pruner.prune(train_program,
                                        fluid.global_scope(),
                                        params=ratios.keys(),
                                        ratios=ratios.values(),
                                        place=place)
    logger.info("FLOPs after pruning: {}".format(flops(pruned_val_program)))
    train_compile_program = program.create_multi_devices_program(
        pruned_program, train_opt_loss_name)

    train_info_dict = {
        'compile_program': train_compile_program,
        'train_program': pruned_program,
        'reader': train_loader,
        'fetch_name_list': train_fetch_name_list,
        'fetch_varname_list': train_fetch_varname_list
    }

    eval_info_dict = {
        'program': pruned_val_program,
        'reader': eval_reader,
        'fetch_name_list': eval_fetch_name_list,
        'fetch_varname_list': eval_fetch_varname_list
    }

    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(config,
                                   exe,
                                   train_info_dict,
                                   eval_info_dict,
                                   is_slim="prune")
    else:
        program.train_eval_rec_run(config, exe, train_info_dict,
                                   eval_info_dict)
Example #11
def main():
    ############################################################################################################
    # 1. quantization configs
    ############################################################################################################
    quant_config = {
        # weight preprocess type, default is None and no preprocessing is performed.
        'weight_preprocess_type': None,
        # activation preprocess type, default is None and no preprocessing is performed.
        'activation_preprocess_type': None,
        # weight quantize type, default is 'channel_wise_abs_max'
        'weight_quantize_type': 'channel_wise_abs_max',
        # activation quantize type, default is 'moving_average_abs_max'
        'activation_quantize_type': 'moving_average_abs_max',
        # weight quantize bit num, default is 8
        'weight_bits': 8,
        # activation quantize bit num, default is 8
        'activation_bits': 8,
        # data type after quantization, e.g. 'uint8' or 'int8'; default is 'int8'
        'dtype': 'int8',
        # window size for 'range_abs_max' quantization. default is 10000
        'window_size': 10000,
        # The decay coefficient of moving average, default is 0.9
        'moving_rate': 0.9,
        # for dygraph quantization, layers of type in quantizable_layer_type will be quantized
        'quantizable_layer_type': ['Conv2D', 'Linear'],
    }
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post process

    post_process_class = build_post_process(config['PostProcess'],
                                            config['Global'])

    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])

    # get QAT model
    quanter = QAT(config=quant_config)
    quanter.quantize(model)

    init_model(config, model, logger)
    model.eval()

    # build metric
    eval_class = build_metric(config['Metric'])

    # build dataloader
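    # NOTE: `device` is assumed to be defined at module level in the original
    # script; it is not created in this function.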
    valid_dataloader = build_dataloader(config, 'Eval', device, logger)

    # start eval
    metric = program.eval(model, valid_dataloader, post_process_class,
                          eval_class)
    logger.info('metric eval ***************')
    for k, v in metric.items():
        logger.info('{}:{}'.format(k, v))

    save_path = '{}/inference'.format(config['Global']['save_inference_dir'])
    if config['Architecture']['model_type'] == "det":
        infer_shape = [3, 640, 640]
    else:
        infer_shape = [3, 32, 100]

    quanter.save_quantized_model(model,
                                 save_path,
                                 input_spec=[
                                     paddle.static.InputSpec(shape=[None] +
                                                             infer_shape,
                                                             dtype='float32')
                                 ])
    logger.info('inference QAT model is saved to {}'.format(save_path))
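
The quantized export is a standard inference pair, so it can be loaded with the Paddle inference API. A hedged sketch; the .pdmodel/.pdiparams layout is assumed to follow save_quantized_model:

import paddle.inference as paddle_infer

cfg = paddle_infer.Config(save_path + ".pdmodel", save_path + ".pdiparams")
predictor = paddle_infer.create_predictor(cfg)
print(predictor.get_input_names())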