Example #1
    def __init__(self, params):
        """
        Detection module for OCR text detection.
        Args:
            params (dict): hyperparameters for the detection module.
        """
        global_params = params['Global']
        self.algorithm = global_params['algorithm']

        backbone_params = deepcopy(params["Backbone"])
        backbone_params.update(global_params)
        self.backbone = create_module(backbone_params['function'])\
                (params=backbone_params)

        head_params = deepcopy(params["Head"])
        head_params.update(global_params)
        self.head = create_module(head_params['function'])\
                (params=head_params)

        loss_params = deepcopy(params["Loss"])
        loss_params.update(global_params)
        self.loss = create_module(loss_params['function'])\
                (params=loss_params)

        self.image_shape = global_params['image_shape']
Example #2
    def __init__(self, params):
        super(RecModel, self).__init__()
        global_params = params['Global']
        char_num = global_params['char_ops'].get_char_num()
        global_params['char_num'] = char_num
        if "TPS" in params:
            tps_params = deepcopy(params["TPS"])
            tps_params.update(global_params)
            self.tps = create_module(tps_params['function'])\
                (params=tps_params)
        else:
            self.tps = None

        backbone_params = deepcopy(params["Backbone"])
        backbone_params.update(global_params)
        self.backbone = create_module(backbone_params['function'])\
                (params=backbone_params)

        head_params = deepcopy(params["Head"])
        head_params.update(global_params)
        self.head = create_module(head_params['function'])\
                (params=head_params)

        loss_params = deepcopy(params["Loss"])
        loss_params.update(global_params)
        self.loss = create_module(loss_params['function'])\
                (params=loss_params)

        self.loss_type = global_params['loss_type']
        self.image_shape = global_params['image_shape']
        self.max_text_length = global_params['max_text_length']
Example #3
def build(config, main_prog, startup_prog, mode):
    """
    Build a program using a model and an optimizer
        1. create feeds
        2. create a dataloader
        3. create a model
        4. create fetches
        5. create an optimizer
    Args:
        config(dict): config
        main_prog(Program): main program
        startup_prog(Program): startup program
        mode(str): train or valid
    Returns:
        dataloader: a bridge between the model and the data
        fetch_name_list(list[str]): names of the model outputs (loss and metrics)
        fetch_varname_list(list[str]): variable names of the model outputs
        opt_loss_name(str or None): name of the optimized loss (train mode only)
        model_average: ModelAverage instance for SRN, otherwise None
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_infor = config['Architecture']['function']
            model = create_module(func_infor)(params=config)
            dataloader, outputs = model(mode=mode)
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
            opt_loss_name = None
            model_average = None
            if mode == "train":
                opt_loss = outputs['total_loss']
                opt_params = config['Optimizer']
                optimizer = create_module(opt_params['function'])(opt_params)
                optimizer.minimize(opt_loss)
                opt_loss_name = opt_loss.name
                global_lr = optimizer._global_learning_rate()
                fetch_name_list.insert(0, "lr")
                fetch_varname_list.insert(0, global_lr.name)
                if "loss_type" in config["Global"]:
                    if config['Global']["loss_type"] == 'srn':
                        model_average = fluid.optimizer.ModelAverage(
                            config['Global']['average_window'],
                            min_average_window=config['Global']
                            ['min_average_window'],
                            max_average_window=config['Global']
                            ['max_average_window'])

    return (dataloader, fetch_name_list, fetch_varname_list, opt_loss_name,
            model_average)
Example #4
def build_export(config, main_prog, startup_prog):
    """
    Build a program using a model and an optimizer
        1. create feeds
        2. create a dataloader
        3. create a model
        4. create fetchs
        5. create an optimizer

    Args:
        config(dict): config
        main_prog(): main program
        startup_prog(): startup program
        is_train(bool): train or valid

    Returns:
        dataloader(): a bridge between the model and the data
        fetchs(dict): dict of model outputs(included loss and measures)
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_infor = config['Architecture']['function']
            model = create_module(func_infor)(params=config)
            image, outputs = model(mode='export')
            fetches_var = [outputs[name] for name in outputs]
            fetches_var_name = [name for name in outputs]
    feeded_var_names = [image.name]
    target_vars = fetches_var
    return feeded_var_names, target_vars, fetches_var_name
Example #5
def build_export(config, main_prog, startup_prog):
    """
    Build input and output for exporting a checkpoint model to an inference model
    Args:
        config(dict): config
        main_prog: main program
        startup_prog: startup program
    Returns:
        feeded_var_names(list[str]): input variable names for the exported inference model
        target_vars(list[Variable]): output variables for the exported inference model
        fetches_var_name(list[str]): sorted names of the checkpoint model outputs (loss and metrics)
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_infor = config['Architecture']['function']
            model = create_module(func_infor)(params=config)
            algorithm = config['Global']['algorithm']
            if algorithm == "SRN":
                image, others, outputs = model(mode='export')
            else:
                image, outputs = model(mode='export')
            fetches_var_name = sorted([name for name in outputs.keys()])
            fetches_var = [outputs[name] for name in fetches_var_name]
    if algorithm == "SRN":
        others_var_names = sorted([name for name in others.keys()])
        feeded_var_names = [image.name] + others_var_names
    else:
        feeded_var_names = [image.name]

    target_vars = fetches_var
    return feeded_var_names, target_vars, fetches_var_name
Example #6
    def __init__(self, params):
        self.num_workers = params['num_workers']
        self.label_file_path = params['label_file_path']
        self.batch_size = params['train_batch_size_per_card']
        assert 'process_function' in params,\
            "process_function is missing from the Reader params"
        self.process = create_module(params['process_function'])(params)
Example #7
def reader_main(config=None, mode=None):
    """Create a reader for training, evaluation, or testing.

    Args:
        config(dict): config
        mode(str): one of "train", "eval", "test"

    Returns:
        a batch reader for the given mode
    """
    assert mode in ["train", "eval", "test"],\
        "Unsupported mode: {}".format(mode)
    global_params = config['Global']
    if mode == "train":
        params = deepcopy(config['TrainReader'])
    elif mode == "eval":
        params = deepcopy(config['EvalReader'])
    else:
        params = deepcopy(config['TestReader'])
    params['mode'] = mode
    params.update(global_params)
    reader_function = params['reader_function']
    function = create_module(reader_function)(params)
    if mode == "train":
        if sys.platform == "win32":
            return function(0)
        readers = []
        num_workers = params['num_workers']
        for process_id in range(num_workers):
            readers.append(function(process_id))
        return paddle.reader.multiprocess_reader(readers, False)
    else:
        return function(mode)
Example #8
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_num'] = char_ops.get_char_num()

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_outputs = rec_model(mode="test")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        fluid.load(eval_prog, pretrain_weights)

    test_img_path = config['test_img_path']
    image_shape = config['Global']['image_shape']
    blobs = test_reader(image_shape, test_img_path)
    predict = exe.run(program=eval_prog,
                      feed={"image": blobs},
                      fetch_list=eval_fetch_list,
                      return_numpy=False)
    preds = np.array(predict[0])
    if preds.shape[1] == 1:
        preds = preds.reshape(-1)
        preds_lod = predict[0].lod()[0]
        preds_text = char_ops.decode(preds)
    else:
        end_pos = np.where(preds[0, :] == 1)[0]
        if len(end_pos) <= 1:
            preds_text = preds[0, 1:]
        else:
            preds_text = preds[0, 1:end_pos[1]]
        preds_text = preds_text.reshape(-1)
        preds_text = char_ops.decode(preds_text)

    fluid.io.save_inference_model("./output/",
                                  feeded_var_names=['image'],
                                  target_vars=eval_outputs,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
    print(preds)
    print(preds_text)
Example #9
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    init_model(config, eval_prog, exe)

    blobs = reader_main(config, 'test')()
    infer_img = config['Global']['infer_img']
    infer_list = get_image_file_list(infer_img)
    max_img_num = len(infer_list)
    if len(infer_list) == 0:
        logger.info("Can not find img in infer_img dir.")
    for i in range(max_img_num):
        logger.info("infer_img:%s" % infer_list[i])
        img = next(blobs)
        predict = exe.run(program=eval_prog,
                          feed={"image": img},
                          fetch_list=fetch_varname_list,
                          return_numpy=False)
        scores = np.array(predict[0])
        label = np.array(predict[1])
        if len(label.shape) != 1:
            label, scores = scores, label
        logger.info('\t scores: {}'.format(scores))
        logger.info('\t label: {}'.format(label))
    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)

    fluid.io.save_inference_model("./output",
                                  feeded_var_names=['image'],
                                  target_vars=target_var,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
Example #10
def paddle(img_path, config, exe, eval_prog, eval_fetch_list):
    config['Global']['infer_img'] = img_path
    test_reader = reader_main(config=config, mode='test')
    for data in test_reader():
        img_num = len(data)
        img_list = []
        ratio_list = []
        img_name_list = []
        for ino in range(img_num):
            img_list.append(data[ino][0])
            ratio_list.append(data[ino][1])
            img_name_list.append(data[ino][2])
        img_list = np.concatenate(img_list, axis=0)
        logger.info("Getting text boxes..")
        outs = exe.run(eval_prog,\
            feed={'image': img_list},\
            fetch_list=eval_fetch_list)
        logger.info('Done getting text boxes!')

        global_params = config['Global']
        postprocess_params = deepcopy(config["PostProcess"])
        postprocess_params.update(global_params)
        postprocess = create_module(postprocess_params['function'])\
            (params=postprocess_params)
        if config['Global']['algorithm'] == 'EAST':
            dic = {'f_score': outs[0], 'f_geo': outs[1]}
        elif config['Global']['algorithm'] == 'DB':
            dic = {'maps': outs[0]}
        elif config['Global']['algorithm'] == 'SAST':
            dic = {
                'f_score': outs[0],
                'f_border': outs[1],
                'f_tvo': outs[2],
                'f_tco': outs[3]
            }
        else:
            raise Exception("only support algorithm: ['EAST', 'DB', 'SAST']")
        dt_boxes_list = postprocess(dic, ratio_list)
        for ino in range(img_num):
            dt_boxes = dt_boxes_list[ino]
            img_name = img_name_list[ino]

            src_img = cv2.imread(img_name)

            copy_img = src_img.copy()

            draw_det_res(dt_boxes, config, src_img, img_name)

    # Note: only the boxes and image copy from the last batch are returned.
    return dt_boxes, copy_img
Example #11
    def __init__(self, params):
        self.num_workers = params['num_workers']
        self.label_file_path = params['label_file_path']
        self.use_mul_data = False
        if isinstance(self.label_file_path, list):
            self.use_mul_data = True
            self.data_ratio_list = params['data_ratio_list']
        self.batch_size = params['train_batch_size_per_card']
        assert 'process_function' in params,\
            "process_function is missing from the Reader params"
        self.process = create_module(params['process_function'])(params)
Example #12
    def __init__(self, params):
        super(ClsModel, self).__init__()
        global_params = params['Global']
        self.infer_img = global_params['infer_img']

        backbone_params = deepcopy(params["Backbone"])
        backbone_params.update(global_params)
        self.backbone = create_module(backbone_params['function']) \
            (params=backbone_params)

        head_params = deepcopy(params["Head"])
        head_params.update(global_params)
        self.head = create_module(head_params['function']) \
            (params=head_params)

        loss_params = deepcopy(params["Loss"])
        loss_params.update(global_params)
        self.loss = create_module(loss_params['function']) \
            (params=loss_params)

        self.image_shape = global_params['image_shape']
Example #13
def build_export(config, main_prog, startup_prog):
    """
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_infor = config['Architecture']['function']
            model = create_module(func_infor)(params=config)
            image, outputs = model(mode='export')
            fetches_var_name = sorted([name for name in outputs.keys()])
            fetches_var = [outputs[name] for name in fetches_var_name]
    feeded_var_names = [image.name]
    target_vars = fetches_var
    return feeded_var_names, target_vars, fetches_var_name
Example #14
def build(config, main_prog, startup_prog, mode):
    """
    Build a program using a model and an optimizer
        1. create feeds
        2. create a dataloader
        3. create a model
        4. create fetches
        5. create an optimizer

    Args:
        config(dict): config
        main_prog(Program): main program
        startup_prog(Program): startup program
        mode(str): train or valid

    Returns:
        dataloader: a bridge between the model and the data
        fetch_name_list(list[str]): names of the model outputs (loss and metrics)
        fetch_varname_list(list[str]): variable names of the model outputs
        opt_loss_name(str or None): name of the optimized loss (train mode only)
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_infor = config['Architecture']['function']
            model = create_module(func_infor)(params=config)
            dataloader, outputs = model(mode=mode)
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
            opt_loss_name = None
            if mode == "train":
                opt_loss = outputs['total_loss']
                opt_params = config['Optimizer']
                optimizer = create_module(opt_params['function'])(opt_params)
                optimizer.minimize(opt_loss)
                opt_loss_name = opt_loss.name
                global_lr = optimizer._global_learning_rate()
                global_lr.persistable = True
                fetch_name_list.insert(0, "lr")
                fetch_varname_list.insert(0, global_lr.name)
    return (dataloader, fetch_name_list, fetch_varname_list, opt_loss_name)
Example #15
def cal_det_res(exe, config, eval_info_dict):
    global_params = config['Global']
    save_res_path = global_params['save_res_path']
    postprocess_params = deepcopy(config["PostProcess"])
    postprocess_params.update(global_params)
    postprocess = create_module(postprocess_params['function']) \
        (params=postprocess_params)
    if not os.path.exists(os.path.dirname(save_res_path)):
        os.makedirs(os.path.dirname(save_res_path))
    with open(save_res_path, "wb") as fout:
        tackling_num = 0
        for data in eval_info_dict['reader']():
            img_num = len(data)
            tackling_num = tackling_num + img_num
            logger.info("test tackling num:%d", tackling_num)
            img_list = []
            ratio_list = []
            img_name_list = []
            for ino in range(img_num):
                img_list.append(data[ino][0])
                ratio_list.append(data[ino][1])
                img_name_list.append(data[ino][2])
            try:
                img_list = np.concatenate(img_list, axis=0)
            except Exception:
                err = ("concatenate error, usually caused by input images of "
                       "different shapes during evaluation or testing.\n"
                       "Please set \"test_batch_size_per_card\" to 1 in the main yml, "
                       "or add \"test_image_shape: [h, w]\" to the reader yml for EvalReader.")
                raise Exception(err)
            outs = exe.run(eval_info_dict['program'], \
                           feed={'image': img_list}, \
                           fetch_list=eval_info_dict['fetch_varname_list'])
            outs_dict = {}
            for tno in range(len(outs)):
                fetch_name = eval_info_dict['fetch_name_list'][tno]
                fetch_value = np.array(outs[tno])
                outs_dict[fetch_name] = fetch_value
            dt_boxes_list = postprocess(outs_dict, ratio_list)
            for ino in range(img_num):
                dt_boxes = dt_boxes_list[ino]
                img_name = img_name_list[ino]
                dt_boxes_json = []
                for box in dt_boxes:
                    tmp_json = {"transcription": ""}
                    tmp_json['points'] = box.tolist()
                    dt_boxes_json.append(tmp_json)
                otstr = img_name + "\t" + json.dumps(dt_boxes_json) + "\n"
                fout.write(otstr.encode())
    return
Example #16
def load_model():
    config = program.load_config('./configs/det/det_r18_vd_db_v1.1.yml')

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    det_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, eval_outputs = det_model(mode="test")
            fetch_name_list = list(eval_outputs.keys())
            eval_fetch_list = [eval_outputs[v].name for v in fetch_name_list]

    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    # load checkpoints
    checkpoints = config['Global'].get('checkpoints')
    if checkpoints:
        fluid.load(eval_prog, checkpoints, exe)
        logger.info("Finished loading model from {}".format(checkpoints))
    else:
        raise Exception("checkpoints {} does not exist!".format(checkpoints))

    config_ocr = Cfg.load_config_from_name('vgg_seq2seq')
    config_ocr['weights'] = './my_weights/transformer.pth'
    config_ocr['cnn']['pretrained'] = False
    config_ocr['device'] = 'cpu'
    config_ocr['predictor']['beamsearch'] = False

    detector = Predictor(config_ocr)

    return detector, exe, config, eval_prog, eval_fetch_list
Example #17
    def __call__(self, mode):
        process_function = create_module(self.params['process_function'])(
            self.params)
        batch_size = self.params['test_batch_size_per_card']

        img_list = []
        if mode != "test":
            img_set_dir = self.params['img_set_dir']
            img_name_list_path = self.params['label_file_path']
            with open(img_name_list_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    img_name = line.decode().strip("\n").split("\t")[0]
                    img_path = os.path.join(img_set_dir, img_name)
                    img_list.append(img_path)
        else:
            img_path = self.params['infer_img']
            img_list = [img_path]

        def batch_iter_reader():
            batch_outs = []
            for img_path in img_list:
                img = cv2.imread(img_path)
                if img is None:
                    logger.info("{} does not exist!".format(img_path))
                    continue
                elif len(list(img.shape)) == 2 or img.shape[2] == 1:
                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
                outs = process_function(img)
                outs.append(img_path)
                batch_outs.append(outs)
                if len(batch_outs) == batch_size:
                    yield batch_outs
                    batch_outs = []
            if len(batch_outs) != 0:
                yield batch_outs

        return batch_iter_reader
Example #18
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    print(config)

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    det_model = create_module(config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_loader, eval_outputs = det_model(mode="test")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        load_pretrain(exe, eval_prog, pretrain_weights)
    else:
        logger.info("Cannot find pretrain_weights: %s" % pretrain_weights)
        sys.exit(0)

    metrics = eval_det_run(exe, eval_prog, eval_fetch_list, config, "test")
    logger.info("metrics:{}".format(metrics))
    logger.info("success!")
Example #19
    def __call__(self, mode):
        process_function = create_module(self.params['process_function'])(
            self.params)
        batch_size = self.params['test_batch_size_per_card']

        flag_test_single_img = False
        if mode == "test":
            single_img_path = self.params['single_img_path']
            if single_img_path is not None:
                flag_test_single_img = True

        img_list = []
        if flag_test_single_img:
            img_list.append([single_img_path, single_img_path])
        else:
            img_set_dir = self.params['img_set_dir']
            img_name_list_path = self.params['label_file_path']
            with open(img_name_list_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    img_name = line.decode().strip("\n").split("\t")[0]
                    img_path = img_set_dir + "/" + img_name
                    img_list.append([img_path, img_name])

        def batch_iter_reader():
            batch_outs = []
            for img_path, img_name in img_list:
                img = cv2.imread(img_path)
                if img is None:
                    logger.info("load image error:" + img_path)
                    continue
                outs = process_function(img)
                outs.append(img_name)
                batch_outs.append(outs)
                if len(batch_outs) == batch_size:
                    yield batch_outs
                    batch_outs = []
            if len(batch_outs) != 0:
                yield batch_outs

        return batch_iter_reader
Example #20
def cal_det_res(exe, config, eval_info_dict):
    global_params = config['Global']
    save_res_path = global_params['save_res_path']
    postprocess_params = deepcopy(config["PostProcess"])
    postprocess_params.update(global_params)
    postprocess = create_module(postprocess_params['function']) \
        (params=postprocess_params)
    with open(save_res_path, "wb") as fout:
        tackling_num = 0
        for data in eval_info_dict['reader']():
            img_num = len(data)
            tackling_num = tackling_num + img_num
            logger.info("test tackling num:%d", tackling_num)
            img_list = []
            ratio_list = []
            img_name_list = []
            for ino in range(img_num):
                img_list.append(data[ino][0])
                ratio_list.append(data[ino][1])
                img_name_list.append(data[ino][2])
            img_list = np.concatenate(img_list, axis=0)
            outs = exe.run(eval_info_dict['program'], \
                           feed={'image': img_list}, \
                           fetch_list=eval_info_dict['fetch_varname_list'])
            outs_dict = {}
            for tno in range(len(outs)):
                fetch_name = eval_info_dict['fetch_name_list'][tno]
                fetch_value = np.array(outs[tno])
                outs_dict[fetch_name] = fetch_value
            dt_boxes_list = postprocess(outs_dict, ratio_list)
            for ino in range(img_num):
                dt_boxes = dt_boxes_list[ino]
                img_name = img_name_list[ino]
                dt_boxes_json = []
                for box in dt_boxes:
                    tmp_json = {"transcription": ""}
                    tmp_json['points'] = box.tolist()
                    dt_boxes_json.append(tmp_json)
                otstr = img_name + "\t" + json.dumps(dt_boxes_json) + "\n"
                fout.write(otstr.encode())
    return
Example #21
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    print(config)

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    det_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, eval_outputs = det_model(mode="test")
            fetch_name_list = list(eval_outputs.keys())
            eval_fetch_list = [eval_outputs[v].name for v in fetch_name_list]

    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    # load checkpoints
    checkpoints = config['Global'].get('checkpoints')
    if checkpoints:
        fluid.load(eval_prog, checkpoints, exe)
        logger.info("Finished loading model from {}".format(checkpoints))
    else:
        raise Exception("checkpoints {} does not exist!".format(checkpoints))

    save_res_path = config['Global']['save_res_path']
    if not os.path.exists(os.path.dirname(save_res_path)):
        os.makedirs(os.path.dirname(save_res_path))
    with open(save_res_path, "wb") as fout:

        test_reader = reader_main(config=config, mode='test')
        tackling_num = 0
        for data in test_reader():
            img_num = len(data)
            tackling_num = tackling_num + img_num
            logger.info("tackling_num:%d", tackling_num)
            img_list = []
            ratio_list = []
            img_name_list = []
            for ino in range(img_num):
                img_list.append(data[ino][0])
                ratio_list.append(data[ino][1])
                img_name_list.append(data[ino][2])

            img_list = np.concatenate(img_list, axis=0)
            outs = exe.run(eval_prog,\
                feed={'image': img_list},\
                fetch_list=eval_fetch_list)

            global_params = config['Global']
            postprocess_params = deepcopy(config["PostProcess"])
            postprocess_params.update(global_params)
            postprocess = create_module(postprocess_params['function'])\
                (params=postprocess_params)
            if config['Global']['algorithm'] == 'EAST':
                dic = {'f_score': outs[0], 'f_geo': outs[1]}
            elif config['Global']['algorithm'] == 'DB':
                dic = {'maps': outs[0]}
            else:
                raise Exception("only support algorithm: ['EAST', 'DB']")
            dt_boxes_list = postprocess(dic, ratio_list)
            for ino in range(img_num):
                dt_boxes = dt_boxes_list[ino]
                img_name = img_name_list[ino]
                dt_boxes_json = []
                for box in dt_boxes:
                    tmp_json = {"transcription": ""}
                    tmp_json['points'] = box.tolist()
                    dt_boxes_json.append(tmp_json)
                otstr = img_name + "\t" + json.dumps(dt_boxes_json) + "\n"
                fout.write(otstr.encode())
                src_img = cv2.imread(img_name)
                draw_det_res(dt_boxes, config, src_img, img_name)

    logger.info("success!")
Example #22
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    char_ops = CharacterOps(config['Global'])
    loss_type = config['Global']['loss_type']
    config['Global']['char_ops'] = char_ops

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    init_model(config, eval_prog, exe)

    blobs = reader_main(config, 'test')()
    infer_img = config['Global']['infer_img']
    infer_list = get_image_file_list(infer_img)
    max_img_num = len(infer_list)
    if len(infer_list) == 0:
        logger.info("Can not find img in infer_img dir.")
    for i in range(max_img_num):
        logger.info("infer_img:%s" % infer_list[i])
        img = next(blobs)
        if loss_type != "srn":
            predict = exe.run(program=eval_prog,
                              feed={"image": img},
                              fetch_list=fetch_varname_list,
                              return_numpy=False)
        else:
            encoder_word_pos_list = []
            gsrm_word_pos_list = []
            gsrm_slf_attn_bias1_list = []
            gsrm_slf_attn_bias2_list = []
            encoder_word_pos_list.append(img[1])
            gsrm_word_pos_list.append(img[2])
            gsrm_slf_attn_bias1_list.append(img[3])
            gsrm_slf_attn_bias2_list.append(img[4])

            encoder_word_pos_list = np.concatenate(encoder_word_pos_list,
                                                   axis=0).astype(np.int64)
            gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list,
                                                axis=0).astype(np.int64)
            gsrm_slf_attn_bias1_list = np.concatenate(gsrm_slf_attn_bias1_list,
                                                      axis=0).astype(
                                                          np.float32)
            gsrm_slf_attn_bias2_list = np.concatenate(gsrm_slf_attn_bias2_list,
                                                      axis=0).astype(
                                                          np.float32)

            predict = exe.run(program=eval_prog, \
                       feed={'image': img[0], 'encoder_word_pos': encoder_word_pos_list,
                             'gsrm_word_pos': gsrm_word_pos_list, 'gsrm_slf_attn_bias1': gsrm_slf_attn_bias1_list,
                             'gsrm_slf_attn_bias2': gsrm_slf_attn_bias2_list}, \
                       fetch_list=fetch_varname_list, \
                       return_numpy=False)
        if loss_type == "ctc":
            preds = np.array(predict[0])
            preds = preds.reshape(-1)
            preds_lod = predict[0].lod()[0]
            preds_text = char_ops.decode(preds)
            probs = np.array(predict[1])
            ind = np.argmax(probs, axis=1)
            blank = probs.shape[1]
            valid_ind = np.where(ind != (blank - 1))[0]
            if len(valid_ind) == 0:
                continue
            score = np.mean(probs[valid_ind, ind[valid_ind]])
        elif loss_type == "attention":
            preds = np.array(predict[0])
            probs = np.array(predict[1])
            end_pos = np.where(preds[0, :] == 1)[0]
            if len(end_pos) <= 1:
                preds = preds[0, 1:]
                score = np.mean(probs[0, 1:])
            else:
                preds = preds[0, 1:end_pos[1]]
                score = np.mean(probs[0, 1:end_pos[1]])
            preds = preds.reshape(-1)
            preds_text = char_ops.decode(preds)
        elif loss_type == "srn":
            char_num = char_ops.get_char_num()
            preds = np.array(predict[0])
            preds = preds.reshape(-1)
            probs = np.array(predict[1])
            ind = np.argmax(probs, axis=1)
            valid_ind = np.where(preds != int(char_num - 1))[0]
            if len(valid_ind) == 0:
                continue
            score = np.mean(probs[valid_ind, ind[valid_ind]])
            preds = preds[:valid_ind[-1] + 1]
            preds_text = char_ops.decode(preds)
        logger.info("\t index: {}".format(preds))
        logger.info("\t word : {}".format(preds_text))
        logger.info("\t score: {}".format(score))

    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)

    fluid.io.save_inference_model("./output/",
                                  feeded_var_names=['image'],
                                  target_vars=target_var,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
Example #23
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_ops'] = char_ops

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            _, outputs = rec_model(mode="test")
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    init_model(config, eval_prog, exe)

    blobs = reader_main(config, 'test')
    imgs = next(blobs())
    for img in imgs:
        predict = exe.run(program=eval_prog,
                          feed={"image": img},
                          fetch_list=fetch_varname_list,
                          return_numpy=False)

        preds = np.array(predict[0])
        if preds.shape[1] == 1:
            preds = preds.reshape(-1)
            preds_lod = predict[0].lod()[0]
            preds_text = char_ops.decode(preds)
        else:
            end_pos = np.where(preds[0, :] == 1)[0]
            if len(end_pos) <= 1:
                preds_text = preds[0, 1:]
            else:
                preds_text = preds[0, 1:end_pos[1]]
            preds_text = preds_text.reshape(-1)
            preds_text = char_ops.decode(preds_text)

        print(preds)
        print(preds_text)

    # save for inference model
    target_var = []
    for key, values in outputs.items():
        target_var.append(values)

    fluid.io.save_inference_model(
        "./output/",
        feeded_var_names=['image'],
        target_vars=target_var,
        executor=exe,
        main_program=eval_prog,
        model_filename="model",
        params_filename="params")
Example #24
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_num'] = char_ops.get_char_num()
    print(config)

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            train_loader, train_outputs = rec_model(mode="train")
            save_var = train_outputs[1]

            if "gradient_clip" in config['Global']:
                gradient_clip = config['Global']['gradient_clip']
                clip = fluid.clip.GradientClipByGlobalNorm(gradient_clip)
                fluid.clip.set_gradient_clip(clip, program=train_prog)

            train_fetch_list = [v.name for v in train_outputs]
            train_loss = train_outputs[0]
            opt_params = config['Optimizer']
            optimizer = create_module(opt_params['function'])(opt_params)
            optimizer.minimize(train_loss)
            global_lr = optimizer._global_learning_rate()
            global_lr.persistable = True
            train_fetch_list.append(global_lr.name)

    train_reader = reader.train_eval_reader(config=config,
                                            char_ops=char_ops,
                                            mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_loader, eval_outputs = rec_model(mode="eval")
            eval_fetch_list = [v.name for v in eval_outputs]

    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    eval_reader = reader.train_eval_reader(config=config,
                                           char_ops=char_ops,
                                           mode="eval")
    eval_loader.set_sample_list_generator(eval_reader, places=place)

    # compile program for multi-devices
    train_compile_program = create_multi_devices_program(
        train_prog, train_loss.name)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        load_pretrain(exe, train_prog, pretrain_weights)

    train_batch_id = 0
    train_log_keys = ['loss', 'acc']
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    loss_type = config['Global']['loss_type']
    print_step = config['Global']['print_step']
    eval_step = config['Global']['eval_step']
    save_epoch_step = config['Global']['save_epoch_step']
    save_dir = config['Global']['save_dir']
    train_stats = TrainingStats(log_smooth_window, train_log_keys)
    best_eval_acc = -1
    best_batch_id = 0
    best_epoch = 0
    for epoch in range(epoch_num):
        train_loader.start()
        try:
            while True:
                t1 = time.time()
                train_outs = exe.run(program=train_compile_program,
                                     fetch_list=train_fetch_list,
                                     return_numpy=False)
                loss = np.mean(np.array(train_outs[0]))
                lr = np.mean(np.array(train_outs[-1]))

                preds = np.array(train_outs[1])
                preds_lod = train_outs[1].lod()[0]
                labels = np.array(train_outs[2])
                labels_lod = train_outs[2].lod()[0]

                acc, acc_num, img_num = cal_predicts_accuracy(
                    char_ops, preds, preds_lod, labels, labels_lod)

                t2 = time.time()
                train_batch_elapse = t2 - t1

                stats = {'loss': loss, 'acc': acc}
                train_stats.update(stats)
                if train_batch_id > 0 and train_batch_id % print_step == 0:
                    logs = train_stats.log()
                    strs = 'epoch: {}, iter: {}, lr: {:.6f}, {}, time: {:.3f}'.format(
                        epoch, train_batch_id, lr, logs, train_batch_elapse)
                    logger.info(strs)

                if train_batch_id > 0 and train_batch_id % eval_step == 0:
                    outs = eval_run(exe, eval_prog, eval_loader,
                                    eval_fetch_list, char_ops, train_batch_id,
                                    "eval")
                    eval_acc, acc_num, sample_num = outs
                    if eval_acc > best_eval_acc:
                        best_eval_acc = eval_acc
                        best_batch_id = train_batch_id
                        best_epoch = epoch
                        save_path = save_dir + "/best_accuracy"
                        save_model(train_prog, save_path)

                    strs = 'Test iter: {}, acc:{:.6f}, best_acc:{:.6f}, best_epoch:{}, best_batch_id:{}, sample_num:{}'.format(
                        train_batch_id, eval_acc, best_eval_acc, best_epoch,
                        best_batch_id, sample_num)
                    logger.info(strs)
                train_batch_id += 1

        except fluid.core.EOFException:
            train_loader.reset()

        if epoch > 0 and epoch % save_epoch_step == 0:
            save_path = save_dir + "/iter_epoch_%d" % (epoch)
            save_model(train_prog, save_path)
Example #25
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    print(config)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB']

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    det_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            train_loader, train_outputs = det_model(mode="train")
            train_fetch_list = [v.name for v in train_outputs]
            train_loss = train_outputs[0]
            opt_params = config['Optimizer']
            optimizer = create_module(opt_params['function'])(opt_params)
            optimizer.minimize(train_loss)
            global_lr = optimizer._global_learning_rate()
            global_lr.persistable = True
            train_fetch_list.append(global_lr.name)

    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_loader, eval_outputs = det_model(mode="eval")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)

    train_reader = reader.train_reader(config=config)
    train_loader.set_sample_list_generator(train_reader, places=place)

    exe.run(startup_prog)

    # compile program for multi-devices
    train_compile_program = create_multi_devices_program(
        train_prog, train_loss.name)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        load_pretrain(exe, train_prog, pretrain_weights)
        print("pretrain weights loaded!")

    train_batch_id = 0
    if alg == 'EAST':
        train_log_keys = ['loss_total', 'loss_cls', 'loss_offset']
    elif alg == 'DB':
        train_log_keys = [
            'loss_total', 'loss_shrink', 'loss_threshold', 'loss_binary'
        ]
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    print_step = config['Global']['print_step']
    eval_step = config['Global']['eval_step']
    save_epoch_step = config['Global']['save_epoch_step']
    save_dir = config['Global']['save_dir']
    train_stats = TrainingStats(log_smooth_window, train_log_keys)
    best_eval_hmean = -1
    best_batch_id = 0
    best_epoch = 0
    for epoch in range(epoch_num):
        train_loader.start()
        try:
            while True:
                t1 = time.time()
                train_outs = exe.run(program=train_compile_program,
                                     fetch_list=train_fetch_list,
                                     return_numpy=False)
                loss_total = np.mean(np.array(train_outs[0]))
                if alg == 'EAST':
                    loss_cls = np.mean(np.array(train_outs[1]))
                    loss_offset = np.mean(np.array(train_outs[2]))
                    stats = {'loss_total':loss_total, 'loss_cls':loss_cls,\
                        'loss_offset':loss_offset}
                elif alg == 'DB':
                    loss_shrink_maps = np.mean(np.array(train_outs[1]))
                    loss_threshold_maps = np.mean(np.array(train_outs[2]))
                    loss_binary_maps = np.mean(np.array(train_outs[3]))
                    stats = {'loss_total':loss_total, 'loss_shrink':loss_shrink_maps, \
                        'loss_threshold':loss_threshold_maps, 'loss_binary':loss_binary_maps}
                lr = np.mean(np.array(train_outs[-1]))
                t2 = time.time()
                train_batch_elapse = t2 - t1

                train_stats.update(stats)
                if train_batch_id > 0 and train_batch_id % print_step == 0:
                    logs = train_stats.log()
                    strs = 'epoch: {}, iter: {}, lr: {:.6f}, {}, time: {:.3f}'.format(
                        epoch, train_batch_id, lr, logs, train_batch_elapse)
                    logger.info(strs)

                if train_batch_id > 0 and\
                    train_batch_id % eval_step == 0:
                    metrics = eval_det_run(exe, eval_prog, eval_fetch_list,
                                           config, "eval")
                    hmean = metrics['hmean']
                    if hmean >= best_eval_hmean:
                        best_eval_hmean = hmean
                        best_batch_id = train_batch_id
                        best_epoch = epoch
                        save_path = save_dir + "/best_accuracy"
                        save_model(train_prog, save_path)
                    strs = 'Test iter: {}, metrics:{}, best_hmean:{:.6f}, best_epoch:{}, best_batch_id:{}'.format(
                        train_batch_id, metrics, best_eval_hmean, best_epoch,
                        best_batch_id)
                    logger.info(strs)
                train_batch_id += 1

        except fluid.core.EOFException:
            train_loader.reset()

        if epoch > 0 and epoch % save_epoch_step == 0:
            save_path = save_dir + "/iter_epoch_%d" % (epoch)
            save_model(train_prog, save_path)
Example #26
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    print(config)

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    det_model = create_module(config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_outputs = det_model(mode="test")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        fluid.load(eval_prog, pretrain_weights)
    else:
        logger.info("Not find pretrain_weights:%s" % pretrain_weights)
        sys.exit(0)

    save_res_path = config['Global']['save_res_path']
    with open(save_res_path, "wb") as fout:
        test_reader = reader.test_reader(config=config)
        tackling_num = 0
        for data in test_reader():
            img_num = len(data)
            tackling_num = tackling_num + img_num
            logger.info("tackling_num:%d", tackling_num)
            img_list = []
            ratio_list = []
            img_name_list = []
            for ino in range(img_num):
                img_list.append(data[ino][0])
                ratio_list.append(data[ino][1])
                img_name_list.append(data[ino][2])
            img_list = np.concatenate(img_list, axis=0)
            outs = exe.run(eval_prog,\
                feed={'image': img_list},\
                fetch_list=eval_fetch_list)

            global_params = config['Global']
            postprocess_params = deepcopy(config["PostProcess"])
            postprocess_params.update(global_params)
            postprocess = create_module(postprocess_params['function'])\
                (params=postprocess_params)
            dt_boxes_list = postprocess(outs, ratio_list)
            for ino in range(img_num):
                dt_boxes = dt_boxes_list[ino]
                img_name = img_name_list[ino]
                dt_boxes_json = []
                for box in dt_boxes:
                    tmp_json = {"transcription": ""}
                    tmp_json['points'] = box.tolist()
                    dt_boxes_json.append(tmp_json)
                otstr = img_name + "\t" + json.dumps(dt_boxes_json) + "\n"
                fout.write(otstr.encode())
    logger.info("success!")
Example #27
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_num'] = char_ops.get_char_num()

    # check whether use_gpu=True is set with a CPU-only build of PaddlePaddle
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    if use_gpu:
        devices_num = fluid.core.get_cuda_device_count()
    else:
        devices_num = int(
            os.environ.get('CPU_NUM', multiprocessing.cpu_count()))

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_loader, eval_outputs = rec_model(mode="eval")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)

    exe.run(startup_prog)
    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        fluid.load(eval_prog, pretrain_weights)

    eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867',\
        'IC13_857', 'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80']
    eval_data_dir = config['TestReader']['lmdb_sets_dir']
    total_forward_time = 0
    total_evaluation_data_number = 0
    total_correct_number = 0
    eval_data_acc_info = {}
    for eval_data in eval_data_list:
        config['TestReader']['lmdb_sets_dir'] = \
            eval_data_dir + "/" + eval_data
        eval_reader = reader.train_eval_reader(config=config,
                                               char_ops=char_ops,
                                               mode="test")
        eval_loader.set_sample_list_generator(eval_reader, places=place)

        start_time = time.time()
        outs = eval_run(exe, eval_prog, eval_loader, eval_fetch_list, char_ops,
                        "best", "test")
        infer_time = time.time() - start_time
        eval_acc, acc_num, sample_num = outs
        total_forward_time += infer_time
        total_evaluation_data_number += sample_num
        total_correct_number += acc_num
        eval_data_acc_info[eval_data] = outs

    avg_forward_time = total_forward_time / total_evaluation_data_number
    avg_acc = total_correct_number * 1.0 / total_evaluation_data_number
    logger.info('-' * 50)
    strs = ""
    for eval_data in eval_data_list:
        eval_acc, acc_num, sample_num = eval_data_acc_info[eval_data]
        strs += "\n {}, accuracy:{:.6f}".format(eval_data, eval_acc)
    strs += "\n average, accuracy:{:.6f}, time:{:.6f}".format(
        avg_acc, avg_forward_time)
    logger.info(strs)
    logger.info('-' * 50)