Esempio n. 1
0
def main() -> None:
    """Set up (optionally distributed) training: device, model, dataloaders.

    Reads everything from the module-level ``config`` dict. Supports three
    modes driven by ``Global.local_rank``: single CPU, single/multi GPU
    (``local_rank == -1``), and one-GPU-per-process DDP via NCCL.
    """
    logger = get_logger()

    global_config = config['Global']

    # Initialize the compute device.
    use_gpu = global_config['use_gpu']
    if global_config['local_rank'] == -1 or not use_gpu:
        # Non-distributed path: pick CUDA if requested and available,
        # and record how many GPUs DataParallel-style code could use.
        device = torch.device(
            "cuda" if torch.cuda.is_available() and use_gpu else "cpu")
        global_config.update(
            {'n_gpu': torch.cuda.device_count() if use_gpu else 1})
    else:
        # Distributed path: one process per GPU, NCCL backend.
        torch.cuda.set_device(global_config['local_rank'])
        device = torch.device('cuda', global_config['local_rank'])
        dist.init_process_group(backend='nccl')
        global_config.update({'n_gpu': 1})
    global_config.update({'device': device})
    # warning level so the banner shows even with quiet log settings
    logger.warning(
        f"\n\tProcess Rank:{global_config['local_rank']} \n"
        f"\tDevice: {device}\n"
        f"\tGpus: {global_config['n_gpu']}\n"
        f"\tDistributed: {bool(global_config['local_rank'] != -1)}\n"
        f"\t16-bits training: {global_config['fp16']}")

    rank_id = global_config['local_rank']
    set_seed(global_config['seed'], use_gpu)

    # Block non-main processes here; the step below (post-process build)
    # runs on the main process first. Non-main ranks resume when the main
    # rank reaches its matching dist.barrier() call below.
    if not is_main_process(rank_id):
        dist.barrier()

    post_process = build_post_process(config['PostProcess'], global_config)

    # Build the model; size the head to the post-process character dict
    # when one is exposed (recognition-style models).
    arch_config = config.pop('Architecture')
    if hasattr(post_process, 'character'):
        char_num = len(getattr(post_process, 'character'))
        arch_config["Head"]['out_channels'] = char_num
    logger.info(f"\nModel Info:" f"\n{json.dumps(arch_config, indent=4)}")
    model = build_model(arch_config)
    state_dict = torch.load(global_config['pretrained_model'])
    model.load_state_dict(state_dict)

    # Load training data. The rank-0 barriers below release/resynchronize
    # the other ranks around the dataloader construction.
    # NOTE(review): the second rank-0 barrier needs a matching call from
    # non-main ranks that is not visible in this excerpt — confirm
    # downstream code provides it.
    if global_config['local_rank'] == 0:
        dist.barrier()
    logger.info(f"\nLoad train Data:"
                f"\n{json.dumps(config['Train'], indent=4)}")
    train_dataloader = build_dataloader(config, logger, 'Train')

    logger.info(f"\nLoad Eval Data:"
                f"\n{json.dumps(config['Eval'], indent=4)}")
    eval_dataloader = build_dataloader(config, logger, 'Eval')
    if global_config['local_rank'] == 0:
        dist.barrier()

    model.to(device)
Esempio n. 2
0
def main():
    """Run text-detection inference on images and save the results.

    Reads options from the module-level ``config``. For every image under
    ``Global.infer_img``, runs the detection model, appends the predicted
    boxes as a tab-separated JSON line to ``Global.save_res_path``, and
    draws the boxes onto a copy of the source image in the same directory.
    """
    global_config = config['Global']

    # Prefer GPU when requested and available; otherwise stay on CPU.
    device = torch.device('cpu')
    if global_config['use_gpu'] and torch.cuda.is_available():
        device = torch.device('cuda')
    logger.info('使用设备:{}'.format(device))

    logger.info('模型信息:{}'.format(config['Architecture']))
    model = build_model(config['Architecture'])
    model.to(device)

    logger.info('加载预训练模型:{}'.format(global_config['pretrained_model']))
    # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts
    # (consistent with the evaluation script, which already does this).
    state_dict = torch.load(global_config['pretrained_model'],
                            map_location=device)
    model.load_state_dict(state_dict)

    post_process_class = build_post_process(config['PostProcess'])

    # Reuse the Eval transforms for inference: drop label ops (no labels
    # at inference time) and keep only the image and its shape info.
    ops = []
    for op in config['Eval']['dataset']['transforms']:
        op_name = list(op)[0]
        if 'Label' in op_name:
            continue
        elif op_name == "KeepKeys":
            op[op_name]['keep_keys'] = ['image', 'shape']
        ops.append(op)
    transforms = create_transformers(ops)

    save_res_path = global_config['save_res_path']
    save_dir = os.path.dirname(save_res_path)
    # exist_ok avoids the race between the existence check and creation;
    # skip creation when the path has no directory component (makedirs('')
    # would raise).
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    model.eval()
    with open(save_res_path, 'wb') as fout:
        for file in get_img_list(global_config['infer_img']):
            logger.info(f"测试图像:{file}")
            data = {'image': file}
            batch = transforms(data)

            # Add the batch dimension expected by the model.
            images = np.expand_dims(batch[0], axis=0)
            shape_list = np.expand_dims(batch[1], axis=0)
            images = torch.from_numpy(images).to(device)
            # Inference only: skip autograd bookkeeping.
            with torch.no_grad():
                preds = model(images)
            post_result = post_process_class(preds, shape_list)
            boxes = post_result[0]['points']

            # One JSON record per image: a list of boxes, each with an
            # (empty) transcription placeholder.
            dt_boxes_json = []
            for box in boxes:
                tmp_json = {"transcription": ""}
                tmp_json['points'] = box.tolist()
                dt_boxes_json.append(tmp_json)
            otstr = file + "\t" + json.dumps(dt_boxes_json) + '\n'
            fout.write(otstr.encode())
            src_img = cv.imread(file)
            draw_det_res(boxes, save_dir, src_img, file)
        logger.info("结果已保存!")
Esempio n. 3
0
def main():
    """Run text-recognition inference on images and log the decoded text.

    Reads options from the module-level ``config``; for every image under
    ``Global.infer_img``, runs the recognition model and logs the
    post-processed result.
    """
    global_config = config['Global']

    device = torch.device('cpu')
    # Truthiness check instead of `is True`, so truthy non-bool config
    # values (e.g. 1) also enable the GPU path.
    if global_config['use_gpu'] and torch.cuda.is_available():
        device = torch.device('cuda')
    logger.info('使用设备:{}'.format(device))

    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)

    # Size the recognition head to match the post-process character dict.
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))
    logger.info('构建模型,字典包含{}个字'.format(
        config['Architecture']["Head"]['out_channels']))
    logger.info('模型结构:{}'.format(config['Architecture']))
    model = build_model(config['Architecture'])
    model.to(device)

    logger.info('加载预训练模型 {}...'.format(global_config['pretrained_model']))
    # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.
    state_dict = torch.load(global_config['pretrained_model'],
                            map_location=device)
    model.load_state_dict(state_dict)

    # Build inference transforms from the Eval pipeline: drop label ops,
    # switch the resize op to inference mode, and keep only the image.
    ops = []
    for op in config['Eval']['dataset']['transforms']:
        op_name = list(op)[0]
        if 'Label' in op_name:
            continue
        elif op_name in ['RecResizeImg']:
            op[op_name]['infer_mode'] = True
        elif op_name == 'KeepKeys':
            op[op_name]['keep_keys'] = ['image']
        ops.append(op)
    global_config['infer_mode'] = True
    transforms = create_transformers(ops, global_config)

    model.eval()
    for file in get_img_list(config['Global']['infer_img']):
        logger.info('输入图像:{}'.format(file))
        data = {'image': file}
        batch = transforms(data)

        images = torch.from_numpy(batch[0]).to(device)
        images = images.unsqueeze(0)
        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            preds = model(images)
        post_result = post_process_class(preds)
        logger.info("result: {}".format(post_result))
Esempio n. 4
0
def main():
    """Evaluate a trained model on the Eval dataset and log its metrics.

    Configuration comes from the module-level ``config`` dict; the
    effective batch size is ``batch_size_per_card`` scaled by the number
    of visible GPUs (or 1 on CPU).
    """
    global_config = config['Global']

    # Resolve the compute device; fall back to CPU when no GPU is usable.
    device = torch.device('cpu')
    n_gpus = 1
    if global_config['use_gpu']:
        if torch.cuda.is_available():
            device = torch.device('cuda')
            n_gpus = torch.cuda.device_count()
        else:
            logger.warning("未发现可用于计算的GPU设备")

    # Build the evaluation dataloader with the scaled batch size.
    eval_loader_cfg = config['Eval']['loader']
    eval_loader_cfg.update(
        {'batch_size': eval_loader_cfg['batch_size_per_card'] * n_gpus})
    dataloader = build_dataloader(config, device, logger, 'Eval')
    batch_size = eval_loader_cfg['batch_size']
    logger.info(f'测试数据共 {len(dataloader)}个batch, 每个batch包含{batch_size}个样本')

    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)
    # When the post-process exposes a character dict, size the head to it.
    if hasattr(post_process_class, 'character'):
        char_num = len(getattr(post_process_class, 'character'))
        config['Architecture']["Head"]['out_channels'] = char_num
    model = build_model(config['Architecture'])

    # Load the checkpoint onto CPU first, then move the model to the
    # target device — works even when the weights were saved on GPU.
    checkpoint = torch.load(global_config['pretrained_model'],
                            map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint)
    model.to(device)

    eval_class = build_metric(config['Metric'])
    metric = train_utils.eval(model, dataloader, post_process_class,
                              eval_class, device)
    logger.info('metric eval ***************')
    for name, value in metric.items():
        logger.info('{}:{}'.format(name, value))
Esempio n. 5
0
# -*- coding: utf-8 -*-
# __author__:Song Zhenqi
# 2021-01-20

from argparse import ArgumentParser
import paddle
from utils import get_logger, get_config
from models.architectures import build_model
import torch


def get_args():
    """Parse command-line arguments for the Paddle→PyTorch conversion.

    Returns:
        argparse.Namespace with:
            paddle (str): path to the source Paddle model weights.
            cfg (str): path to the YAML config describing the architecture.

    Both arguments are required; argparse exits with a clear usage error
    when either is missing (instead of a later crash on a None path).
    """
    parser = ArgumentParser(description='Convert a Paddle model to PyTorch')
    parser.add_argument('--paddle', '-p', required=True,
                        help='Paddle Model path')
    parser.add_argument('--cfg', '-c', required=True, help='Config File')

    return parser.parse_args()


if __name__ == '__main__':
    # Convert Paddle-formatted weights into a plain PyTorch state dict.
    args = get_args()
    print(args.cfg)
    cfg = get_config(args.cfg)
    # Hard-coded head width — presumably the size of the recognition
    # charset this checkpoint was trained with.
    cfg['Architecture']["Head"]['out_channels'] = 6625
    net = build_model(cfg['Architecture'])
    # Read the Paddle checkpoint and map it onto the PyTorch module.
    paddle_weights = paddle.load(args.paddle)
    net.load_paddle_state_dict(paddle_weights)
    # Persist the converted weights for regular torch.load consumption.
    torch.save(net.state_dict(), 'mobilev3_crnn_ctc.pth')