Example #1
    def __init__(self, model_path, gpu_id=None):
        """
        初始化gluon模型
        :param model_path: 模型地址
        :param gpu_id: 在哪一块gpu上运行
        """
        info = pickle.load(open(model_path.replace('.params', '.info'), 'rb'))
        print('load {} epoch params'.format(info['epoch']))
        config = info['config']
        alphabet = config['dataset']['alphabet']
        self.ctx = try_gpu(gpu_id)

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)

        self.gpu_id = gpu_id
        img_h, img_w = 32, 100
        for process in config['dataset']['train']['dataset']['args']['pre_processes']:
            if process['type'] == "Resize":
                img_h = process['args']['img_h']
                img_w = process['args']['img_w']
                break
        self.img_w = img_w
        self.img_h = img_h
        self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
        self.alphabet = alphabet
        self.net = get_model(len(alphabet), self.ctx, config['arch']['args'])
        self.net.load_parameters(model_path, self.ctx)
        # self.net = gluon.SymbolBlock.imports('crnn_lite-symbol.json', ['data'], 'crnn_lite-0000.params', ctx=self.ctx)
        self.net.hybridize()
Example #2
    def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
        '''
        Initialize the PyTorch model.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        checkpoint = torch.load(model_path, map_location=self.device)

        config = checkpoint['config']
        config['arch']['args']['pretrained'] = False
        self.model = get_model(config['arch'])
        # config['post_processing']['args']['unclip_ratio'] = 3
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()
        print(config)

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
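
A minimal inference sketch for a detector initialized this way, mirroring the flow in Example #8 below; the method name `predict`, the cv2 image loading, and the skipped resize step are illustrative assumptions, not part of the original example (assumes `import cv2` and `import torch` at module level):

    def predict(self, img_path, is_output_polygon=False):
        # illustrative sketch, not part of the original class
        img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0)
        if self.img_mode == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w = img.shape[:2]
        # NOTE: real pipelines resize here (e.g. resize_image(img, short_size) in
        # Example #8) so both sides are multiples of 32 before the forward pass
        tensor = self.transform(img).unsqueeze(0).to(self.device)
        batch = {'shape': [(h, w)]}
        with torch.no_grad():
            preds = self.model(tensor)
            box_list, score_list = self.post_process(
                batch, preds, is_output_polygon=is_output_polygon)
        return box_list[0], score_list[0]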
Example #3
    def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
        '''
        Initialize the PyTorch model.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph), e.g. model_path='/home/share/gaoluoluo/dbnet/output/DBNet_resnet18_FPN_DBHead/checkpoint/model_latest.pth'
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        # print('device:', self.device)
        checkpoint = torch.load(model_path, map_location=self.device)
        # print("checkpoint:",checkpoint)

        config = checkpoint['config']
        # print(checkpoint['config'])
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #4
    def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        checkpoint = torch.load(model_path, map_location=self.device)

        config = checkpoint['config']
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #5
def load_model_eval(model_path, is_state_dict, use_cpu=False):
    device = torch.device("cuda") if (torch.cuda.is_available()
                                      and not use_cpu) else torch.device("cpu")
    if is_state_dict:
        model = maskkeypointrcnn_resnet50_fpn(num_classes=2,
                                              num_keypoints=6).to(device)
        model.load_state_dict(torch.load(model_path))
    else:
        model = torch.load(model_path).to(device)
    model.eval()
    transforms = get_transforms(train=False)
    return model, transforms, device
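
A hedged usage sketch for load_model_eval; the file names are placeholders, and the returned `transforms` object is bypassed because its call signature is project-specific:

# Illustrative usage only; paths are placeholders.
import cv2
import torch

model, transforms, device = load_model_eval('keypoint_rcnn.pth', is_state_dict=True)
image = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2RGB)
# torchvision-style detectors expect a list of CHW float tensors in [0, 1]
tensor = torch.from_numpy(image).permute(2, 0, 1).float().div(255).to(device)
with torch.no_grad():
    outputs = model([tensor])  # list of dicts with 'boxes', 'scores', 'keypoints', ...
print(outputs[0]['boxes'].shape)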
Example #6
    def __init__(self, model_path, gpu_id=None):
        """
        初始化模型
        :param model_path: 模型地址
        :param gpu_id: 在哪一块gpu上运行
        """
        checkpoint = torch.load(model_path)
        print(f"load {checkpoint['epoch']} epoch params")
        config = checkpoint['config']
        alphabet = config['dataset']['alphabet']
        if gpu_id is not None and isinstance(
                gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)

        self.gpu_id = gpu_id
        img_h, img_w = 32, 100
        for process in config['dataset']['train']['dataset']['args'][
                'pre_processes']:
            if process['type'] == "Resize":
                img_h = process['args']['img_h']
                img_w = process['args']['img_w']
                break
        self.img_w = img_w
        self.img_h = img_h
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.alphabet = alphabet
        img_channel = 3 if config['dataset']['train']['dataset']['args'][
            'img_mode'] != 'GRAY' else 1

        if config['arch']['args']['prediction']['type'] == 'CTC':
            self.converter = CTCLabelConverter(config['dataset']['alphabet'])
        elif config['arch']['args']['prediction']['type'] == 'Attn':
            self.converter = AttnLabelConverter(config['dataset']['alphabet'])
        self.net = get_model(img_channel, len(self.converter.character),
                             config['arch']['args'])
        self.net.load_state_dict(checkpoint['state_dict'])
        # self.net = torch.jit.load('crnn_lite_gpu.pt')
        self.net.to(self.device)
        self.net.eval()
        sample_input = torch.zeros(
            (2, img_channel, img_h, img_w)).to(self.device)
        self.net.get_batch_max_length(sample_input)
Example #7
    def __init__(self,
                 model_path,
                 post_p_thre=0.7,
                 gpu_id=None,
                 save_wts=False):
        '''
        Initialize the PyTorch model.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")

        print('device:', self.device)
        checkpoint = torch.load(model_path, map_location=self.device)

        config = checkpoint['config']
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()
        # optionally dump the weights to a plain-text .wts file
        if save_wts:
            with open('DBNet.wts', 'w') as f:
                f.write('{}\n'.format(len(self.model.state_dict().keys())))
                for k, v in self.model.state_dict().items():
                    vr = v.reshape(-1).cpu().numpy()
                    f.write('{} {} '.format(k, len(vr)))
                    for vv in vr:
                        f.write(' ')
                        f.write(struct.pack('>f', float(vv)).hex())
                    f.write('\n')

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)

        self.init_onnx_session()
        self.init_ie()
Example #8
def predict(model, img_path: str, is_output_polygon=False, short_size: int = 1024):
    device = torch.device("cuda:0")

    img = cv2.imread(img_path, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    h, w = img.shape[:2]
    img = resize_image(img, short_size)

    checkpoint = torch.load('C:/Users/94806/Desktop/model_best_recall_0.558953_precision_0.769060_hmean_0.647386.pth',
                            map_location=device)
    config = checkpoint['config']

    transform = []
    for t in config['dataset']['train']['dataset']['args']['transforms']:
        if t['type'] in ['ToTensor', 'Normalize']:
            transform.append(t)
    transform = get_transforms(transform)

    tensor = transform(img)
    tensor = tensor.unsqueeze_(0)

    tensor = tensor.cuda()
    batch = {'shape': [(h, w)]}
    with torch.no_grad():
        torch.cuda.synchronize(device)
        start = time.time()
        preds = model(tensor)
        torch.cuda.synchronize(device)

        post_process = get_post_processing(config['post_processing'])

        box_list, score_list = post_process(batch, preds, is_output_polygon=is_output_polygon)
        box_list, score_list = box_list[0], score_list[0]
        if len(box_list) > 0:
            if is_output_polygon:
                idx = [x.sum() > 0 for x in box_list]
                box_list = [box_list[i] for i, v in enumerate(idx) if v]
                score_list = [score_list[i] for i, v in enumerate(idx) if v]
            else:
                idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0  # drop boxes whose coordinates are all zero
                box_list, score_list = box_list[idx], score_list[idx]
        else:
            box_list, score_list = [], []
        t = time.time() - start
    return preds[0, 0, :, :].detach().cpu().numpy(), box_list, score_list, t
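
A short follow-up sketch that draws the quadrilaterals returned by predict back onto the original image; the image path is a placeholder, non-polygon output is assumed, and `model` is a detector already built and loaded as in the earlier examples:

# Illustrative only: 'sample.jpg' is a placeholder path.
import cv2
import numpy as np

prob_map, box_list, score_list, elapsed = predict(model, 'sample.jpg', is_output_polygon=False)
canvas = cv2.imread('sample.jpg')
for box in box_list:  # each box is a 4x2 array of corners in original-image coordinates
    pts = np.array(box).astype(np.int32).reshape(-1, 1, 2)
    cv2.polylines(canvas, [pts], isClosed=True, color=(0, 255, 0), thickness=2)
cv2.imwrite('sample_boxes.jpg', canvas)
print('inference took {:.3f}s, {} boxes'.format(elapsed, len(box_list)))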
Example #9
    def __init__(self, model_path, post_p_thre=0.7):
        '''
        Initialize the PyTorch model.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        '''
        self.device = "cuda:0"
        checkpoint = torch.load(model_path, map_location=self.device)
        config = checkpoint['config']
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #10
def test_model(model,
               candidates_dir,
               kp_names=("head", "ass"),
               box_score_thre=0.5,
               kp_score_thre=0,
               mask_thre=0.5,
               save_image_out=True,
               show_image_out=False):
    out_dir = candidates_dir.rstrip("/") + ".detected"
    os.makedirs(out_dir, exist_ok=True)

    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    model.eval()

    transforms = get_transforms(train=False)
    for p in tqdm(os.listdir(candidates_dir)):
        image_path = os.path.join(candidates_dir, p)
        image_cv2 = cv2.imread(image_path)

        canvas, results = predict(image_cv2,
                                  model,
                                  transforms=transforms,
                                  device=device,
                                  kp_names=kp_names,
                                  box_score_thre=box_score_thre,
                                  kp_score_thre=kp_score_thre,
                                  mask_thre=mask_thre,
                                  show=False)

        if save_image_out:
            cv2.imwrite(os.path.join(out_dir, p), canvas)
        if show_image_out:
            cv2.imshow("res", canvas)
            cv2.waitKey(0)
            cv2.destroyWindow("res")
Example #11
    def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
        '''
        Initialize the PyTorch model.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        checkpoint = torch.load(model_path, map_location=self.device)

        config = yaml.load(
            open(
                '/content/DBNet.pytorch/config/icdar2015_dcn_resnet18_FPN_DBhead_polyLR.yaml',
                'r'),
            Loader=yaml.FullLoader)
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(
            torch.load('/content/DBNet.pytorch/ic15_resnet18'))
        self.model.to(self.device)
        self.model.eval()

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #12
    def __init__(self,
                 model_path,
                 post_p_thre=0.7,
                 gpu_id=None,
                 imageH=960,
                 imageW=480):
        '''
        Initialize the PyTorch model and build TensorRT engines.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        self.model_path = model_path
        checkpoint = torch.load(model_path, map_location=self.device)

        config = checkpoint['config']
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.model.forward = self.model.forward4trt  # forward4trt has the 'bilinear' F.interpolate() commented out for TensorRT export

        config['post_processing']['args']['unclip_ratio'] = 1.8
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.imageH = imageH
        self.imageW = imageW
        self.batch_size = 1
        self.input_names = ['inputs']
        self.output_names = ['pred_maps']
        # =============================================================================================================
        # To handle varying input sizes, we load two engines with different input shapes: 640x640 and 960x480
        # ===== 640x640 =====
        input_h, input_w = (imageH + imageW) * 4 // 9, (imageH +
                                                        imageW) * 4 // 9
        # Convert to onnx
        if not os.path.exists(
                model_path.replace('.pth', '_{}_{}.onnx'.format(
                    input_h, input_w))):
            dummy_input = torch.randn(self.batch_size, 3, input_h,
                                      input_w).to(self.device)
            torch.onnx.export(self.model,
                              dummy_input,
                              model_path.replace(
                                  '.pth',
                                  '_{}_{}.onnx'.format(input_h, input_w)),
                              input_names=self.input_names,
                              output_names=self.output_names,
                              verbose=True,
                              opset_version=9)
            print('Converted to onnx model, save path {}!'.format(
                model_path.replace('.pth',
                                   '_{}_{}.onnx'.format(input_h, input_w))))

        # Convert to tensorRT engine
        self.engine_1x1 = get_engine(
            model_path.replace('.pth', '_{}_{}.onnx'.format(input_h, input_w)),
            model_path.replace('.pth',
                               '_{}_{}.engine'.format(input_h, input_w)))
        print('Converted to tensorRT engine, save path {}!'.format(
            model_path.replace('.pth',
                               '_{}_{}.engine'.format(input_h, input_w))))
        self.context_1x1 = self.engine_1x1.create_execution_context()
        # ===== 960x480 =====
        input_h, input_w = imageH, imageW
        # Convert to onnx
        if not os.path.exists(
                model_path.replace('.pth', '_{}_{}.onnx'.format(
                    input_h, input_w))):
            dummy_input = torch.randn(self.batch_size, 3, input_h,
                                      input_w).to(self.device)
            torch.onnx.export(self.model,
                              dummy_input,
                              model_path.replace(
                                  '.pth',
                                  '_{}_{}.onnx'.format(input_h, input_w)),
                              input_names=self.input_names,
                              output_names=self.output_names,
                              verbose=True,
                              opset_version=9)
            print('Converted to onnx model, save path {}!'.format(
                model_path.replace('.pth',
                                   '_{}_{}.onnx'.format(input_h, input_w))))

        # Convert to tensorRT engine
        self.engine_2x1 = get_engine(
            model_path.replace('.pth', '_{}_{}.onnx'.format(input_h, input_w)),
            model_path.replace('.pth',
                               '_{}_{}.engine'.format(input_h, input_w)))
        print('Converted to tensorRT engine, save path {}!'.format(
            model_path.replace('.pth',
                               '_{}_{}.engine'.format(input_h, input_w))))
        self.context_2x1 = self.engine_2x1.create_execution_context()
        # =============================================================================================================

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #13
    def __init__(self,
                 model_path,
                 post_p_thre=0.7,
                 gpu_id=None,
                 short_size=640):
        '''
        Initialize the PyTorch model and build a TensorRT engine.
        :param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
        :param gpu_id: index of the GPU to run on
        '''
        self.gpu_id = gpu_id

        if self.gpu_id is not None and isinstance(
                self.gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % self.gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        self.model_path = model_path
        checkpoint = torch.load(model_path, map_location=self.device)

        config = checkpoint['config']
        config['arch']['backbone']['pretrained'] = False
        self.model = build_model(config['arch'])
        self.model.forward = self.model.forward4trt  # forward4trt has the 'bilinear' F.interpolate() commented out for TensorRT export

        config['post_processing']['args']['unclip_ratio'] = 1.8
        self.post_process = get_post_processing(config['post_processing'])
        self.post_process.box_thresh = post_p_thre
        self.img_mode = config['dataset']['train']['dataset']['args'][
            'img_mode']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

        self.short_size = short_size
        self.batch_size = 1
        self.input_names = ['inputs']
        self.output_names = ['pred_maps']
        # =============================================================================================================
        # Convert to onnx
        if not os.path.exists(model_path.replace('.pth', '_dynamic.onnx')):
            dummy_input = torch.randn(self.batch_size, 3, self.short_size,
                                      self.short_size).to(self.device)
            # dynamic_axes = {self.input_names[0]: {2:'width', 3:'height'},
            #                 self.output_names[0]: {2:'width', 3:'height'}}
            torch.onnx.export(
                self.model,
                dummy_input,
                model_path.replace('.pth', '_dynamic.onnx'),
                # dynamic_axes= dynamic_axes,
                input_names=self.input_names,
                output_names=self.output_names,
                verbose=True,
                opset_version=10)
            # onnx_model = onnx.load(model_path.replace('.pth', '_dynamic.onnx'))
            # onnx.checker.check_model(onnx_model)
            print('Converted to onnx model, save path {}!'.format(
                model_path.replace('.pth', '_dynamic.onnx')))

        # Convert to tensorRT engine
        self.engine = get_engine(model_path.replace('.pth', '_dynamic.onnx'),
                                 model_path.replace('.pth', '_dynamic.engine'))
        print('Converted to tensorRT engine, save path {}!'.format(
            model_path.replace('.pth', '_dynamic.engine')))
        self.context = self.engine.create_execution_context()
        # =============================================================================================================

        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
Example #14
            if x == 'train':
                dataloaders_dict[x] = torch.utils.data.DataLoader(
                    tmp_dataset,
                    batch_size=batch_size,
                    sampler=sampler,  # torch.utils.data.SubsetRandomSampler(indices[x])
                    num_workers=8)
            else:
                dataloaders_dict[x] = torch.utils.data.DataLoader(
                    tmp_dataset, batch_size=batch_size, num_workers=8)

            if not finetune:
                if x == 'train':
                    mean, std, transform = data_loader.get_transforms(
                        dataloaders_dict[x],
                        x,
                        input_size,
                        mean=None,
                        std=None)
                else:
                    _, _, transform = data_loader.get_transforms(
                        dataloaders_dict[x], x, input_size, mean=mean, std=std)
            else:
                _, _, transform = data_loader.get_transforms(
                    dataloaders_dict[x], x, input_size, mean=mean, std=std)

            dataloaders_dict[x].dataset.transform = transform

            dataloaders_dict[x].dataset.mean = mean
            dataloaders_dict[x].dataset.std = std

        num_classes = dataloaders_dict['train'].dataset.num_classes