Code example #1
import numpy as np
import torch
import torch.nn.functional as F

# Project-specific modules (module names assumed; adjust to your repository layout).
from ctpn_model import CTPN_Model
from ctpn_utils import (resize, gen_anchor, bbox_transfor_inv, clip_box,
                        filter_bbox, nms, TextProposalConnectorOriented)
import ctpn_params


class OcrDetCTPN:
    def __init__(self, model_path='./checkpoints/CTPN.pth'):
        self.model = CTPN_Model()
        self.use_gpu = torch.cuda.is_available()
        if self.use_gpu:
            self.model.cuda()
        device = torch.device("cuda:0" if self.use_gpu else "cpu")
        self.model.load_state_dict(
            torch.load(model_path, map_location=device)['model_state_dict'])
        # Freeze all parameters; this detector is inference-only.
        for p in self.model.parameters():
            p.requires_grad = False
        self.model.eval()
        self.prob_thresh = 0.5

    def inference(self, image):
        # Resize so the image height matches the model input size.
        image_sz = resize(image, height=ctpn_params.IMAGE_HEIGHT)
        # Rescale factor between the original and resized image
        # (the resize preserves the aspect ratio).
        rescale_fac = image.shape[0] / image_sz.shape[0]
        h, w = image_sz.shape[:2]
        # Subtract the per-channel mean.
        image_sz = image_sz.astype(np.float32) - ctpn_params.IMAGE_MEAN
        # HWC -> CHW and add a batch dimension.
        image_sz = torch.from_numpy(image_sz.transpose(
            2, 0, 1)).unsqueeze(0).float()

        if self.use_gpu:
            image_sz = image_sz.cuda()
        # Forward pass: per-anchor text/non-text scores and box regressions.
        with torch.no_grad():
            cls, regr = self.model(image_sz)
        cls_prob = F.softmax(cls, dim=-1).cpu().numpy()
        regr = regr.cpu().numpy()
        # Generate the fixed-width (16 px) anchors on the feature-map grid
        # and decode the regression outputs into proposal boxes.
        anchor = gen_anchor((int(h / 16), int(w / 16)), 16)
        bbox = bbox_transfor_inv(anchor, regr)
        bbox = clip_box(bbox, [h, w])

        # Keep proposals whose text probability exceeds the threshold.
        fg = np.where(cls_prob[0, :, 1] > self.prob_thresh)[0]
        select_anchor = bbox[fg, :]
        select_score = cls_prob[0, fg, 1]
        select_anchor = select_anchor.astype(np.int32)
        keep_index = filter_bbox(select_anchor, 16)

        # Non-maximum suppression over [x1, y1, x2, y2, score] rows.
        select_anchor = select_anchor[keep_index]
        select_score = select_score[keep_index]
        select_score = np.reshape(select_score, (select_score.shape[0], 1))
        nmsbox = np.hstack((select_anchor, select_score))
        keep = nms(nmsbox, 0.3)
        select_anchor = select_anchor[keep]
        select_score = select_score[keep]

        # Connect the surviving proposals into full text lines.
        textConn = TextProposalConnectorOriented()
        text = textConn.get_text_lines(select_anchor, select_score, [h, w])
        # Map the 8 polygon coordinates back to the original image scale;
        # the 9th element is the line score.
        text = [np.hstack((res[:8] * rescale_fac, res[8])) for res in text]

        return text
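
A minimal usage sketch for the class above, assuming OpenCV is installed; 'test.jpg' and 'result.jpg' are placeholder paths, and each result row is taken to be 8 polygon coordinates followed by a line score, matching the output built in inference(). Because corner ordering differs between CTPN implementations, the sketch draws an axis-aligned box from the coordinate extrema rather than a polygon.

import cv2
import numpy as np

detector = OcrDetCTPN(model_path='./checkpoints/CTPN.pth')

image = cv2.imread('test.jpg')        # placeholder path; BGR image as OpenCV loads it
text_lines = detector.inference(image)

for line in text_lines:
    xs, ys = line[:8:2], line[1:8:2]  # 8 values: 4 (x, y) corner points
    # Axis-aligned box from the polygon extrema (corner order is implementation-dependent).
    cv2.rectangle(image,
                  (int(min(xs)), int(min(ys))),
                  (int(max(xs)), int(max(ys))),
                  (0, 255, 0), 2)

cv2.imwrite('result.jpg', image)      # placeholder output path
print('detected %d text lines' % len(text_lines))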
Code example #2
import os

import cv2
import torch

import config
# Project-specific module (name assumed; adjust to your repository layout).
from ctpn_model import CTPN_Model


prob_thresh = 0.8
width = 600

# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')

# weights = os.path.join(config.checkpoints_dir, 'trained weights file.pth.tar')
weights = config.model_path

# Load the trained CTPN weights and switch to inference mode.
model = CTPN_Model()
model.load_state_dict(torch.load(weights, map_location=device)['model_state_dict'])
model.to(device)
model.eval()


def dis(image):
    # Show an image in an OpenCV window until a key is pressed.
    cv2.imshow('image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Collect every image in the input directory.
filenames = [os.path.join(config.img_path, file) for file in os.listdir(config.img_path)]

print(filenames)


for k in range(len(filenames)):