Example No. 1
import numpy as np
import torch
import torch.nn.functional as F

# project-local helpers; CTPN_Model comes from the project's model module
import ctpn_params
from ctpn_utils import (gen_anchor, bbox_transfor_inv, clip_box, filter_bbox,
                        nms, TextProposalConnectorOriented, resize)


class OcrDetCTPN:
    def __init__(self, model_path='./checkpoints/CTPN.pth'):
        self.model = CTPN_Model()
        # pick the GPU if one is available and move the model there
        self.use_gpu = torch.cuda.is_available()
        device = torch.device("cuda:0" if self.use_gpu else "cpu")
        if self.use_gpu:
            self.model.cuda()
        self.model.load_state_dict(
            torch.load(model_path, map_location=device)['model_state_dict'])
        for p in self.model.parameters():
            p.requires_grad = False
        self.model.eval()
        self.prob_thresh = 0.5

    def inference(self, image):
        image_sz = resize(image, height=ctpn_params.IMAGE_HEIGHT)
        # scale factor between the original and resized image (aspect ratio preserved)
        rescale_fac = image.shape[0] / image_sz.shape[0]
        h, w = image_sz.shape[:2]
        # subtract the per-channel image mean
        image_sz = image_sz.astype(np.float32) - ctpn_params.IMAGE_MEAN
        # HWC -> CHW and add a batch dimension
        image_sz = torch.from_numpy(image_sz.transpose(
            2, 0, 1)).unsqueeze(0).float()

        if self.use_gpu:
            image_sz = image_sz.cuda()
        # forward pass: per-anchor text/non-text scores and box regression offsets
        cls, regr = self.model(image_sz)
        cls_prob = F.softmax(cls, dim=-1).cpu().numpy()
        regr = regr.cpu().numpy()
        # generate anchors on the 16-pixel feature-map grid, apply the
        # regression offsets and clip the resulting boxes to the image
        anchor = gen_anchor((int(h / 16), int(w / 16)), 16)
        bbox = bbox_transfor_inv(anchor, regr)
        bbox = clip_box(bbox, [h, w])

        # keep proposals whose text-class probability exceeds the threshold
        fg = np.where(cls_prob[0, :, 1] > self.prob_thresh)[0]
        select_anchor = bbox[fg, :]
        select_score = cls_prob[0, fg, 1]
        select_anchor = select_anchor.astype(np.int32)
        keep_index = filter_bbox(select_anchor, 16)

        # nms
        select_anchor = select_anchor[keep_index]
        select_score = select_score[keep_index]
        select_score = np.reshape(select_score, (select_score.shape[0], 1))
        nmsbox = np.hstack((select_anchor, select_score))
        keep = nms(nmsbox, 0.3)
        select_anchor = select_anchor[keep]
        select_score = select_score[keep]

        # connect the surviving proposals into text lines
        textConn = TextProposalConnectorOriented()
        text = textConn.get_text_lines(select_anchor, select_score, [h, w])
        # map the 8 quad coordinates back to the original image scale; res[8] is the score
        text = [np.hstack((res[:8] * rescale_fac, res[8])) for res in text]

        return text
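
A minimal usage sketch (not from the original source): instantiating the detector above and drawing its detections. The input path 'demo.jpg', the output path and the drawing step are illustrative assumptions.

if __name__ == '__main__':
    import cv2

    detector = OcrDetCTPN(model_path='./checkpoints/CTPN.pth')
    img = cv2.imread('demo.jpg')  # hypothetical input image
    lines = detector.inference(img)
    for line in lines:
        # line[:8] holds the four corners of a text quad, line[8] its score
        pts = line[:8].astype(np.int32).reshape(-1, 1, 2)
        cv2.polylines(img, [pts], isClosed=True, color=(0, 0, 255), thickness=2)
    cv2.imwrite('demo_result.jpg', img)
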
Example No. 2
import os

import cv2
import torch

# project-local helpers; CTPN_Model comes from the project's model module
from ctpn_utils import (gen_anchor, bbox_transfor_inv, clip_box, filter_bbox,
                        nms, TextProposalConnectorOriented, resize)
import config


prob_thresh = 0.8
width = 600

#device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')

#weights = os.path.join(config.checkpoints_dir, 'trained weights file.pth.tar')
weights = config.model_path

model = CTPN_Model()
model.load_state_dict(torch.load(weights, map_location=device)['model_state_dict'])
model.to(device)
model.eval()


def dis(image):
    """Display an image and wait for a key press before closing the window."""
    cv2.imshow('image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


filenames = [os.path.join(config.img_path, file) for file in os.listdir(config.img_path)]

print(filenames)
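
A hypothetical continuation of this snippet (not in the original): loop over the collected paths and show each image with the dis() helper above; prob_thresh and width would be consumed by the detection step that follows in the full script.

for filename in filenames:
    image = cv2.imread(filename)
    if image is None:  # skip anything that is not a readable image
        continue
    dis(image)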

Example No. 3
    if os.path.exists(checkpoints_weight):
        pretrained = False

    # dataset = VOCDataset(args['image_dir'], args['labels_dir'])
    dataset = FakepagesDataset(args['image_dir'], args['labels_dir'])
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=True,
                            num_workers=args['num_workers'])
    model = CTPN_Model()
    model.to(device)

    resume_epoch = 0
    if os.path.exists(checkpoints_weight):
        print('using pretrained weight: {}'.format(checkpoints_weight))
        cc = torch.load(checkpoints_weight, map_location=device)
        model.load_state_dict(cc['model_state_dict'])
        try:
            resume_epoch = cc['epoch']
        except KeyError:
            resume_epoch = 0

    params_to_update = model.parameters()
    optimizer = optim.SGD(params_to_update, lr=lr, momentum=0.9)

    criterion_cls = RPN_CLS_Loss(device)
    criterion_regr = RPN_REGR_Loss(device)

    best_loss_cls = 100
    best_loss_regr = 100
    best_loss = 100
    best_model = None
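
A sketch of the epoch loop that typically follows this setup (not part of the original fragment). The batch layout (image, classification targets, regression targets), the loss-module call signatures and the 'epochs' entry in args are assumptions modelled on common CTPN training code, not taken from this source.

    epochs = args['epochs']  # assumed to be provided alongside the other args entries
    for epoch in range(resume_epoch + 1, epochs + 1):
        model.train()
        epoch_loss = 0.0
        for image, clss, regrs in dataloader:  # assumed batch layout
            image, clss, regrs = image.to(device), clss.to(device), regrs.to(device)

            optimizer.zero_grad()
            out_cls, out_regr = model(image)
            loss_cls = criterion_cls(out_cls, clss)      # assumed (prediction, target) signature
            loss_regr = criterion_regr(out_regr, regrs)
            loss = loss_cls + loss_regr
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()

        # compare the mean loss of this epoch against the best seen so far
        epoch_loss /= max(1, len(dataloader))
        if epoch_loss < best_loss:
            best_loss = epoch_loss
            best_model = {k: v.clone() for k, v in model.state_dict().items()}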