# ---- Example 1 ----
def get_data(data_dir, num, batch_size, worker, is_train, img_size=(100, 32)):
    """Build the LMDB dataset(s) and a shuffled DataLoader over them.

    Args:
        data_dir: a single LMDB directory path, or a list of paths whose
            datasets are concatenated into one.
        num: sample-count argument forwarded to Chinese_LmdbDataset.
        batch_size: DataLoader batch size.
        worker: number of DataLoader worker processes.
        is_train: forwarded to the dataset; selects train/eval behaviour.
        img_size: (width, height) target for resizeNormalize. Defaults to
            the (100, 32) size that was previously hard-coded, so existing
            callers are unaffected.

    Returns:
        (datasets, data_loader) tuple.
    """
    transform = dataset.resizeNormalize(img_size)
    if isinstance(data_dir, list):
        # Multiple LMDB roots: wrap each one, then concatenate.
        datasets = concatdataset.ConcatDataset([
            dataset.Chinese_LmdbDataset(root,
                                        num,
                                        is_train=is_train,
                                        transform=transform)
            for root in data_dir
        ])
    else:
        datasets = dataset.Chinese_LmdbDataset(data_dir,
                                               num,
                                               is_train=is_train,
                                               transform=transform)
    print('total image', len(datasets))

    # drop_last=True keeps every batch at exactly batch_size samples.
    data_loader = DataLoader(datasets,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=worker,
                             drop_last=True)

    return datasets, data_loader
# ---- Example 2 ----
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from tqdm import tqdm
import json
import os


# Pin the visible GPU before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# Load the character alphabet and build the string<->integer label converter.
with open('/data1/zem/Resnet.CRNN/alphabet.json', 'r') as f:
    data = json.load(f)
alphabet = data['alphabet']
convert = dataset.strLabelToInt(alphabet)
class_num = convert.num_class

# Evaluation data: LMDB dataset, images resized/normalised to 100x32.
test_data_dir = '/data1/zem/OCR_and_STR/data/lmdb/recognition/ReCTS'
datasets = dataset.LmdbDataset(test_data_dir,1000000,transform=dataset.resizeNormalize((100,32)))
test_loader = DataLoader(datasets,batch_size=800,shuffle=True,num_workers=2,drop_last=True)
print('the test loader has %d steps' % (len(test_loader)))

# Load the model and restore its checkpoint. (The original rebuilt `convert`
# here with identical arguments; that redundant line was removed.)
device = torch.device('cuda:0')
model = ResNet_ASTER(num_class=convert.num_class,with_lstm=True).to(device)
checkpoint = torch.load('/data1/zem/Resnet.CRNN/expr/Chinese_best_model.pth',map_location='cuda:0')
model.load_state_dict(checkpoint['model'])

n_correct = 0

# Freeze all parameters and switch to eval mode: inference only.
for p in model.parameters():
    p.requires_grad = False
model.eval()
val_iter = iter(test_loader)
# ---- Example 3 ----
    print(torch.cuda.get_device_name(0),' is avaliable')
    device = torch.device('cuda:1')
else:
    print('using cpu actually')
    device = torch.device('cpu')










# Training set: LMDB-backed dataset, images resized/normalised to 100x32.
train_dataset = dataset.LmdbDataset(root=opt.trainRoot,num=opt.trainNumber,transform=dataset.resizeNormalize((100,32)))

# drop_last=True keeps every batch at exactly opt.batchSize samples.
train_loader = DataLoader(dataset=train_dataset,batch_size=opt.batchSize,shuffle=True,num_workers=opt.worker,drop_last=True)

# Validation set uses the same transform; its loader is presumably built
# elsewhere in the file (not visible in this chunk).
test_dataset = dataset.LmdbDataset(root=opt.valRoot,num=opt.testNumber,transform=dataset.resizeNormalize((100,32)))


# String<->integer label converter built from the configured alphabet.
convert = dataset.strLabelToInt(opt.alphabet)
# CTC loss for sequence recognition; running averages for val/train loss.
criterion = nn.CTCLoss()
loss_avg_for_val = utils.Averager()
loss_avg_for_tra = utils.Averager()



# num_class is len(alphabet)+1: the extra class is the CTC blank symbol.
crnn = resnet_aster.ResNet_ASTER(num_class=len(opt.alphabet)+1,with_lstm=True).to(device)
# ---- Example 4 ----
# Choose the alphabet: printable ASCII for English, or a JSON-stored
# character set otherwise (e.g. Chinese).
is_english = True

if is_english:
    # string.printable ends with the 6 whitespace chars ' \t\n\r\x0b\x0c';
    # [:-6] drops them so the alphabet holds only visible characters.
    alphabet = string.printable[:-6]
else:
    with open('./alphabet.json', 'r') as f:
        data = json.load(f)
    alphabet = data['alphabet']

# The converter and class count were duplicated in both branches above;
# compute them once here instead.
convert = dataset.strLabelToInt(alphabet)
class_num = convert.num_class

# Build the recogniser and restore pretrained weights on CPU.
model = resnet_aster.ResNet_ASTER(num_class=class_num, with_lstm=True)
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path, map_location='cpu'))

# Preprocess one image: resize/normalise to 100x32 and add a batch dim.
transformer = dataset.resizeNormalize((100, 32))
image = Image.open(img_path).convert('RGB')
image = transformer(image)
image = image.unsqueeze(0)

model.eval()
preds = model(image)

# Greedy decoding: argmax over the class axis, then flatten the
# (time, batch) grid to a 1-D label sequence for the CTC decoder.
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)

preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = convert.decoder(preds.data, preds_size.data)
print('%-20s' % (sim_pred))