# Example #1 — training-side setup
# Build the label converter and loss for an attention-based text recognizer.
# `opt` (parsed CLI options), `utils`, and the MODEL class are defined
# elsewhere in the project.
nclass = len(opt.alphabet.split(opt.sep))  # number of classes = symbols in the sep-delimited alphabet
converter = utils.strLabelConverterForAttention(opt.alphabet, opt.sep)
criterion = torch.nn.CrossEntropyLoss()

# NOTE(review): rebinding the class name `MODEL` to an instance shadows the
# class; the class cannot be re-instantiated after this line.
MODEL = MODEL(opt.n_bm, nclass, dec_layer=opt.dec_layer, LR=opt.LR)

# print("MODEL have {} paramerters in total".format(sum(x.numel() for x in MODEL.parameters())))

# Optionally warm-start from a checkpoint. Checkpoints saved from an
# nn.DataParallel-wrapped model carry a "module." prefix on every key,
# which must be stripped before loading into the bare model.
if opt.MODEL != '':
    print('loading pretrained model from %s' % opt.MODEL)
    state_dict = torch.load(opt.MODEL)
    MODEL_state_dict_rename = OrderedDict()
    for k, v in state_dict.items():
        name = k.replace("module.", "")  # remove `module.`
        MODEL_state_dict_rename[name] = v
    # strict=True: every checkpoint key must match the model exactly
    MODEL.load_state_dict(MODEL_state_dict_rename, strict=True)

# Pre-allocate tensors reused on every batch.
# The `* 5` presumably caps labels at 5 tokens per sample — TODO confirm
# against the data-loading code.
image = torch.FloatTensor(opt.batchSize, 1, opt.imgH, opt.imgW)
text1_ori = torch.LongTensor(opt.batchSize * 5)
text2_ori = torch.LongTensor(opt.batchSize * 5)
length_ori = torch.IntTensor(opt.batchSize)

if opt.cuda:
    MODEL.cuda()
    # Replicate the model across opt.ngpu GPUs for data-parallel training.
    MODEL = torch.nn.DataParallel(MODEL, device_ids=range(opt.ngpu))
    text1_ori = text1_ori.cuda()
    text2_ori = text2_ori.cuda()
    criterion = criterion.cuda()
    length_ori = length_ori.cuda()
    # NOTE(review): `image` is not moved to CUDA here — presumably copied
    # into per-batch on-device storage elsewhere; verify in the train loop.

# Variable is a no-op wrapper since PyTorch 0.4; kept for legacy code.
image = Variable(image)
# Example #2 — single-image inference setup
# Fixed input geometry the model expects; `alphabet`, `model_path`,
# `img_path`, `n_bm`, `dataset`, and the MODEL class are defined elsewhere.
imgW = 160  # input image width in pixels
imgH = 48   # input image height in pixels
nclass = len(alphabet.split(' '))  # classes = space-separated symbols in `alphabet`
# NOTE(review): rebinding the class name `MODEL` to an instance shadows the class.
MODEL = MODEL(n_bm, nclass)


if torch.cuda.is_available():
    MODEL = MODEL.cuda()

# Load checkpoint weights, stripping the "module." prefix that
# nn.DataParallel prepends to parameter names when a model is saved wrapped.
print('loading pretrained model from %s' % model_path)
state_dict = torch.load(model_path)
MODEL_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
    name = k.replace("module.", "") # remove `module.`
    MODEL_state_dict_rename[name] = v
MODEL.load_state_dict(MODEL_state_dict_rename)

# Freeze all parameters and switch to eval mode (disables dropout/batchnorm
# updates) — inference only, no gradients needed.
for p in MODEL.parameters():
    p.requires_grad = False
MODEL.eval()

# Preprocess a single image: resize to (imgW, imgH) and normalize, matching
# the training-time transform.
converter = utils.strLabelConverterForAttention(alphabet, ' ')
transformer = dataset.resizeNormalize((imgW, imgH))
image = Image.open(img_path).convert('RGB')
image = transformer(image)

if torch.cuda.is_available():
    image = image.cuda()
# Add a leading batch dimension: (C, H, W) -> (1, C, H, W).
image = image.view(1, *image.size())
image = Variable(image)  # Variable is a no-op since PyTorch 0.4
# Decode buffer; `1 * 5` presumably allows up to 5 tokens for the single
# sample — TODO confirm against the decoding code that consumes `text`.
text = torch.LongTensor(1 * 5)