Example #1
def main():

    if not os.path.exists(opt.output):
        os.makedirs(opt.output)

    converter = utils.strLabelConverter(opt.alphabet)

    collate = dataset.AlignCollate()
    train_dataset = dataset.TextLineDataset(text_file=opt.train_list, transform=dataset.ResizeNormalize(100, 32), converter=converter)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchsize, shuffle=True,
                                               num_workers=opt.num_workers, collate_fn=collate)
    test_dataset = dataset.TextLineDataset(text_file=opt.train_list, transform=dataset.ResizeNormalize(100, 32), converter=converter)  # NOTE: this reuses opt.train_list for evaluation; point it at a separate validation list if one is available
    test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=opt.batchsize,
                                              num_workers=opt.num_workers, collate_fn=collate)

    criterion = nn.CTCLoss()

    import models.crnn as crnn

    crnn = crnn.CRNN(opt.imgH, opt.nc, opt.num_classes, opt.nh)
    crnn.apply(utils.weights_init)
    if opt.pretrained != '':
        print('loading pretrained model from %s' % opt.pretrained)
        crnn.load_state_dict(torch.load(opt.pretrained), strict=False)
    print(crnn)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    crnn = crnn.to(device)
    criterion = criterion.to(device)


    # setup optimizer
    optimizer = optim.Adam(crnn.parameters(), lr=opt.lr)

    for epoch in range(opt.num_epochs):

        loss_avg = 0.0
        i = 0
        # build the iterator once per epoch; recreating it inside the loop below
        # would restart the DataLoader on every single batch
        train_iter = iter(train_loader)
        while i < len(train_loader):

            time0 = time.time()
            # train one batch at a time
            cost = trainBatch(crnn, train_iter, criterion, optimizer, device)
            loss_avg += cost
            i += 1

            if i % opt.interval == 0:
                print('[%d/%d][%d/%d] Loss: %f Time: %f s' %
                      (epoch, opt.num_epochs, i, len(train_loader), loss_avg / opt.interval,
                       time.time() - time0))
                loss_avg = 0.0



        if (epoch + 1) % opt.valinterval == 0:
            val(crnn, test_loader, criterion, converter=converter, device=device, max_iter=100)
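
# The loop above relies on a trainBatch helper that is not shown in this snippet.
# A minimal sketch of what such a helper might look like for a CTC-trained CRNN is
# given below; the call signature matches the call above, but the batch layout
# (images, integer-encoded targets, target lengths) is an assumption about what
# dataset.AlignCollate returns.
def trainBatch(net, train_iter, criterion, optimizer, device):
    images, targets, target_lengths = next(train_iter)  # assumed collate output
    images = images.to(device)

    optimizer.zero_grad()
    preds = net(images).log_softmax(2)  # (T, N, C) log-probabilities for nn.CTCLoss
    T, N, _ = preds.size()
    preds_lengths = torch.full((N,), T, dtype=torch.long)

    cost = criterion(preds, targets, preds_lengths, target_lengths)
    cost.backward()
    optimizer.step()
    return cost.item()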
Example #2
            alphabet.append(u' ')
            alphabet = ''.join(alphabet)

        converter = utils.strLabelConverter(alphabet, attention=False)

        nclass = converter.num_classes

        crnn = models.crnn.CRNN(imgH, nc, nclass, num_hidden)
        crnn.apply(weights_init)

        if args.cuda:
            crnn = crnn.cuda()
            crnn = torch.nn.DataParallel(crnn)

        logger.info("Loading pretrained model from {}".format(args.ckpt))
        file_weights = torch.load(args.ckpt)

        crnn.load_state_dict(file_weights)

        print("The oracle network:",
              crnn)  # Logging can't print torch models :thinking:

        image = Image.open(args.input[0]).convert('L')
        attack = CarliniAttack(crnn, alphabet, image.size, args.target,
                               file_weights)

        attack.execute(args.input, args.out)

    except KeyboardInterrupt:
        pass
Example #3
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


nclass = len(alpha) + 1
print(nclass)
crnn = crnn.CRNN(params.imgH, params.nc, nclass, params.nh)
crnn.apply(weights_init)
if params.pretrained != '':
    print('loading pretrained model from %s' % params.pretrained)
    if params.multi_gpu:
        crnn = torch.nn.DataParallel(crnn)
    crnn.load_state_dict(torch.load(params.pretrained))
print(crnn)

# -------------------------------------------------------------------------------------------------
converter = utils.strLabelConverter(alpha)
criterion = CTCLoss()

image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
text = torch.IntTensor(params.batchSize * 5)
length = torch.IntTensor(params.batchSize)
if params.cuda and torch.cuda.is_available():
    crnn.cuda()
    if params.multi_gpu and not isinstance(crnn, torch.nn.DataParallel):
        crnn = torch.nn.DataParallel(crnn, device_ids=range(params.ngpu))
    image = image.cuda()
    criterion = criterion.cuda()
Example #4
crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    pre_trainmodel = torch.load(opt.crnn)
    pre_trainmodel_rename = collections.OrderedDict()
    for k, v in pre_trainmodel.items():
        name = k[7:]
        pre_trainmodel_rename[name] = v
    model_dict = crnn.state_dict()
    weig1 = 'rnn.1.embedding.weight'
    bias1 = 'rnn.1.embedding.bias'
    # the checkpoint keys were renamed above to drop the `module.` prefix, so the
    # comparison and the copy below must use the renamed dict
    if len(model_dict[weig1]) == len(pre_trainmodel_rename[weig1]) and len(
            model_dict[bias1]) == len(pre_trainmodel_rename[bias1]):
        crnn.load_state_dict(pre_trainmodel_rename)
    else:
        for k, v in model_dict.items():
            if k != weig1 and k != bias1:
                model_dict[k] = pre_trainmodel_rename[k]
        crnn.load_state_dict(model_dict)
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda()
Example #5
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm2d') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, opt.imgC, nclass, opt.nh)
crnn.apply(weights_init)
if opt.model_path != '':
    print('loading pretrained model from %s' % opt.model_path)
    state_dict = torch.load(opt.model_path)
    state_dict_rename = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        state_dict_rename[name] = v
    crnn.load_state_dict(state_dict_rename)
print(crnn)
image = torch.FloatTensor(opt.batchSize, opt.imgC, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    if opt.gpu_idx != -1:
        torch.cuda.set_device(opt.gpu_idx)
        crnn.cuda()
        crnn = torch.nn.DataParallel(crnn,
                                     device_ids=range(opt.gpu_idx,
                                                      opt.gpu_idx + opt.ngpu))
    else:
        crnn.cuda()
        crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda()  # move the input tensor to the GPU in both branches
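
# Several of these snippets strip the `module.` prefix that saving from a
# DataParallel-wrapped model adds to checkpoint keys. A small reusable helper
# (a sketch, not part of any of the repositories quoted here) doing the same:
from collections import OrderedDict

def strip_module_prefix(state_dict):
    renamed = OrderedDict()
    for k, v in state_dict.items():
        # drop a leading "module." if present, leave other keys untouched
        renamed[k[7:] if k.startswith('module.') else k] = v
    return renamed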
Example #6
    test_dataset = dataset.lmdbDataset(root=test_lmdb_path,
                                       transform=dataset.resizeNormalize(
                                           (100, 32)),
                                       type="test")

    nclass = len(alphabet) + 1
    nc = 1
    converter = utils.strLabelConverter(alphabet)

    crnn = crnn.CRNN(32, nc, nclass, 256)

    image = torch.FloatTensor(64, 3, 32, 32)
    text = torch.IntTensor(64 * 5)
    length = torch.IntTensor(64)

    if use_cuda:
        crnn.cuda()
        crnn = torch.nn.DataParallel(crnn, device_ids=range(1))
        image = image.cuda()

    print('loading pretrained model from %s' % saved_model_path)
    crnn.load_state_dict(torch.load(saved_model_path))
    print(crnn)

    # image = Variable(image)
    # text = Variable(text)
    # length = Variable(length)

    test_by_xzy(crnn, test_dataset)
Example #7

# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

crnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(torch.load(opt.crnn))
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    image = image.cuda()
    criterion = criterion.cuda()

image = Variable(image)
text = Variable(text)
length = Variable(length)
Example #8
                            shuffle=True,
                            num_workers=params.workers)
    converter = utils.strLabelConverter(dataset.alphabet)
    nclass = len(params.alphabet) + 1
    nc = 1

    criterion = torch.nn.CTCLoss(reduction='sum')
    # criterion = CTCLoss()

    # cnn and rnn
    crnn = crnn.CRNN(32, nc, nclass, params.nh)

    crnn.apply(weights_init)
    if params.crnn != '':
        print('loading pretrained model from %s' % params.crnn)
        crnn.load_state_dict(torch.load(params.crnn))

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer
    if params.adam:
        optimizer = optim.Adam(crnn.parameters(),
                               lr=params.lr,
                               betas=(params.beta1, 0.999))
    elif params.adadelta:
        optimizer = optim.Adadelta(crnn.parameters(), lr=params.lr)
    else:
        optimizer = optim.RMSprop(crnn.parameters(), lr=params.lr)

    main(crnn, train_loader, val_loader, criterion, optimizer)
Example #9

# crnn_path = '/export/home/frankzhan/files/weights/CRNN_19.pth'

cudnn.benchmark = True
alphabet = keys.alphabet
converter = util.strLabelConverter(alphabet)
image = torch.FloatTensor(batchSize, 3, imgH, imgH)
text = torch.IntTensor(batchSize * 5)
length = torch.IntTensor(batchSize)
image = Variable(image)
text = Variable(text)
length = Variable(length)
criterion = CTCLoss()
crnn = crnn.CRNN(imgH, nc, nclass, nh, 1)
# crnn.apply(weights_init)
print('loading pretrained model')
crnn.load_state_dict(torch.load(crnn_path))

image = image.cuda()
crnn = crnn.cuda()
criterion = criterion.cuda()

test_dataset = dataset.lmdbDataset(root=valroot_path)
# sampler_mode = dataset.randomSequentialSampler(test_dataset, batchSize)
# loss averager
loss_avg = util.averager()


def val(net, test_dataset, criterion, max_iter=100):
    print('Start val')

    for p in crnn.parameters():
Example #10
            count += 1
    
    # raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:params.n_test_disp]
    # for raw_pred, pred, gt in zip(raw_preds, sim_preds, list_1):
    #     print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))

    accuracy = n_correct / len(data_loader.dataset)
    print('Num correct: %d, accuracy: %f' % (n_correct, accuracy))


manualSeed = random.randint(1, 10000)  # fix seed
random.seed(manualSeed)
np.random.seed(manualSeed)
torch.manual_seed(manualSeed)
cudnn.benchmark = True
 
test_dataset = dataset.lmdbDataset(
        root=params.valroot, transform=dataset.resizeNormalize((160, 32)))

converter = utils.strLabelConverter(params.alphabet)
nclass = len(params.alphabet) + 1
nc = 1
crnn = crnn.CRNN(params.imgH, nc, nclass, params.nh).cuda()
text = torch.IntTensor(params.batchSize * 5)
image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH).cuda()
length = torch.IntTensor(params.batchSize)

crnn.load_state_dict(torch.load(params.restore_ckpt))

val(crnn, test_dataset)
Example #11
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(torch.load(opt.crnn, map_location='cpu'))
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    image = image.cuda()
    criterion = criterion.cuda()

image = Variable(image)
text = Variable(text)
length = Variable(length)
Example #12
dataloader_params = {
    'batch_size': batch,
    'shuffle': True,
    'collate_fn': old_collate,
    'drop_last': True

}

test_loader = DataLoader(test_dataset, **dataloader_params)

PATH = '../weights/hand_print_mobilenet_v2_24.pth'

# crnn = crnn.CRNN(parameters.max_image_height, 3, parameters.number_of_classes, 256)
crnn = MobilenetRNN(parameters.input_channels, parameters.number_of_classes, 256)
crnn = torch.nn.DataParallel(crnn)
crnn.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))
crnn.eval()

image = torch.FloatTensor(dataloader_params['batch_size'], 3, parameters.max_image_width, parameters.max_image_height)
text = torch.IntTensor(dataloader_params['batch_size'] * 5)
length = torch.IntTensor(dataloader_params['batch_size'])

with torch.no_grad():
    string_converter = StrLabelConverter()
    loss_function = CTCLoss(zero_infinity=True)
    total_cost = 0
    if torch.cuda.is_available():
        crnn.cuda()  # already wrapped in DataParallel above, so no second wrap is needed
        image = image.cuda()
        text = text.cuda()
Example #13
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    if opt.model=='ctc':
        crnn.cuda()
        crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
        criterion = criterion.cuda()

    image = image.cuda()


if opt.pre_model != '':
    if opt.model=='ctc':
        print('loading pretrained model from %s' % opt.pre_model)
        pre_model = torch.load(opt.pre_model)
        crnn.load_state_dict(pre_model)

elif opt.mode == "test":
    print("A pretrained model must be provided for testing mode.")
    sys.exit(0)  # os has no exit(); sys.exit is the intended call


if opt.model=='ctc':
    print("Your neural network:", crnn)


image = Variable(image)
text = Variable(text)
length = Variable(length)

# loss averager
Example #14
        criterion = criterion.cuda()

    crnn.apply(weights_init)
    if params.crnn != '':
        print('loading pretrained model from %s' % params.crnn)

        preWeightDict = torch.load(
            params.crnn,
            map_location=lambda storage, loc: storage)  # weights trained for this project
        modelWeightDict = crnn.state_dict()
        for k, v in preWeightDict.items():
            name = k.replace('module.', '')  # remove `module.`
            if 'rnn.1.embedding' not in name:  # do not load the last layer's weights
                modelWeightDict[name] = v

        crnn.load_state_dict(modelWeightDict)

    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer
    if params.adam:
        optimizer = optim.Adam(crnn.parameters(),
                               lr=params.lr,
                               betas=(params.beta1, 0.999))
    elif params.adadelta:
        optimizer = optim.Adadelta(crnn.parameters(), lr=params.lr)
Example #15
crnn.apply(weights_init)
if opt.cuda:
    crnn.cuda()
    #crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda()
    probs = probs.cuda()
    text = text.cuda()
    criterion = criterion.cuda()
if opt.pretrained != '':
    print('loading pretrained model from %s' % opt.pretrained)
    try:
        ckpt = torch.load(opt.pretrained)
        if 'module' in next(iter(ckpt.keys())):
            print('DataParallel model.')
            crnn = nn.DataParallel(crnn)
        crnn.load_state_dict(ckpt)
    except Exception:
        if not opt.strict:
            print('\tStrict load failed, use unstricted loading.')
            model_params = crnn.get_params_name()
            print(model_params)
            for par, val in crnn.named_parameters():
                if par in ckpt.keys() and val.size() != ckpt[par].size():
                    print('par size mismatch:', val.size(), ckpt[par].size())
                    del ckpt[par]
            crnn.load_state_dict(ckpt, strict=False)
        else:
            print('Failed to load model')
            sys.exit(0)
print(crnn)
'''
Example #16
crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    pre_trainmodel = torch.load(opt.crnn)
    model_dict = crnn.state_dict()
    weig1 = 'module.rnn.1.embedding.weight'
    bias1 = 'module.rnn.1.embedding.bias'
    mymodel = {}
    if len(model_dict[weig1[7:]]) == len(pre_trainmodel[weig1]) and len(
            model_dict[bias1[7:]]) == len(pre_trainmodel[bias1]):
        for k, v in pre_trainmodel.items():
            mymodel[k[7:]] = v
            # print(k, len(v))
        crnn.load_state_dict(mymodel)
    else:
        for k, v in model_dict.items():
            # model_dict keys carry no `module.` prefix, so compare against the
            # stripped names and look the value up with the prefix restored
            if k != weig1[7:] and k != bias1[7:]:
                model_dict[k] = pre_trainmodel['module.' + k]
        crnn.load_state_dict(model_dict)
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda()
Example #17

# resume training
crnnPath = opt.crnn
if crnnPath is None or crnnPath == '':
    crnnPath = file_path + '/expr'
if crnnPath is not None:
    pths = os.listdir(crnnPath)
    # fixes the earlier load failure: just pick the newest checkpoint instead of the second newest one
    if len(pths) > 0:
        pths.sort()
        if pths[len(pths) - 1].endswith(".pth"):
            continue_path = crnnPath + "/" + pths[len(pths) - 1]
            print_msg("从上次文件继续训练:{}".format(continue_path))
            crnn = torch.nn.DataParallel(crnn)
            state_dict = torch.load(continue_path)
            try:
                crnn.load_state_dict(state_dict)
            except Exception as ex:
                print_msg("Loading failed ({0}); switching to a custom dict that strips the module. prefix".format(ex))
                from collections import OrderedDict

                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k[7:]  # remove `module.`
                    new_state_dict[name] = v
                # load params
                crnn.load_state_dict(new_state_dict)
        else:
            print_msg("你这不符合格式啊:{}".format(pths[0]))

# three tensors holding, respectively, the image batch, the encoded label strings, and the label lengths
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
Example #18
train_loader = loader.train_loader(opt.batch_size, num_workers=opt.workers)
test_loader = loader.test_loader(opt.batch_size, num_workers=opt.workers)

alphabet = open(os.path.join(opt.root, opt.alphabet)).read().rstrip()
nclass = len(alphabet) + 1
nc = 3

print(len(alphabet), alphabet)
converter = utils.strLabelConverter(alphabet, ignore_case=False)
criterion = CTCLoss()

crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
if opt.pretrained != '':
    print('loading pretrained model from %s' % opt.pretrained)
    pretrain = torch.load(opt.pretrained)
    crnn.load_state_dict(pretrain, strict=False)

image = torch.FloatTensor(opt.batch_size, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batch_size * 5)
length = torch.IntTensor(opt.batch_size)

if opt.cuda:
    crnn.cuda()
    image = image.cuda()
    criterion = criterion.cuda()

summary(crnn.cnn, (3, opt.imgH, opt.imgW))

image = Variable(image)
text = Variable(text)
length = Variable(length)
Example #19
    # convert characters with the label converter
    converter = utils.strLabelConverter(params.alphabet)
    criterion = CTCLoss()
    # criterion = torch.nn.CTCLoss()
    # cnn and rnn
    image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
    text = torch.IntTensor(params.batchSize * 5)
    length = torch.IntTensor(params.batchSize)

    # crnn = crnn.CRNN(params.imgH, nc, nclass, params.nh)
    crnn = crnn.CRNN(6736, hidden_unit=256)

    crnn_model_path = 'trained_models/netCRNN_4_48000.pth'
    # load the pretrained model weights
    print("loading pretrained model from %s" % crnn_model_path)
    crnn.load_state_dict(torch.load(crnn_model_path, map_location='cpu'))
    # grab the pretrained parameters
    pretrained_dict = crnn.state_dict()

    # mycrnn = mycrnn.CRNN(params.imgH, nc, nclass, params.nh)
    mycrnn = mycrnn.CRNN(class_num=nclass, hidden_unit=256)
    mycrnn_dict = mycrnn.state_dict()

    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in mycrnn_dict
    }
    # update the parameters with the pretrained model
    mycrnn_dict.update(pretrained_dict)
    mycrnn.load_state_dict(mycrnn_dict)
Example #20
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    # a checkpoint saved from a DataParallel model carries a `module.` prefix on its
    # keys, so wrap the model and retry the load if the plain load fails
    while True:
        try:
            crnn.load_state_dict(torch.load(opt.crnn))
            break
        except RuntimeError:
            if opt.cuda:
                crnn.cuda()
                crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
            else:
                raise

print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    if opt.crnn == '':
        crnn.cuda()
Example #21

    crnn = crnn.CRNN(test_params.imgH, nc, nclass, test_params.nh)
    if opt.cuda:
        crnn.cuda()
        image = image.cuda()
        criterion = criterion.cuda()

    crnn.apply(weights_init)
    if test_params.crnn != '':
        logger.info('loading pretrained model from %s' % test_params.crnn)
        if test_params.without_fully:
            pretrained_dict = torch.load(test_params.crnn)
            model_dict = crnn.state_dict()
            pretrained_dict.pop('rnn.1.embedding.weight')
            pretrained_dict.pop('rnn.1.embedding.bias')
            crnn.load_state_dict(pretrained_dict, strict=False)
        else:
            crnn.load_state_dict(torch.load(test_params.crnn), strict=False)

    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer
    if test_params.adam:
        optimizer = optim.Adam(crnn.parameters(),
                               lr=test_params.lr,
                               betas=(test_params.beta1, 0.999))
Example #22
model_path = opt.model
img_path = opt.image
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'

with open('./data/vocabulary.txt', 'r') as voca_file:
    alphabet = voca_file.readline()

if torch.cuda.is_available():
    torch.cuda.set_device(opt.gpu)
    print('device:', torch.cuda.current_device())

crnn = crnn.CRNN(32, 1, len(alphabet) + 1, 256)
if torch.cuda.is_available():
    crnn = crnn.cuda()
print('loading pretrained model from %s' % model_path)
crnn.load_state_dict(torch.load(model_path))
crnn.eval()

converter = utils.strLabelConverter(alphabet)


def test_image(image_path, label, keep_ratio=False):
    image = Image.open(image_path).convert('L')
    if keep_ratio:
        w, h = image.size  # PIL images expose size as (width, height), not .shape
        resize_w = 32.0 * w / h
        transformer = dataset.resizeNormalize((resize_w, 32))
    else:
        transformer = dataset.resizeNormalize((576, 32))

    image = transformer(image)
Example #23

    # cnn and rnn
    image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
    text = torch.IntTensor(params.batchSize * 5)
    length = torch.IntTensor(params.batchSize)

    crnn = crnn.CRNN(params.imgH, nc, nclass, params.nh)
    crnn = torch.nn.DataParallel(crnn)
    if opt.cuda:
        crnn.cuda()
        image = image.cuda()
        criterion = criterion.cuda()

    crnn.apply(weights_init)
    if params.crnn != '':
        logger.info('loading pretrained model from %s' % params.crnn)
        crnn.load_state_dict(torch.load(params.crnn), strict=False)

    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer
    if params.adam:
        optimizer = optim.Adam(crnn.parameters(),
                               lr=params.lr,
                               betas=(params.beta1, 0.999))
    elif params.adadelta:
        optimizer = optim.Adadelta(crnn.parameters(), lr=params.lr)
Example #24
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(torch.load(opt.crnn), strict=False)
print(crnn)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
crnn = crnn.to(device)
criterion = criterion.to(device)

# loss averager
loss_avg = utils.averager()

# setup optimizer
# optimizer = optim.Adam(crnn.parameters(), lr=opt.lr, weight_decay=1e-4)
optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=2000,
                                            gamma=0.3)
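
# A minimal, self-contained illustration (not from the snippet above) of what the
# StepLR arguments mean: the learning rate is multiplied by gamma once every
# step_size calls to scheduler.step().
import torch
from torch import optim

demo_param = torch.nn.Parameter(torch.zeros(1))
demo_optimizer = optim.Adadelta([demo_param], lr=1.0)
demo_scheduler = optim.lr_scheduler.StepLR(demo_optimizer, step_size=2000, gamma=0.3)
for _ in range(6000):
    demo_optimizer.step()
    demo_scheduler.step()
print(demo_scheduler.get_last_lr())  # ~[0.027], i.e. 1.0 * 0.3 ** 3 after 6000 steps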
Example #25
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh, 1)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(
        torch.load(opt.crnn, map_location=lambda storage, loc: storage))
# pretrained_dict = torch.load(opt.crnn)
# new_state_dict = OrderedDict()
# for k, v in pretrained_dict.items():
#     name = k[7:]  # remove module.
#     new_state_dict[name] = v

# for name, module in crnn.named_children():
#     if name == 'cnn':
#         module_dict = module.state_dict()
#         new_state_dict = {k: v for k, v in new_state_dict.items() if k in module_dict}

#        module_dict.update(new_state_dict)
#         module.load_state_dict(module_dict)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
Example #26
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, num_channels, nclass, opt.hidden_size)
crnn.apply(weights_init)
if opt.pretrained != '':
    print('loading pretrained model from %s' % opt.pretrained)
    crnn.load_state_dict(torch.load(opt.pretrained))
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if torch.cuda.is_available():
    crnn = crnn.cuda(opt.gpu)
    # crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda(opt.gpu)
    criterion = criterion.cuda(opt.gpu)

image = Variable(image)
text = Variable(text)
length = Variable(length)
Example #27
# custom weights initialization called on crnn
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.pretrained != '':
    print('loading pretrained model from %s' % opt.pretrained)
    #crnn.load_state_dict(torch.load(opt.pretrained))
    crnn.load_state_dict({k.replace('module.',''):v for k,v in torch.load(opt.pretrained).items()})
print(crnn)

image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda()
    criterion = criterion.cuda()

image = Variable(image)
text = Variable(text)
length = Variable(length)
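
# For reference, a minimal greedy CTC decoder (a hypothetical helper; the snippets
# above normally delegate this to utils.strLabelConverter.decode). It follows the
# convention used throughout these examples: class 0 is the CTC blank and class i
# maps to alphabet[i - 1].
def greedy_ctc_decode(log_probs, alphabet, blank=0):
    # log_probs: (T, num_classes) scores for a single image
    best_path = log_probs.argmax(dim=1).tolist()
    chars = []
    prev = blank
    for idx in best_path:
        # collapse repeated labels, then drop blanks
        if idx != blank and idx != prev:
            chars.append(alphabet[idx - 1])
        prev = idx
    return ''.join(chars)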