コード例 #1
0
def main():
    """Train the CRNN text-recognition model configured by the module-level
    `opt` namespace and periodically run validation.

    Relies on module-level names: `opt`, `os`, `utils`, `dataset`, `torch`,
    `nn`, `optim`, `time`, `trainBatch`, `val`.
    """
    # Create the output directory if it does not exist yet.
    if not os.path.exists(opt.output):
        os.makedirs(opt.output)

    converter = utils.strLabelConverter(opt.alphabet)

    collate = dataset.AlignCollate()
    train_dataset = dataset.TextLineDataset(text_file=opt.train_list, transform=dataset.ResizeNormalize(100, 32), converter=converter)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchsize, shuffle=True,
                                               num_workers=opt.num_workers, collate_fn=collate)
    # NOTE(review): the "test" set is built from opt.train_list, so validation
    # measures training-set performance — confirm whether opt has a test_list.
    test_dataset = dataset.TextLineDataset(text_file=opt.train_list, transform=dataset.ResizeNormalize(100, 32), converter=converter)
    test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=opt.batchsize,
                                              num_workers=opt.num_workers, collate_fn=collate)

    criterion = nn.CTCLoss()

    import models.crnn as crnn

    crnn = crnn.CRNN(opt.imgH, opt.nc, opt.num_classes, opt.nh)
    crnn.apply(utils.weights_init)
    if opt.pretrained != '':
        print('loading pretrained model from %s' % opt.pretrained)
        crnn.load_state_dict(torch.load(opt.pretrained), strict=False)
    print(crnn)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    crnn = crnn.to(device)
    criterion = criterion.to(device)

    # setup optimizer
    optimizer = optim.Adam(crnn.parameters(), lr=opt.lr)

    for epoch in range(opt.num_epochs):
        loss_avg = 0.0
        # BUG FIX: the loader iterator must be created once per epoch.  The
        # original re-created it on every loop pass, so `trainBatch` always
        # consumed the first batch of a fresh iterator and most of the
        # training data was never seen.
        train_iter = iter(train_loader)
        i = 0
        while i < len(train_loader):
            time0 = time.time()
            # Train on a single batch pulled from train_iter.
            cost = trainBatch(crnn, train_iter, criterion, optimizer, device)
            loss_avg += cost
            i += 1

            if i % opt.interval == 0:
                # BUG FIX: report the mean loss over the last `interval`
                # batches; the original printed the accumulated sum under the
                # label "Loss".
                print('[%d/%d][%d/%d] Loss: %f Time: %f s' %
                      (epoch, opt.num_epochs, i, len(train_loader),
                       loss_avg / opt.interval, time.time() - time0))
                loss_avg = 0.0

        if (epoch + 1) % opt.valinterval == 0:
            val(crnn, test_loader, criterion, converter=converter, device=device, max_iter=100)
コード例 #2
0
# CTC loss over the sequence predictions, and a TensorBoard summary writer
# for logging (both names come from imports outside this chunk).
criterion = CTCLoss()
writer = SummaryWriter()


# custom weights initialization called on crnn
def weights_init(m):
    """DCGAN-style layer initialisation, applied via ``model.apply``.

    Conv weights are drawn from N(0, 0.02); batch-norm scales from
    N(1, 0.02) with the shift zeroed.  Other layer types are untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# Build the CRNN, apply the custom init, and optionally resume from a
# pretrained checkpoint (note: this rebinds the `crnn` module name to the
# model instance).
crnn = crnn.CRNN(opt.imgH, num_channels, nclass, opt.hidden_size)
crnn.apply(weights_init)
if opt.pretrained != '':
    print('loading pretrained model from %s' % opt.pretrained)
    crnn.load_state_dict(torch.load(opt.pretrained))
print(crnn)

# Pre-allocated batch buffers, refilled in place each iteration.
# NOTE(review): the image buffer is opt.imgH wide as well as tall — looks
# like an imgW was intended for the last dimension; confirm with the loader.
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
# assumes labels average <= 5 characters per sample — TODO confirm
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if torch.cuda.is_available():
    crnn = crnn.cuda(opt.gpu)
    # crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
    image = image.cuda(opt.gpu)
    criterion = criterion.cuda(opt.gpu)
コード例 #3
0
    # Label<->index converter and the built-in CTC loss (the commented line
    # was the old external warp-ctc binding).
    converter = utils.strLabelConverter(params.alphabet)
    #criterion = CTCLoss()
    criterion = torch.nn.CTCLoss()

    # cnn and rnn
    # Pre-allocated batch buffers, refilled in place each iteration.
    # NOTE(review): the image buffer is params.imgH wide as well as tall —
    # confirm an imgW was not intended for the last dimension.
    image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
    # assumes labels average <= 5 characters per sample — TODO confirm
    text = torch.IntTensor(params.batchSize * 5)
    length = torch.IntTensor(params.batchSize)

    # Rebinds the `crnn` module name to the model instance.
    crnn = crnn.CRNN(params.imgH, nc, nclass, params.nh)
    # NOTE(review): mixes `opt.cuda` with `params.*` configuration — confirm
    # both namespaces exist at this point.
    if opt.cuda:
        crnn.cuda()
        image = image.cuda()
        criterion = criterion.cuda()

    # Random init first; a pretrained checkpoint (if given) overwrites it.
    crnn.apply(weights_init)  # parameter initialization
    if params.crnn != '':
        print('loading pretrained model from %s' % params.crnn)
        crnn.load_state_dict(torch.load(params.crnn))

    # Variable() is a no-op on modern PyTorch; kept for legacy compatibility.
    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer
    if params.adam:
        optimizer = optim.Adam(crnn.parameters(),
                               lr=params.lr,
コード例 #4
0
File: train.py  Project: yiwangchunyu/CVCR
def main(arg):
    """Train a CRNN text recogniser on an LMDB dataset with CTC loss.

    ``arg`` is an argparse-style namespace providing dataset paths,
    hyper-parameters and logging/checkpoint intervals.  Relies on the
    module-level names ``dataset``, ``utils``, ``CRNN``, ``CTCLoss``,
    ``DataLoader``, ``Variable``, ``optim``, ``torch`` and ``test``.
    """
    print(arg)
    train_dataset = dataset.lmdbDataset(
        path=arg.train_root,
        # transform=dataset.resizeNormalize((imgW,imgH)),
    )
    test_dataset = dataset.lmdbDataset(
        path=arg.test_root,
        # transform=dataset.resizeNormalize((arg.imgW,arg.imgH)),
    )
    train_loader = DataLoader(train_dataset,
                              num_workers=arg.num_workers,
                              batch_size=arg.batch_size,
                              collate_fn=dataset.alignCollate(
                                  imgH=arg.imgH,
                                  imgW=arg.imgW,
                                  keep_ratio=arg.keep_ratio),
                              shuffle=True,
                              drop_last=True)

    criterion = CTCLoss()
    converter = utils.Converter(arg.num_class)
    # +1 output class for the CTC blank symbol.
    crnn = CRNN(imgH=arg.imgH, nc=3, nclass=arg.num_class + 1, nh=256)

    # custom weights initialization called on crnn (DCGAN-style).
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    crnn.apply(weights_init)
    print(crnn)

    # Pre-allocated batch buffers, refilled in place each iteration.
    image = torch.FloatTensor(arg.batch_size, 3, arg.imgH, arg.imgW)
    # assumes labels average <= 5 characters per sample — TODO confirm
    text = torch.IntTensor(arg.batch_size * 5)
    length = torch.IntTensor(arg.batch_size)

    # Variable() is a no-op on modern PyTorch; kept for legacy compatibility.
    image = Variable(image)
    text = Variable(text)
    length = Variable(length)

    # loss averager
    loss_avg = utils.averager()

    # setup optimizer (NOTE: Adam/RMSprop learning rate is hard-coded at 0.01)
    if arg.opt == 'adam':
        optimizer = optim.Adam(crnn.parameters(), 0.01, betas=(0.5, 0.999))
    elif arg.opt == 'adadelta':
        optimizer = optim.Adadelta(crnn.parameters())
    else:
        optimizer = optim.RMSprop(crnn.parameters(), 0.01)

    for epoch in range(arg.n_epoch):
        train_iter = iter(train_loader)
        i = 0
        while i < len(train_loader):
            for p in crnn.parameters():
                p.requires_grad = True
            crnn.train()

            # BUG FIX: `train_iter.next()` is Python-2 / old-PyTorch API and
            # raises AttributeError on Python 3; use the builtin next().
            data = next(train_iter)
            cpu_images, cpu_texts = data
            batch_size = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            text_labels, label_lengths = converter.encode(cpu_texts)
            utils.loadData(text, text_labels)
            utils.loadData(length, label_lengths)

            preds = crnn(image)
            preds_size = Variable(torch.IntTensor([preds.size(0)] *
                                                  batch_size))
            cost = criterion(preds, text, preds_size, length) / batch_size
            crnn.zero_grad()
            cost.backward()
            optimizer.step()

            loss_avg.add(cost)
            i += 1

            if i % arg.displayInterval == 0:
                print(
                    '[%d/%d][%d/%d] Loss: %f' %
                    (epoch, arg.n_epoch, i, len(train_loader), loss_avg.val()))
                loss_avg.reset()

            if i % arg.testInterval == 0:
                test(arg, crnn, test_dataset, criterion, image, text, length)

            # do checkpointing
            if i % arg.saveInterval == 0:
                name = '{0}/netCRNN_{1}_{2}_{3}_{4}.pth'.format(
                    arg.model_dir, arg.num_class, arg.type, epoch, i)
                torch.save(crnn.state_dict(), name)
                print('model saved at ', name)
    torch.save(
        crnn.state_dict(),
        '{0}/netCRNN_{1}_{2}.pth'.format(arg.model_dir, arg.num_class,
                                         arg.type))
コード例 #5
0
# Label<->index converter for the target alphabet, and the CTC training loss.
converter = utils.strLabelConverter(alphabet)
criterion = CTCLoss()


# custom weights initialization called on crnn
def weights_init(m):
    """Initialise a single layer in DCGAN style (use with ``model.apply``).

    Conv weights get N(0, 0.02); batch-norm weights get N(1, 0.02) and a
    zero bias.  Any other layer is left at its default initialisation.
    """
    kind = m.__class__.__name__
    is_conv = kind.find('Conv') >= 0
    is_batchnorm = kind.find('BatchNorm') >= 0
    if is_conv:
        m.weight.data.normal_(0.0, 0.02)
    elif is_batchnorm:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

# Build the CRNN, apply the custom init, and optionally resume from a
# checkpoint (note: this rebinds the `crnn` module name to the model).
crnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)
crnn.apply(weights_init)
if opt.crnn != '':
    print('loading pretrained model from %s' % opt.crnn)
    crnn.load_state_dict(torch.load(opt.crnn))
print(crnn)

# Pre-allocated batch buffers, refilled in place each iteration.
# NOTE(review): the image buffer is opt.imgH wide as well as tall — confirm
# an imgW was not intended for the last dimension.
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
# assumes labels average <= 5 characters per sample — TODO confirm
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)

if opt.cuda:
    crnn.cuda()
    image = image.cuda()
    criterion = criterion.cuda()

# Variable() is a no-op on modern PyTorch; kept for legacy compatibility.
image = Variable(image)
コード例 #6
0
    # Flat buffer for the concatenated target labels of one batch;
    # sized assuming an average of <= 5 characters per sample.
    text = torch.IntTensor(params.batchSize *
                           5)  # text length no less than batchsize * 5
    length = torch.IntTensor(params.batchSize)  # define each word length

    # Rebinds the `crnn` module name to the model instance.
    crnn = crnn.CRNN(params.imgH, nc, nclass, params.nh)
    #if opt.cuda:
    #    crnn.cuda()
    #    image = image.cuda()
    #    criterion = criterion.cuda()
    # Move model, buffers and loss onto the configured device.
    # NOTE(review): `length` is deliberately left on CPU (see commented line).
    crnn = crnn.to(device)
    image = image.to(device)
    criterion = criterion.to(device)
    text = text.to(device)
    #length = length.to(device)

    # Random init first; a pretrained checkpoint (if given) overwrites it.
    crnn.apply(weights_init)  # self-define weight initialize function
    #print("crnn =",crnn)
    #print("crnn parameters =",crnn.cnn)
    if params.crnn != '':
        print('loading pretrained model from %s' % params.crnn)
        crnn.load_state_dict(torch.load(params.crnn))

    #for para in crnn.parameters():
    #print("parameters =", para)#para.requires_grad=False
    #crnn.state_dict()#get parameters list-------->cnn.conv0.weight cnn.conv0.bias cnn.conv1.weight cnn.conv1.bias cnn.conv2.weight cnn.conv2.bias cnn.batchnorm2.weight  cnn.batchnorm2.bias
    #crnn.state_dict().items()#get parameters name and its value ------------>('rnn.1.embedding.bias', tensor([-0.0602, -0.3962, -0.3687, -0.3052, -0.2965, -0.3442, -0.4302, -0.3631,\
    #-0.3303, -0.2937, -0.2485, -0.4897, -0.2815, -0.3473, -0.3228, -0.2575,\
    #-0.3200, -0.3391, -0.4191, -0.2042, -0.5009, -0.4935, -0.3103, -0.2821, \
    #-0.3521, -0.2895, -0.3934, -0.2745, -0.3072, -0.2851, -0.2467, -0.3485,\
    #-0.2747, -0.2944, -0.3731, -0.4065, -0.3084, -0.3154, -0.4246],\
    #device='cuda:0'))