Example #1
0
def train(crnn, train_loader, criterion, epoch):
    """Run one training epoch of the CRNN over ``train_loader``.

    Relies on module-level globals: ``device``, ``utils``, ``dataset``,
    ``converter``, ``optimizer`` and ``params`` (display settings).

    Args:
        crnn: the CRNN model being trained.
        train_loader: yields ``(image_batch, sample_indices)`` pairs.
        criterion: CTC-style loss called as
            ``criterion(preds, targets, pred_lengths, target_lengths)``.
        epoch: current epoch number (used for progress display only).
    """
    # Make sure every weight is trainable and the model is in train mode.
    for p in crnn.parameters():
        p.requires_grad = True
    crnn.train()

    # Running loss average, reset after each display interval.
    loss_avg = utils.averager()
    for i_batch, (image, index) in enumerate(train_loader):
        # image: [batch, channels, height, width], e.g. [32, 1, 32, 160]
        image = image.to(device)
        batch_size = image.size(0)

        # Ground-truth strings for this batch, looked up by sample index.
        label = utils.get_batch_label(dataset, index)

        # preds: [seq_len, batch, n_classes] — seq_len e.g. 41.
        preds = crnn(image)

        # Flat encoded targets plus per-sample target lengths.
        label_text, label_length = converter.encode(label)

        # CTC needs the prediction length for every sample (all == seq_len).
        preds_size = torch.IntTensor([preds.size(0)] * batch_size)

        # Mean loss per sample in the batch.
        cost = criterion(preds, label_text, preds_size,
                         label_length) / batch_size

        crnn.zero_grad()
        cost.backward()
        optimizer.step()

        loss_avg.add(cost)

        if (i_batch + 1) % params.displayInterval == 0:
            print('[%d/%d][%d/%d] Loss: %f' %
                  (epoch, params.epochs, i_batch, len(train_loader),
                   loss_avg.val()))
            loss_avg.reset()
Example #2
0
        # (fragment: the enclosing function's signature lies outside this view)
        net.zero_grad()   # clear accumulated gradients on the model
        cost.backward()   # backpropagate the batch loss
        optimizer.step()  # apply the parameter update
    return cost


# Top-level training loop: run config.niter epochs, report the running loss,
# optionally append it to a log file, and validate after every epoch.
# Relies on module-level globals: crnn, train_loader, criterion, optimizer,
# loss_avg, config, log_filename, test_dataset, trainBatch and val.
for epoch in range(config.niter):
    loss_avg.reset()
    print('epoch {}....'.format(epoch))
    # NOTE(review): trainBatch presumably consumes batches from this
    # iterator as a global — confirm against its definition.
    train_iter = iter(train_loader)
    i = 0
    n_batch = len(train_loader)
    while i < n_batch:
        # Ensure gradients are enabled and the model is in training mode.
        for p in crnn.parameters():
            p.requires_grad = True
        crnn.train()
        cost = trainBatch(crnn, criterion, optimizer)
        print('epoch: {} iter: {}/{} Train loss: {:.3f}'.format(
            epoch, i, n_batch, cost.item()))
        # Bug fix: the original added `cost` to the averager twice per batch.
        loss_avg.add(cost)
        i += 1
    print('Train loss: %f' % (loss_avg.val()))
    if config.use_log:
        # Append a timestamped train-loss entry to the log file.
        with open(log_filename, 'a') as f:
            f.write('{}\n'.format(
                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')))
            f.write('train_loss:{}\n'.format(loss_avg.val()))

    # Evaluate on the held-out set after each epoch.
    val(crnn, test_dataset, criterion)