Example #1
def val(net, dataset, criterion, max_iter=100):
    print('Start val')
    # freeze gradients while validating
    for p in net.parameters():
        p.requires_grad = False

    num_correct, num_all = val_model(config.val_infofile,
                                     net,
                                     True,
                                     log_file='compare-' +
                                     config.saved_model_prefix + '.log')
    accuracy = num_correct / num_all

    print('ocr_acc: %f' % (accuracy))
    if config.use_log:
        with open(log_filename, 'a') as f:
            f.write('ocr_acc:{}\n'.format(accuracy))
    global best_acc
    if accuracy > best_acc:
        best_acc = accuracy
        # new best accuracy: save an epoch/accuracy-tagged checkpoint
        torch.save(
            crnn.state_dict(),
            '{}/{}_{}_{}.pth'.format(config.saved_model_dir,
                                     config.saved_model_prefix, epoch,
                                     int(best_acc * 1000)))
    # always save the latest weights under a fixed name
    torch.save(
        crnn.state_dict(), '{}/{}.pth'.format(config.saved_model_dir,
                                              config.saved_model_prefix))
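A minimal sketch of how this val() routine might be driven by the surrounding training script. best_acc, log_filename, crnn and epoch are module-level names in the original project; config.epochs, train_one_epoch, train_loader and val_dataset are hypothetical placeholders here.

best_acc = 0.0
log_filename = 'train.log'                    # assumed log path

for epoch in range(config.epochs):            # config.epochs is an assumption
    train_one_epoch(crnn, train_loader)       # hypothetical training step
    val(crnn, val_dataset, criterion)         # logs accuracy and saves checkpoints
    for p in crnn.parameters():               # re-enable the gradients val() froze
        p.requires_grad = True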
Example #2
def main(crnn, train_loader, val_loader, criterion, optimizer):

    crnn = crnn.to(device)
    criterion = criterion.to(device)
    for i, epoch in enumerate(range(params.epochs)):
        # if i<1:
        train(crnn, train_loader, criterion, epoch)
        # max_i caps the number of validation batches to save time; set it to len(val_loader) to validate on the whole test set
        accuracy = val(crnn, val_loader, criterion, epoch, max_i=1000)
        for p in crnn.parameters():
            p.requires_grad = True
        # if accuracy > params.best_accuracy:
        torch.save(
            crnn.state_dict(),
            '{0}/crnn_Rec_done_{1}_{2}.pth'.format(params.experiment, epoch,
                                                   accuracy))
        torch.save(crnn.state_dict(),
                   '{0}/crnn_best.pth'.format(params.experiment))
        print("is best accuracy: {0}".format(accuracy > params.best_accuracy))
Example #3
        preds = preds.view(-1, preds_size.item(), 1, 23)

        for idx, head in enumerate(preds):

            # greedy decoding: argmax over the class dimension, then flatten
            _, preds1 = head.max(2)
            preds1 = preds1.transpose(1, 0).contiguous().view(-1)
            raw_pred = converter.decode(preds1.data,
                                        preds_size.data,
                                        raw=False)

            # keep only predictions matching the expected pattern (regex in `temp`)
            postpro = re.findall(temp, raw_pred)
            sim_pred = postpro[0] if postpro else 'Unknown'

            result[idx] = sim_pred

        if result[0] == 'Unknown':
            fp_1 += 1
            fp_2 += 1
        elif result[0] == y[0]:
            tp_1 += 1
            if result[0] == result[1]:
                tp_2 += 1
            else:
                fp_2 += 1
        elif result[0] != y[0]:
            fn_1 += 1
            if result[0] == result[1]:
                fn_2 += 1

torch.save(crnn.state_dict(), '/content/drive/My Drive/WeightNet/OCR(1.6)')
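The snippet only accumulates the tp/fp/fn counters; a hedged sketch of turning them into precision and recall for the first prediction head (the original reporting code is not shown):

precision_1 = tp_1 / (tp_1 + fp_1) if (tp_1 + fp_1) > 0 else 0.0
recall_1 = tp_1 / (tp_1 + fn_1) if (tp_1 + fn_1) > 0 else 0.0
print('head 1 precision: {:.3f}, recall: {:.3f}'.format(precision_1, recall_1))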
Example #4
        #     numLoss = 0
        # else:
        cost = trainBatch(crnn, criterion, optimizer)
        loss_avg.add(cost)
        i += 1

        # if i % opt.displayInterval == 0:
        #     print('[%d/%d][%d/%d] Loss: %f' % (epoch, opt.niter, i, len(train_loader), loss_avg.val()))
        #     loss_avg.reset()

        if i % opt.valInterval == 0:
            testLoss, accuracy = val(crnn, criterion)
            # print('Test loss: %f, accuray: %f' % (testLoss, accuracy))
            print("epoch: {}, step: {}, train loss: {:.3f}, test loss: {:.3f}, test accuracy: {:.3f}".format(epoch, num, loss_avg.val(), testLoss, accuracy))
            loss_avg.reset()
        num += 1
        # lasttestLoss = min(lasttestLoss, testLoss)
        
        # checkpoint
        if lasttestLoss > testLoss:
            print("New best validation loss, saving model...\n")
            lasttestLoss = testLoss
            # delete(opt.experiment)  # remove the old checkpoints
            torch.save(crnn.state_dict(), '{}/netCRNN.pth'.format(opt.experiment))
            numLoss = 0
        else:
            numLoss += 1
    
    adjust_learning_rate(optimizer)
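adjust_learning_rate() is not defined in this excerpt; a minimal sketch of the multiplicative decay it might apply (the 0.9 factor is an assumption):

def adjust_learning_rate(optimizer, decay=0.9):
    # scale the learning rate of every parameter group by a fixed factor
    for param_group in optimizer.param_groups:
        param_group['lr'] *= decay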

Example #5
File: train.py  Project: FLming/CRNN
    trainloader, validloader = prepare_dataloader()

    crnn = crnn.CRNN(num_classes).to(device)

    criterion = torch.nn.CTCLoss().to(device)
    if args.adam:
        optimizer = optim.Adam(crnn.parameters(), lr=args.lr)
    elif args.rmsprop:
        optimizer = optim.RMSprop(crnn.parameters(), lr=args.lr)
    else:
        optimizer = optim.Adadelta(crnn.parameters())

    if args.pretrained != '':
        print('loading pretrained model from {}'.format(args.pretrained))
        crnn.load_state_dict(torch.load(args.pretrained))

    crnn.train()
    for epoch in range(args.num_epoch):

        train(trainloader, crnn, converter, criterion, optimizer)

        if epoch % args.eval_epoch == 0:
            print('-------------------- eval --------------------')
            crnn.eval()
            validate(validloader, crnn, converter)
            crnn.train()
        if epoch % args.save_epoch == 0:
            torch.save(crnn.state_dict(),
                       '{}/crnn_{}.pth'.format(args.save_path, epoch + 1))
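A hedged sketch of the command-line arguments this script appears to read; the flag names follow the args.* accesses above, and every default value is an assumption:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--adam', action='store_true', help='use the Adam optimizer')
parser.add_argument('--rmsprop', action='store_true', help='use the RMSprop optimizer')
parser.add_argument('--lr', type=float, default=0.001)        # assumed default
parser.add_argument('--pretrained', default='', help='path to a pretrained .pth file')
parser.add_argument('--num_epoch', type=int, default=100)     # assumed default
parser.add_argument('--eval_epoch', type=int, default=1)      # assumed default
parser.add_argument('--save_epoch', type=int, default=1)      # assumed default
parser.add_argument('--save_path', default='./checkpoints')   # assumed default
args = parser.parse_args()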