Example #1
def val(net, data_loader, criterion):
    print('Start val')
    for p in net.parameters():
        p.requires_grad = False

    net.eval()
    val_iter = iter(data_loader)
    val_loss_avg = utils.averager()
    val_cer_avg = utils.averager()
    max_iter = len(data_loader)
    print('Total files:', len(data_loader.dataset), ', Number of iters:', max_iter)
    with torch.no_grad():
        for i in range(max_iter):
            if i % 10 == 0:
                print('iter', i)
            data = next(val_iter)
            cpu_images, cpu_texts, imgpath = data
            batch_sz = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            t, l = converter.encode(cpu_texts)
            utils.loadData(text, t)
            utils.loadData(length, l)

            preds = net(image)
            preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_sz))
            cost = criterion(preds, text, preds_size, length) / batch_sz
            cost = cost.detach().item()
            val_loss_avg.add(cost)

            _, preds = preds.max(2)
            preds = preds.transpose(1, 0).contiguous().view(-1)
            sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
            if batch_sz == 1:
                sim_preds = [sim_preds]
            cer_loss = utils.cer_loss(sim_preds, cpu_texts)
            if debug and cer_loss[0] > 0 and batch_sz == 1:
                print(i,'\nimg path', imgpath)
                print('sim pred', sim_preds)
                print('cpu text', cpu_texts)
                print('cer', cer_loss)
                inv_tensor = inv_normalize(cpu_images[0])
                cv_img = inv_tensor.permute(1, 2, 0).numpy()
                cv_img_convert = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
                cv2.imshow('image data', cv_img_convert)
                ch = cv2.waitKey(0)
                if ch == 27:
                    break
            val_cer_avg.add(cer_loss)

    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('\nraw: %-30s \nsim: %-30s\n gt: %-30s' % (raw_pred, pred, gt))
    test_loss = val_loss_avg.val()
    test_cer = val_cer_avg.val()
    print('\nTest loss: %f - test cer %f' % (test_loss, test_cer))
    return test_loss, test_cer
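
The converter used above is not included in these examples. For reference, here is a minimal sketch of standard greedy CTC decoding (collapse repeated labels, drop the blank at index 0), which is what converter.decode(..., raw=False) is assumed to perform; the indexing convention (class k maps to alphabet[k - 1]) is an assumption.

import torch

def greedy_ctc_decode(preds, alphabet, blank=0):
    # preds: (T, B, nclass) raw network output, as produced by net(image) above
    _, idx = preds.max(2)                 # best class per time step -> (T, B)
    idx = idx.transpose(1, 0)             # (B, T)
    texts = []
    for seq in idx:
        chars = []
        prev = blank
        for k in seq.tolist():
            if k != blank and k != prev:  # collapse repeats, skip CTC blanks
                chars.append(alphabet[k - 1])
            prev = k
        texts.append(''.join(chars))
    return texts

# Tiny check: 3 time steps, batch of 1, alphabet 'ab' (class 0 is the blank)
logits = torch.tensor([[[0.1, 2.0, 0.0]],
                       [[0.1, 2.0, 0.0]],
                       [[0.1, 0.0, 2.0]]])
print(greedy_ctc_decode(logits, 'ab'))    # ['ab']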
Example #2
def val(net, data_loader, criterion, max_iter=1000):
    print('Start val')

    for p in net.parameters():
        p.requires_grad = False

    net.eval()

    val_iter = iter(data_loader)

    val_loss_avg = utils.averager()
    val_cer_avg = utils.averager()
    max_iter = min(max_iter, len(data_loader))
    with torch.no_grad():
        for i in range(max_iter):
            data = next(val_iter)
            cpu_images, cpu_texts = data
            batch_size = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            t, l = converter.encode(cpu_texts)
            utils.loadData(text, t)
            utils.loadData(length, l)

            preds = net(image)
            preds_size = Variable(torch.IntTensor([preds.size(0)] *
                                                  batch_size))
            cost = criterion(preds, text, preds_size, length) / batch_size
            cost = cost.detach().item()
            val_loss_avg.add(cost)

            _, preds = preds.max(2)
            preds = preds.transpose(1, 0).contiguous().view(-1)
            sim_preds = converter.decode(preds.data,
                                         preds_size.data,
                                         raw=False)
            cer_loss = utils.cer_loss(sim_preds, cpu_texts)
            val_cer_avg.add(cer_loss)

    raw_preds = converter.decode(preds.data, preds_size.data,
                                 raw=True)[:opt.n_test_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
        print('%-30s => %-30s, gt: %-30s' % (raw_pred, pred, gt))

    print('Test loss: %f - cer loss %f' %
          (val_loss_avg.val(), val_cer_avg.val()))
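
utils.averager() is also not part of these snippets; below is a minimal sketch of a running-average helper with the add()/val() interface the validation code relies on (the handling of tensors and per-sample lists is an assumption).

class Averager:
    # Running average supporting scalars, tensors, and lists of per-sample values.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def add(self, v):
        if hasattr(v, 'numel'):            # torch tensor
            self.count += v.numel()
            self.sum += float(v.sum())
        elif isinstance(v, (list, tuple)): # e.g. per-sample CER values
            self.count += len(v)
            self.sum += float(sum(v))
        else:                              # plain number
            self.count += 1
            self.sum += float(v)

    def val(self):
        return self.sum / self.count if self.count else 0.0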
Example #3
def trainBatch(net, data, criterion, optimizer):
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    preds = net(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    net.zero_grad()
    cost.backward()
    optimizer.step()
    cost = cost.detach().item()

    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
    cer_loss = utils.cer_loss(sim_preds, cpu_texts)
    return cost, cer_loss, len(cpu_images)
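
The examples never show the outer loop that drives trainBatch(); here is a hedged sketch of such a loop under the same assumptions the snippets already make (a crnn model, a train_loader, a CTC criterion, an optimizer and the utils module are set up elsewhere); nepochs is a hypothetical value.

nepochs = 10  # hypothetical

loss_avg = utils.averager()
cer_avg = utils.averager()
for epoch in range(nepochs):
    crnn.train()
    for data in train_loader:
        cost, cer, n = trainBatch(crnn, data, criterion, optimizer)
        loss_avg.add(cost)
        cer_avg.add(cer)
    print('epoch %d - train loss %.4f - train cer %.4f'
          % (epoch, loss_avg.val(), cer_avg.val()))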
Example #4
def predict(dir, batch_sz, max_iter=10000):
    print('Init CRNN classifier')
    image = torch.FloatTensor(batch_sz, 3, imgH, imgH)
    model = crnn.CRNN64(imgH, nc, nclass, 256)
    #model = crnn128.CRNN128(imgH, nc, nclass, 256)
    if gpu is not None:
        print('Use GPU', gpu)
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu
        model = model.cuda()
        image = image.cuda()
    print('loading pretrained model from %s' % pretrained)
    model.load_state_dict(torch.load(pretrained, map_location='cpu'))

    converter = strLabelConverter(alphabet, ignore_case=False)
    val_dataset = ImageFileLoader(dir,
                                  flist=test_list,
                                  label=label,
                                  transform=transform_test)
    num_files = len(val_dataset)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_sz,
                                             num_workers=workers,
                                             shuffle=False)

    image = Variable(image)

    # for p in crnn.parameters():
    #     p.requires_grad = False
    model.eval()
    print('Start predict in folder', dir)
    val_iter = iter(val_loader)
    max_iter = min(max_iter, len(val_loader))
    print('Number of samples', num_files)
    begin = time.time()
    with torch.no_grad():
        for i in range(max_iter):
            data = next(val_iter)
            cpu_images, cpu_texts, img_paths = data
            batch_size = cpu_images.size(0)
            utils.loadData(image, cpu_images)
            preds = model(image)

            preds = preds.squeeze(1)
            raw_pred, sim_pred, sent_prob = decode(converter, preds)

            # preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
            # _, preds = preds.max(2)
            # preds = preds.transpose(1, 0).contiguous().view(-1)
            # sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
            # #print(sim_pred)
            # raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
            #print(cpu_texts[0])
            if debug:
                print('\n', raw_pred)
                print('\n', round(sent_prob, 3), sim_pred, img_paths)
                inv_tensor = inv_normalize(cpu_images[0])
                cv_img = inv_tensor.permute(1, 2, 0).numpy()
                cv_img_convert = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
                cv2.imshow('image data', cv_img_convert)
                ch = cv2.waitKey(0)
                if ch == 27:
                    break
    end = time.time()
    processing_time = end - begin
    print('Processing time:', processing_time)
    print('Speed:', num_files / processing_time, 'fps')
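
The decode(converter, preds) helper called in predict() is not included either; a possible sketch, assuming preds is the (T, nclass) output for a single image after squeeze(1) and that the sequence confidence is the product of the per-step maximum softmax probabilities (both assumptions).

import torch
import torch.nn.functional as F

def decode(converter, preds):
    probs = F.softmax(preds, dim=1)        # per-step class probabilities
    max_probs, idx = probs.max(1)          # greedy path over time steps
    preds_size = torch.IntTensor([idx.size(0)])
    raw_pred = converter.decode(idx.data, preds_size, raw=True)
    sim_pred = converter.decode(idx.data, preds_size, raw=False)
    sent_prob = float(max_probs.prod())    # naive sequence confidence
    return raw_pred, sim_pred, sent_prob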