Example #1
File: iqa.py, Project: Gavinylk/CNN-FRIQA
def train(train_loader, model, criterion, optimizer, epoch):
    losses = AverageMeter()
    len_train = len(train_loader)
    pb = ProgressBar(len_train-1)

    print("Training")

    # Switch to train mode
    model.train()
    criterion.cuda()
    for i, ((img, ref), score) in enumerate(train_loader):
        img, ref, score = img.cuda(), ref.cuda(), score.squeeze().cuda()

        # Compute output
        output = model(img, ref)
        loss = criterion(output, score)

        # Record the loss (weighted by batch size)
        losses.update(loss.data, img.shape[0])

        # Compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pb.show(i, '[{0:5d}/{1:5d}]\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                .format(i, len_train, loss=losses))
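
The training loop above relies on an AverageMeter helper that the excerpt does not show. A minimal sketch, assuming the val/avg interface used in the format string (modeled on the well-known helper from the PyTorch ImageNet example, not necessarily this project's exact class):

class AverageMeter:
    """Tracks the most recent value and a running weighted average."""
    def __init__(self):
        self.val = 0.0    # last value recorded
        self.sum = 0.0    # weighted sum of all values
        self.count = 0    # total weight seen so far
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        # n is the weight, e.g. the batch size
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
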
Example #2
def test(test_data_loader, model):
    srocc = SROCC()
    plcc = PLCC()
    rmse = RMSE()
    len_test = len(test_data_loader)
    pb = ProgressBar(len_test, show_step=True)

    print("Testing")

    model.eval()
    with torch.no_grad():
        for i, ((img, ref), score) in enumerate(test_data_loader):
            img, ref = img.cuda(), ref.cuda()
            # Batch size is assumed to be 1, so the output and the
            # label can be formatted as scalars below
            output = model(img, ref).cpu().data.numpy()
            score = score.data.numpy()

            srocc.update(score, output)
            plcc.update(score, output)
            rmse.update(score, output)

            pb.show(
                i, "Test: [{0:5d}/{1:5d}]\t"
                "Score: {2:.4f}\t"
                "Label: {3:.4f}".format(i + 1, len_test, float(output),
                                        float(score)))

    print("\n\nSROCC: {0:.4f}\n"
          "PLCC: {1:.4f}\n"
          "RMSE: {2:.4f}".format(srocc.compute(), plcc.compute(),
                                 rmse.compute()))
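
SROCC, PLCC, and RMSE are the standard full-reference IQA metrics: Spearman rank-order correlation, Pearson linear correlation, and root mean square error. The project's metric classes are not shown; below is a minimal sketch of compatible accumulators built on scipy.stats, with the update/compute interface inferred from the calls above:

import numpy as np
from scipy import stats

class _PairedMetric:
    """Accumulates (label, prediction) pairs across batches."""
    def __init__(self):
        self.labels, self.preds = [], []

    def update(self, labels, preds):
        self.labels.append(np.ravel(labels))
        self.preds.append(np.ravel(preds))

    def _gather(self):
        return np.concatenate(self.labels), np.concatenate(self.preds)

class SROCC(_PairedMetric):
    def compute(self):
        x, y = self._gather()
        return stats.spearmanr(x, y)[0]

class PLCC(_PairedMetric):
    def compute(self):
        x, y = self._gather()
        return stats.pearsonr(x, y)[0]

class RMSE(_PairedMetric):
    def compute(self):
        x, y = self._gather()
        return float(np.sqrt(np.mean((x - y) ** 2)))
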
Example #3
File: iqa.py, Project: Gavinylk/CNN-FRIQA
def validate(val_loader, model, criterion, show_step=False):
    losses = AverageMeter()
    srocc = SROCC()
    len_val = len(val_loader)
    pb = ProgressBar(len_val-1, show_step=show_step)

    print("Validation")

    # Switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for i, ((img, ref), score) in enumerate(val_loader):
            img, ref, score = img.cuda(), ref.cuda(), score.squeeze().cuda()

            # Compute the output and the loss
            output = model(img, ref)
            loss = criterion(output, score)
            losses.update(loss.data, img.shape[0])

            output = output.cpu().data
            score = score.cpu().data
            srocc.update(score.numpy(), output.numpy())

            pb.show(i, '[{0:5d}/{1:5d}]\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Output {out:.4f}\t'
                    'Target {tar:.4f}\t'
                    .format(i, len_val, loss=losses,
                            out=float(output), tar=float(score)))


    # Return 1 - SROCC so that a lower value is better
    # (losses.avg could serve as an alternative criterion)
    return float(1.0 - srocc.compute())
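
A sketch of how train() and validate() might be wired into an epoch loop, checkpointing on the best validation criterion; the loss, optimizer, and hyperparameters here are illustrative placeholders, not taken from the project:

import torch

def run(model, train_loader, val_loader, num_epochs=100, lr=1e-4):
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best = float('inf')
    for epoch in range(num_epochs):
        train(train_loader, model, criterion, optimizer, epoch)
        # validate() returns 1 - SROCC, so lower is better
        crit = validate(val_loader, model, criterion)
        if crit < best:
            best = crit
            torch.save(model.state_dict(), 'best_model.pth')
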
Example #4
def _to_pool(self):
    len_data = len(self)
    pb = SimpleProgressBar(len_data)
    print("\ninitializing data pool...")
    for index in range(len_data):
        # Store the data part of each sample (element 0) in the pool
        self._pool(index).store(self[index][0])
        pb.show(index, "[{:d}]/[{:d}] ".format(index + 1, len_data))
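
ProgressBar and SimpleProgressBar are project-specific utilities. A minimal sketch of a SimpleProgressBar exposing the show(index, message) interface the snippets call (purely illustrative, not the project's implementation):

import sys

class SimpleProgressBar:
    def __init__(self, total, width=40):
        self.total = total
        self.width = width

    def show(self, index, message=''):
        # index is zero-based; draw a bar proportional to progress
        done = int(self.width * (index + 1) / self.total)
        bar = '#' * done + '-' * (self.width - done)
        sys.stdout.write('\r{} [{}]'.format(message, bar))
        sys.stdout.flush()
        if index + 1 == self.total:
            sys.stdout.write('\n')
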
Example #5
File: iqa.py, Project: Gavinylk/CNN-FRIQA
def test(test_data_loader, model):
    scores = []
    srocc = SROCC()
    plcc = PLCC()
    rmse = RMSE()
    len_test = len(test_data_loader)
    pb = ProgressBar(len_test-1, show_step=True)

    print("Testing")

    model.eval()
    with torch.no_grad():
        for i, ((img, ref), score) in enumerate(test_data_loader):
            img, ref = img.cuda(), ref.cuda()
            output = model(img, ref).cpu().data.numpy()
            score = score.data.numpy()

            srocc.update(score, output)
            plcc.update(score, output)
            rmse.update(score, output)

            pb.show(i, 'Test: [{0:5d}/{1:5d}]\t'
                    'Score: {2:.4f}\t'
                    'Label: {3:.4f}'
                    .format(i, len_test, float(output), float(score)))

            scores.append(output)
    
    # Write scores to file, one per line
    with open('../test/scores.txt', 'w') as f:
        for s in scores:
            f.write(str(s) + '\n')

    print('\n\nSROCC: {0:.4f}\n'
          'PLCC: {1:.4f}\n'
          'RMSE: {2:.4f}'
          .format(srocc.compute(), plcc.compute(), rmse.compute()))