Example #1
# Imports assumed by the examples below; `logger`, `metrics`, and `utils`
# are modules belonging to the surrounding project, not third-party packages.
import os
import time

import numpy as np
import torch

import logger
import metrics
import utils

def init_logger(args, model):
    # set loggers
    exp_name = args.name
    exp_logger = logger.Experiment(exp_name, args.__dict__)
    exp_logger.add_meters('train', metrics.make_meters(args.num_classes))
    exp_logger.add_meters('val', metrics.make_meters(args.num_classes))
    exp_logger.add_meters('hyperparams', {'learning_rate': metrics.ValueMeter()})
    return exp_logger
def test(args, eval_data_loader, model, criterion, epoch, eval_score=None,
         output_dir='pred', has_gt=True, print_freq=10):

    model.eval()
    meters = metrics.make_meters()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(eval_data_loader):
            # print(input.size())
            batch_size = input.size(0)
            target = target[1]
            meters['data_time'].update(time.time()-end, n=batch_size)
           
            input, target = input.to(args.device).requires_grad_(), target.to(args.device)
            
            output = model(input)

            loss = criterion(output, target)
            
            meters['loss'].update(loss.data.item(), n=batch_size)

            # measure accuracy and record loss
            if eval_score is not None:
                mae, squared_mse, count = eval_score(output, target)
                meters['mae'].update(mae, n=batch_size)
                meters['squared_mse'].update(squared_mse, n=batch_size)

            # measure elapsed time
            meters['batch_time'].update(time.time() - end, n=batch_size)
            end = time.time()
            print('Testing: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'MAE {mae.val:.3f} ({mae.avg:.3f})\t'
                  'MSE {mse:.3f} ({avg_mse:.3f})'.format(
                      i, len(eval_data_loader),
                      batch_time=meters['batch_time'], loss=meters['loss'],
                      mae=meters['mae'], mse=np.sqrt(meters['squared_mse'].val),
                      avg_mse=np.sqrt(meters['squared_mse'].avg)),
                  flush=True)

            if args.short_run and i == 12:
                print(' --- running in short-run mode: leaving epoch earlier ---')
                break


        print(' * Test set: Average loss {:.4f}, MAE {:.3f}, MSE {:.3f} \n'.format(
            meters['loss'].avg, meters['mae'].avg,
            np.sqrt(meters['squared_mse'].avg)))

    metrics.save_meters(meters, os.path.join(args.log_dir, 'test_results_ep{}.json'.format(epoch)), epoch)        
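
The `metrics` module is project-specific and not shown on this page; the snippets only rely on meters that expose `update(value, n=...)`, `.val`, and `.avg`. Below is a minimal sketch of that interface, assuming a plain running-average meter; the project's actual `make_meters` also registers task-specific meters (e.g. a confusion matrix) that are omitted here.

class AverageMeter:
    """Minimal sketch of the meter interface used above (assumed, not the project's code)."""

    def __init__(self):
        self.val = 0.0   # most recent value
        self.sum = 0.0   # weighted sum of all updates
        self.count = 0   # total weight (number of samples seen)
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def make_meters():
    # Regression variant matching Example #1: loss, MAE, squared MSE plus timing meters.
    names = ['loss', 'mae', 'squared_mse', 'batch_time', 'data_time']
    return {name: AverageMeter() for name in names}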
Example #3
def test(args,
         eval_data_loader,
         model,
         criterion,
         epoch,
         eval_score=None,
         output_dir='pred',
         has_gt=True,
         print_freq=10):

    model.eval()
    meters = metrics.make_meters(args.num_classes)
    end = time.time()
    hist = np.zeros((args.num_classes, args.num_classes))
    res_list = {}
    with torch.no_grad():
        for i, (input, target_class, name) in enumerate(eval_data_loader):
            # print(input.size())
            batch_size = input.size(0)
            meters['data_time'].update(time.time() - end, n=batch_size)

            label = target_class.numpy()
            input, target_class = input.to(
                args.device).requires_grad_(), target_class.to(args.device)

            output = model(input)

            loss = criterion(output, target_class)

            meters['loss'].update(loss.data.item(), n=batch_size)

            # measure accuracy and record loss
            if eval_score is not None:
                acc1, pred, buff_label = eval_score(output, target_class)
                meters['acc1'].update(acc1, n=batch_size)
                meters['confusion_matrix'].update(
                    pred.squeeze(), buff_label.type(torch.LongTensor))

                _, pred = torch.max(output, 1)
                pred = pred.cpu().data.numpy()
                hist += metrics.fast_hist(pred.flatten(), label.flatten(),
                                          args.num_classes)
                mean_ap = round(
                    np.nanmean(metrics.per_class_iu(hist)) * 100, 2)
                meters['mAP'].update(mean_ap, n=batch_size)

                for idx, curr_name in enumerate(name):
                    res_list[curr_name] = [
                        pred[idx].item(), target_class[idx].item()
                    ]

            # measure elapsed time
            meters['batch_time'].update(time.time() - end, n=batch_size)
            end = time.time()
            print('Testing: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {score.val:.3f} ({score.avg:.3f})'.format(
                      i,
                      len(eval_data_loader),
                      batch_time=meters['batch_time'],
                      loss=meters['loss'],
                      score=meters['acc1']),
                  flush=True)

            if args.short_run and i == 12:
                print(' --- running in short-run mode: leaving epoch earlier ---')
                break

    if eval_score is not None:
        acc, acc_cls, mean_iu, fwavacc = metrics.evaluate(hist)
        meters['acc_class'].update(acc_cls)
        meters['meanIoU'].update(mean_iu)
        meters['fwavacc'].update(fwavacc)

        print(
            ' * Test set: Average loss {:.4f}, Accuracy {:.3f}%, Accuracy per class {:.3f}%, meanIoU {:.3f}%, fwavacc {:.3f}% \n'
            .format(meters['loss'].avg, meters['acc1'].avg,
                    meters['acc_class'].val, meters['meanIoU'].val,
                    meters['fwavacc'].val))

    metrics.save_meters(
        meters,
        os.path.join(args.log_dir, 'test_results_ep{}.json'.format(epoch)),
        epoch)
    utils.save_res_list(
        res_list,
        os.path.join(args.res_dir,
                     'test_results_list_ep{}.json'.format(epoch)))
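
Example #3 builds a confusion histogram with `metrics.fast_hist` and turns it into per-class IoU with `metrics.per_class_iu`. Neither helper is shown on this page; the calls match the conventional segmentation-style implementation sketched below, which is an assumption about, not a copy of, the project's own code.

def fast_hist(pred, label, n):
    # n x n confusion histogram: rows index ground-truth labels, columns index predictions.
    mask = (label >= 0) & (label < n)
    return np.bincount(n * label[mask].astype(int) + pred[mask],
                       minlength=n ** 2).reshape(n, n)


def per_class_iu(hist):
    # Intersection over union per class; classes never seen yield NaN (hence np.nanmean above).
    return np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))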
Example #4
def test_digitsum(args, test_loader, model, criterion, epoch, eval_score,
                  output_dir, has_gt, print_freq):

    # switch to evaluate mode
    model.eval()
    meters = metrics.make_meters(args.num_classes)
    end = time.time()

    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            batch_size = len(batch)

            meters['data_time'].update(time.time() - end, n=batch_size)

            output_batch, target_batch = utils.compute_batch(
                batch, args, model)
            output_batch, target_batch = output_batch.to(
                args.device).requires_grad_(), target_batch.to(args.device)

            loss = criterion(output_batch, target_batch)
            meters['loss'].update(loss.data.item(), n=batch_size)

            # measure accuracy and record loss
            if eval_score is not None:
                input_sizes = utils.compute_input_sizes(batch)

                acc_batch, pred, buff_label = eval_score(
                    output_batch, target_batch)

                # Update accuracy Acc1 on batch
                meters['acc1'].update(acc_batch, n=batch_size)

                # accuracy per class of set size

                class_correct_batch, class_total_batch = metrics.set_acc_class(
                    pred, buff_label, batch_size, input_sizes,
                    args.max_size_val)

                class_correct_batch = class_correct_batch.to(
                    'cpu').data.numpy()
                class_total_batch = class_total_batch.to('cpu').data.numpy()

                class_correct = meters['set_class_correct'].val
                class_total = meters['set_class_total'].val

                class_correct += class_correct_batch
                class_total += class_total_batch

                meters['set_class_correct'].update(class_correct)
                meters['set_class_total'].update(class_total)

            # measure elapsed time
            meters['batch_time'].update(time.time() - end, n=batch_size)
            end = time.time()

            if i % print_freq == 0:
                print('Testing: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {score.val:.3f} ({score.avg:.4f})'.format(
                          i,
                          len(test_loader),
                          batch_time=meters['batch_time'],
                          loss=meters['loss'],
                          score=meters['acc1']),
                      flush=True)

    if eval_score is not None:
        set_mAP = metrics.set_mAP(meters,
                                  args.min_size_val,
                                  args.max_size_val,
                                  weight=args.set_weight)
        if isinstance(set_mAP, torch.Tensor):
            set_mAP = set_mAP.cpu().data.item()
        meters['set_mAP'].update(set_mAP)

        print(' * Testing set: \t'
              'Average loss {:.4f}, Accuracy {:.3f}%\n'.format(
                  meters['loss'].avg, meters['acc1'].avg))

        print('Accuracy per class of set size:')
        for i in range(args.min_size_val, args.max_size_val + 1):
            if (meters['set_class_total'].val[i] != 0):
                print('  Acc@ set size = {0} : {score:.2f} %'.format(
                    i,
                    score=100 * meters['set_class_correct'].val[i] /
                    meters['set_class_total'].val[i]))

        # convert numpy ndarrays to lists to be processed into json format in run.py
        class_correct = meters['set_class_correct'].val.tolist()
        class_total = meters['set_class_total'].val.tolist()
        meters['set_class_correct'].update(class_correct)
        meters['set_class_total'].update(class_total)

    metrics.save_meters(
        meters,
        os.path.join(args.log_dir, 'test_results_ep{}.json'.format(epoch)),
        epoch)
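
The `.tolist()` conversion near the end of test_digitsum is needed because NumPy arrays are not JSON-serializable, and metrics.save_meters presumably writes the meter values out with the standard json module. A tiny illustration of the difference:

import json
import numpy as np

counts = np.array([3, 5, 2])
# json.dumps({'set_class_correct': counts}) would raise
# TypeError: Object of type ndarray is not JSON serializable
print(json.dumps({'set_class_correct': counts.tolist()}))  # {"set_class_correct": [3, 5, 2]}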
Example #5
def test_classification(args, test_loader, model, criterion, epoch, eval_score,
                        output_dir, has_gt, print_freq):

    model.eval()
    meters = metrics.make_meters(args.num_classes)
    end = time.time()
    hist = np.zeros((args.num_classes, args.num_classes))

    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            batch_size = len(batch)

            meters['data_time'].update(time.time() - end, n=batch_size)

            output_batch, target_batch = utils.compute_batch(
                batch, args, model)
            output_batch, target_batch = output_batch.to(
                args.device).requires_grad_(), target_batch.to(args.device)

            # copy labels back to host memory before converting to NumPy
            label_batch = target_batch.cpu().numpy()

            loss = criterion(output_batch, target_batch)
            meters['loss'].update(loss.data.item(), n=batch_size)

            # measure accuracy and record loss
            if eval_score is not None:
                acc1, pred, buff_label = eval_score(output_batch, target_batch)
                meters['acc1'].update(acc1, n=batch_size)
                meters['confusion_matrix'].update(
                    pred.squeeze(), buff_label.type(torch.LongTensor))

                _, pred = torch.max(output_batch, 1)

                pred = pred.to('cpu').data.numpy()
                hist += metrics.fast_hist(pred.flatten(),
                                          label_batch.flatten(),
                                          args.num_classes)
                mean_ap = round(
                    np.nanmean(metrics.per_class_iu(hist)) * 100, 2)
                meters['mAP'].update(mean_ap, n=batch_size)

            # measure elapsed time
            meters['batch_time'].update(time.time() - end, n=batch_size)
            end = time.time()

            if i % print_freq == 0:
                print('Testing: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {score.val:.3f} ({score.avg:.3f})'.format(
                          i,
                          len(test_loader),
                          batch_time=meters['batch_time'],
                          loss=meters['loss'],
                          score=meters['acc1']),
                      flush=True)

    if eval_score is not None:
        acc, acc_cls, mean_iu, fwavacc = metrics.evaluate(hist)
        meters['acc_class'].update(acc_cls)
        meters['meanIoU'].update(mean_iu)
        meters['fwavacc'].update(fwavacc)

        print(' * Test set: Average loss {:.4f}, Accuracy {:.3f}%, '
              'Accuracy per class {:.3f}%, meanIoU {:.3f}%, '
              'fwavacc {:.3f}% \n'.format(meters['loss'].avg,
                                          meters['acc1'].avg,
                                          meters['acc_class'].val,
                                          meters['meanIoU'].val,
                                          meters['fwavacc'].val))

    metrics.save_meters(
        meters,
        os.path.join(args.log_dir, 'test_results_ep{}.json'.format(epoch)),
        epoch)
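
Examples #3 and #5 both report overall accuracy, per-class accuracy, mean IoU, and frequency-weighted accuracy through metrics.evaluate(hist). The module is not shown here; the call is consistent with the standard confusion-histogram evaluation sketched below, given as an assumption rather than the project's verified implementation.

def evaluate(hist):
    # hist: (num_classes, num_classes) confusion histogram, rows = ground truth, cols = predictions.
    acc = np.diag(hist).sum() / hist.sum()                  # overall accuracy
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))  # mean per-class accuracy
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)                                # mean IoU
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()         # frequency-weighted IoU
    return acc, acc_cls, mean_iu, fwavacc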