Code Example #1
def evaluate_F_score(val_loader, net, thresh, Fpc, Fc):
    '''
    Runs one pass over the validation set and accumulates per-class
    boundary F-score statistics at the given threshold.
    val_loader: data loader for validation
    net: the segmentation network
    thresh: boundary threshold as a string; converted to float for eval_mask_boundary
    Fpc, Fc: per-class accumulator arrays of shape (num_classes,)
    return: the accumulated Fpc array

    Relies on module-level names (torch, np, logging, args,
    eval_mask_boundary) defined elsewhere in the source file.
    '''
    for vi, data in enumerate(val_loader):
        input, mask, img_names = data
        assert len(input.size()) == 4 and len(mask.size()) == 3
        assert input.size()[2:] == mask.size()[1:]
        input, mask_cuda = input.cuda(), mask.cuda()

        # Forward pass with gradients disabled
        with torch.no_grad():
            seg_out = net(input)

        # Hard per-pixel class predictions, moved to the CPU for numpy conversion
        seg_predictions = seg_out.data.max(1)[1].cpu()

        print('evaluating: %d / %d' % (vi + 1, len(val_loader)))
        # Per-class boundary F statistics for this batch
        _Fpc, _Fc = eval_mask_boundary(seg_predictions.numpy(), mask.numpy(), args.dataset_cls.num_classes,
                                       bound_th=float(thresh))
        Fc += _Fc
        Fpc += _Fpc

        del seg_out, vi, data

    # Under distributed (APEX) training, sum the accumulators across all processes
    if args.apex:
        Fc_tensor = torch.cuda.FloatTensor(Fc)
        torch.distributed.all_reduce(Fc_tensor, op=torch.distributed.ReduceOp.SUM)
        Fc = Fc_tensor.cpu().numpy()
        Fpc_tensor = torch.cuda.FloatTensor(Fpc)
        torch.distributed.all_reduce(Fpc_tensor, op=torch.distributed.ReduceOp.SUM)
        Fpc = Fpc_tensor.cpu().numpy()

    # Only the main process logs the results
    if args.local_rank == 0:
        logging.info('Threshold: ' + thresh)
        logging.info('F_Score: ' + str(np.sum(Fpc / Fc) / args.dataset_cls.num_classes))
        logging.info('F_Score (Classwise): ' + str(Fpc / Fc))

    return Fpc
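
For orientation, here is a minimal, hypothetical sketch of how evaluate_F_score might be driven by a caller, following the per-threshold accumulator pattern visible in Code Example #2 below. The wrapper name run_boundary_evaluation is invented for illustration, and the use of args.eval_thresholds and args.dataset_cls is an assumption borrowed from that example.

# Hypothetical driver sketch (not from the original project): assumes the same
# globals used above (args, a prepared val_loader, and net) are available.
import numpy as np

def run_boundary_evaluation(val_loader, net):
    net.eval()
    num_classes = args.dataset_cls.num_classes
    for thresh in args.eval_thresholds.split(','):
        # Fresh per-class accumulators for each threshold, as in Code Example #2
        Fpc = np.zeros((num_classes))
        Fc = np.zeros((num_classes))
        Fpc = evaluate_F_score(val_loader, net, thresh, Fpc, Fc)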
Code Example #2
File: train.py  Project: DailyActie/AI_DL_NET-GSCNN
def evaluate(val_loader, net):
    '''
    Runs the evaluation loop and logs the boundary F-score
    for each threshold in args.eval_thresholds.
    val_loader: data loader for validation
    net: the network
    return: None
    '''
    net.eval()
    # Evaluate at each boundary threshold configured in args.eval_thresholds
    for thresh in args.eval_thresholds.split(','):
        # The meters below are initialized but not updated in this snippet
        mf_score1 = AverageMeter()
        mf_pc_score1 = AverageMeter()
        ap_score1 = AverageMeter()
        ap_pc_score1 = AverageMeter()
        # Per-class accumulators for the boundary F-score
        Fpc = np.zeros((args.dataset_cls.num_classes))
        Fc = np.zeros((args.dataset_cls.num_classes))
        for vi, data in enumerate(val_loader):
            input, mask, edge, img_names = data
            assert len(input.size()) == 4 and len(mask.size()) == 3
            assert input.size()[2:] == mask.size()[1:]
            h, w = mask.size()[1:]

            batch_pixel_size = input.size(0) * input.size(2) * input.size(3)
            input, mask_cuda, edge_cuda = input.cuda(), mask.cuda(), edge.cuda()

            # Forward pass; the network returns both segmentation and edge outputs
            with torch.no_grad():
                seg_out, edge_out = net(input)

            # Hard class predictions and edge responses, moved to the CPU
            seg_predictions = seg_out.data.max(1)[1].cpu()
            edge_predictions = edge_out.max(1)[0].cpu()

            logging.info('evaluating: %d / %d' % (vi + 1, len(val_loader)))
            _Fpc, _Fc = eval_mask_boundary(seg_predictions.numpy(),
                                           mask.numpy(),
                                           args.dataset_cls.num_classes,
                                           bound_th=float(thresh))
            Fc += _Fc
            Fpc += _Fpc

            del seg_out, edge_out, vi, data

        # Macro-averaged and class-wise boundary F-scores at this threshold
        logging.info('Threshold: ' + thresh)
        logging.info('F_Score: ' +
                     str(np.sum(Fpc / Fc) / args.dataset_cls.num_classes))
        logging.info('F_Score (Classwise): ' + str(Fpc / Fc))
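
The logged score is a macro average: each class's accumulated F value is divided by its accumulated count, and the per-class ratios are averaged over num_classes. A minimal numeric sketch of that final step (the array values below are illustrative only, not taken from the project):

import numpy as np

# Illustrative accumulators for a hypothetical 3-class problem
Fpc = np.array([45.0, 30.0, 12.0])  # accumulated per-class F values
Fc = np.array([50.0, 40.0, 20.0])   # accumulated per-class counts
num_classes = 3

classwise_f = Fpc / Fc                       # array([0.9 , 0.75, 0.6 ])
macro_f = np.sum(classwise_f) / num_classes  # 0.75, the logged 'F_Score'
print(macro_f)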