Example #1
# precision_score, recall_score and f1_score come from scikit-learn; precision,
# recall, f1, test_label and knn_predict are assumed to be defined earlier in the script.
from sklearn.metrics import precision_score, recall_score, f1_score

def test_metric():
    print("###precision###")
    print(precision(test_label, knn_predict))
    print(precision_score(test_label, knn_predict, average='macro'))

    print("###recall###")
    print(recall(test_label, knn_predict))
    print(recall_score(test_label, knn_predict, average='macro'))

    print("###f1###")
    print(f1(test_label, knn_predict))
    print(f1_score(test_label, knn_predict, average='macro'))
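Example #1 checks custom precision/recall/f1 helpers against scikit-learn's macro-averaged scores. A small self-contained sketch (with made-up labels rather than the snippet's test_label/knn_predict) shows what that macro average is: the unweighted mean of the per-class precision values.

import numpy as np
from sklearn.metrics import precision_score

true_labels = np.array([0, 0, 1, 1, 2, 2])
pred_labels = np.array([0, 1, 1, 1, 2, 0])

# Macro averaging: compute precision per class, then take the unweighted mean.
per_class = [precision_score(true_labels == c, pred_labels == c) for c in (0, 1, 2)]
print(np.mean(per_class))                                          # 0.7222...
print(precision_score(true_labels, pred_labels, average='macro'))  # same value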
Example #2
def print_evl(predict, label):
    """
    Utility method that prints the evaluation results:
        accuracy
        recall
        precision
        kappa
        Contingency table
    Args:
        predict: prediction
        label: labels

    Returns:

    """
    print("Accuracy:  {}".format(metric.accuracy(predict, label)))
    print("Recall:    {}".format(metric.recall(predict, label)))
    print("Precision: {}".format(metric.precision(predict, label)))
    print("Kappa:     {}".format(metric.kappa(predict, label)))
    print(
        "Contingency Table:\n"
        "{0:5d}          {2:5d}\n"
        "{3:5d}          {1:5d}\n"
        "Number of Inputs: {4}".format(*metric.basic_metrics(predict, label)))
Example #3
import csv
from os.path import join

import numpy as np
from keras import backend as K

# dice_coefficient, recall and precision are assumed to be Keras-backend metric
# functions defined elsewhere in the project.
def add_result_to_csvfile(img_name, prediction, gt_data, output_dir, outfile,
                          compute_dice=1, compute_recall=1, compute_precision=1):
    # Append one row of per-image scores; csv.writer on Python 3 needs text mode with newline=''.
    with open(join(output_dir, outfile + 'scores.csv'), 'a', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        row = [img_name]  # start the row as a list so the scores can be appended to it
        prediction = prediction.reshape(gt_data.shape)
        if compute_dice:
            print('Computing Dice')
            dice_arr = K.eval(dice_coefficient(prediction.astype(np.float32), gt_data.astype(np.float32)))
            print('\tDice: {}'.format(dice_arr))
            row.append(dice_arr)

        if compute_recall:
            print('Computing Recall')
            recall_arr = K.eval(recall(prediction.astype(np.float32), gt_data.astype(np.float32)))
            print('\tRecall: {}'.format(recall_arr))
            row.append(recall_arr)

        if compute_precision:
            print('Computing Precision')
            precision_arr = K.eval(precision(prediction.astype(np.float32), gt_data.astype(np.float32)))
            print('\tPrecision: {}'.format(precision_arr))
            row.append(precision_arr)

        writer.writerow(row)
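# For reference, a minimal soft-Dice sketch in the same Keras-backend style as the
# calls above. This is an assumed implementation, not necessarily the project's
# own dice_coefficient definition.
def dice_coefficient(y_pred, y_true, smooth=1e-7):
    # Soft Dice over the flattened masks: 2 * intersection / (|y_pred| + |y_true|).
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.flatten(y_true)
    intersection = K.sum(y_pred_f * y_true_f)
    return (2.0 * intersection + smooth) / (K.sum(y_pred_f) + K.sum(y_true_f) + smooth)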
import time

import numpy as np
import torch

# callback (AverageMeter, ProgressMeter), metric and imshow are assumed to be
# project-level helpers imported at module level.
def validate(val_loader, model, criterion, config, logger, output_pt):
    batch_time = callback.AverageMeter('Time=', ':6.3f')
    data_time = callback.AverageMeter('Data=', ':6.5f')
    losses = callback.AverageMeter('Loss=', ':.4e')
    acc = callback.AverageMeter('Acc=', ':1.3f')
    prec = callback.AverageMeter('Pre=', ':1.3f')
    rec = callback.AverageMeter('Rec=', ':1.3f')

    progress = callback.ProgressMeter(
        len(val_loader), [batch_time, data_time, losses, acc, prec, rec],
        logger,
        prefix='Test: ')

    # define roc and auc variables
    outputs = np.empty([0, 1], dtype=np.float32)
    targets = np.empty([0, 1], dtype=np.float32)

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):

            # measure data loading time
            data_time.update(time.time() - end, images.size(0))

            target = target.type(torch.FloatTensor)
            target_ = torch.unsqueeze(target, 1)

            # Resolve the GPU id once so it is defined for both transfers;
            # None selects the current/default CUDA device.
            gpu_id = int(config.gpu) if config.gpu is not None else None
            if gpu_id is not None:
                images = images.cuda(gpu_id, non_blocking=True)
            if torch.cuda.is_available():
                target_ = target_.cuda(gpu_id, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target_)
            if config.vis:
                imshow(images, title=target[0])

            # measure accuracy and record loss
            acc_ = metric.accuracy(output, target_)
            pre_ = metric.precision(output, target_)
            rec_ = metric.recall(output, target_)

            losses.update(loss.item() * images.size(0), images.size(0))
            acc.update(acc_[0], acc_[1])
            prec.update(pre_[0], pre_[1])
            rec.update(rec_[0], rec_[1])

            # record results and labels for computing auc and roc
            outputs = np.concatenate([outputs, output.cpu().numpy()])
            targets = np.concatenate([targets, target_.cpu().numpy()])

            # measure elapsed time
            batch_time.update(time.time() - end, images.size(0))
            end = time.time()

            if i % config.print_freq == 0:
                progress.display(i)

        F1 = 2 * prec.avg * rec.avg / (prec.avg + rec.avg + 1e-6)
        fpr, tpr, roc_auc = metric.roc(outputs, targets)
        # Build the summary message once and send it to both stdout and the logger.
        summary = (
            'Final Validation-Loss:{losses.avg:.3f} \t Validation-Acc:{acc.avg:.3f} \t '
            'Validation-Prec:{prec.avg:.3f} \t Validation-Recall:{rec.avg:.3f} \t '
            'Validation-F1:{f1:.3f} \t Validation-Auc:{auc:.3f}'.format(
                losses=losses, acc=acc, prec=prec, rec=rec, f1=F1, auc=roc_auc))
        print(summary)
        logger.info(summary)

    return acc.avg
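validate accumulates the raw model outputs and binary targets and passes them to metric.roc, which is expected to return the false-positive rates, true-positive rates, and the area under the ROC curve. The metric module itself is not shown; a minimal sketch, assuming it wraps scikit-learn's ROC utilities, could look like this:

import numpy as np
from sklearn.metrics import roc_curve, auc

def roc(outputs, targets):
    # outputs: model scores with shape (N, 1); targets: binary labels with shape (N, 1).
    fpr, tpr, _ = roc_curve(np.ravel(targets), np.ravel(outputs))
    return fpr, tpr, auc(fpr, tpr)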