Example #1
def scorer(pred, label, vxlspacing):
	"""
	Compute volume overlap and surface distance scores for one case.

	:param pred: binary prediction mask (numpy array)
	:param label: binary reference mask (numpy array)
	:param vxlspacing: voxel spacing, e.g. a 3-tuple of floats
	:return: dict with dice, jaccard, voe, rvd, assd, and msd
	"""
	volscores = {}

	volscores['dice'] = metric.dc(pred, label)
	volscores['jaccard'] = metric.binary.jc(pred, label)
	volscores['voe'] = 1. - volscores['jaccard']
	volscores['rvd'] = metric.ravd(label, pred)

	# Surface distances are undefined when either mask is empty.
	if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
		volscores['assd'] = 0
		volscores['msd'] = 0
	else:
		evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing, mask_offset=[0., 0., 0.], reference_offset=[0., 0., 0.])
		volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()
		volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

	logging.info("\tDice " + str(volscores['dice']))
	logging.info("\tJaccard " + str(volscores['jaccard']))
	logging.info("\tVOE " + str(volscores['voe']))
	logging.info("\tRVD " + str(volscores['rvd']))
	logging.info("\tASSD " + str(volscores['assd']))
	logging.info("\tMSD " + str(volscores['msd']))

	return volscores
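A minimal usage sketch for the function above, assuming `medpy.metric` is imported as `metric` and the LiTS-style `Surface` helper is importable; the volumes here are synthetic:

import numpy as np

# Two overlapping synthetic binary volumes, 1.0 x 1.0 x 2.5 mm voxels.
pred = np.zeros((64, 64, 32), dtype=bool)
label = np.zeros((64, 64, 32), dtype=bool)
pred[20:40, 20:40, 10:20] = True
label[22:42, 22:42, 10:20] = True

scores = scorer(pred, label, vxlspacing=[1.0, 1.0, 2.5])
print(scores['dice'], scores['assd'], scores['msd'])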
Example #2
def get_scores(pred, label, vxlspacing):
    """
    pred: HxWxZ (x, y, z) of boolean
    label: HxWxZ (e.g. (512,512,75))
    vxlspacing: 3-tuple of float (spacing)

    """
    volscores = {}

    volscores['dice'] = metric.dc(pred, label)
    try:
        jaccard = metric.binary.jc(pred, label)
    except ZeroDivisionError:
        jaccard = 0.0
    volscores['jaccard'] = jaccard
    volscores['voe'] = 1. - volscores['jaccard']
    try:
        rvd = metric.ravd(label, pred)
    except Exception:
        # ravd is undefined for an empty reference mask.
        rvd = None
    volscores['rvd'] = rvd

    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        volscores['assd'] = 0
        volscores['msd'] = 0
    else:
        evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing,
                           mask_offset=[0., 0., 0.], reference_offset=[0., 0., 0.])
        volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()

        volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

    return volscores
Example #3
File: utils.py Project: qgking/FRGAN
def compute_segmentation_scores(prediction_mask, reference_mask):
    """
    Calculates metrics scores from numpy arrays and returns an dict.

    Assumes that each object in the input mask has an integer label that
    defines object correspondence between prediction_mask and
    reference_mask.

    :param prediction_mask: numpy.array, int
    :param reference_mask: numpy.array, int
    :param voxel_spacing: list with x,y and z spacing
    :return: dict with dice, jaccard, voe, rvd, assd, rmsd, and msd
    """

    scores = {'dice': [], 'jaccard': [], 'voe': [], 'rvd': []}

    for obj_id in np.unique(prediction_mask):
        if obj_id == 0:
            continue  # 0 is background, not an object; skip

        # Limit processing to the bounding box containing both the prediction
        # and reference objects.
        target_mask = (reference_mask == obj_id) | (prediction_mask == obj_id)
        bounding_box = ndimage.find_objects(target_mask)[0]
        p = (prediction_mask == obj_id)[bounding_box]
        r = (reference_mask == obj_id)[bounding_box]
        if np.any(p) and np.any(r):
            dice = metric.dc(p, r)
            jaccard = dice / (2. - dice)
            scores['dice'].append(dice)
            scores['jaccard'].append(jaccard)
            scores['voe'].append(1. - jaccard)
            scores['rvd'].append(metric.ravd(r, p))
    return scores
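Note that the Jaccard index is derived from Dice above via the identity J = D / (2 - D), which follows from D = 2|A∩B| / (|A| + |B|) and J = |A∩B| / |A∪B|. A quick numeric sanity check, assuming medpy is installed:

import numpy as np
from medpy import metric

a = np.zeros((10, 10), dtype=bool)
b = np.zeros((10, 10), dtype=bool)
a[2:7, 2:7] = True
b[4:9, 4:9] = True

d = metric.dc(a, b)
j = metric.binary.jc(a, b)
assert np.isclose(j, d / (2. - d))  # the identity holds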
Example #4
def compute_segmentation_scores(prediction_mask, reference_mask,
                                voxel_spacing):
    """
    Calculates metrics scores from numpy arrays and returns an dict.
    Assumes that each object in the input mask has an integer label that
    defines object correspondence between prediction_mask and
    reference_mask.
    :param prediction_mask: numpy.array, int
    :param reference_mask: numpy.array, int
    :param voxel_spacing: list with x,y and z spacing
    :return: dict with dice, jaccard, voe, rvd, assd, rmsd, and msd
    """
    scores = {
        'dice': [],
        'jaccard': [],
        'voe': [],
        'rvd': [],
        'assd': [],
        'rmsd': [],
        'msd': []
    }

    p = (prediction_mask > 0)
    r = (reference_mask > 0)
    if np.any(p) and np.any(r):
        dice = metric.dc(p, r)
        jaccard = dice / (2. - dice)
        scores['dice'].append(dice)
        scores['jaccard'].append(jaccard)
        scores['voe'].append(1. - jaccard)
        scores['rvd'].append(metric.ravd(r, p))
        evalsurf = Surface(p,
                           r,
                           physical_voxel_spacing=voxel_spacing,
                           mask_offset=[0., 0., 0.],
                           reference_offset=[0., 0., 0.])

        assd = evalsurf.get_average_symmetric_surface_distance()
        rmsd = evalsurf.get_root_mean_square_symmetric_surface_distance()
        msd = evalsurf.get_maximum_symmetric_surface_distance()
        scores['assd'].append(assd)
        scores['rmsd'].append(rmsd)
        scores['msd'].append(msd)
    else:
        # There are no objects in the prediction, in the reference, or both
        scores['dice'].append(0)
        scores['jaccard'].append(0)
        scores['voe'].append(1.)
        # Surface distance (and volume difference) metrics between the two
        # masks are meaningless when any one of the masks is empty. Assign
        # maximum penalty. The average score for these metrics, over all
        # objects, will thus also not be finite as it also loses meaning.
        scores['rvd'].append(LARGE)
        scores['assd'].append(LARGE)
        scores['rmsd'].append(LARGE)
        scores['msd'].append(LARGE)

    return scores
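`LARGE` is referenced but never defined in this snippet; presumably it is a module-level penalty constant in the original project. A minimal assumed definition:

# Assumed: a large finite penalty for undefined distance/volume metrics
# when one of the masks is empty (the exact value is project-specific).
LARGE = 9999.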
Example #5
def calculate_metrics(mask1, mask2):
    true_positives = metric.obj_tpr(mask1, mask2)
    false_positives = metric.obj_fpr(mask1, mask2)
    dc = metric.dc(mask1, mask2)
    hd = metric.hd(mask1, mask2)
    precision = metric.precision(mask1, mask2)
    recall = metric.recall(mask1, mask2)
    ravd = metric.ravd(mask1, mask2)
    assd = metric.assd(mask1, mask2)
    asd = metric.asd(mask1, mask2)
    return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
Example #7
def total_score(pred_path, gt_path, file_names, metric_name):
    print(pred_path)

    score = dict()
    score['names'] = []
    score['lesion'] = []

    for name in file_names:
        ground_truth = gt_path.format(name)
        ground_truth = os.path.expanduser(ground_truth)
        image_gt = nibabel.load(ground_truth)
        # get_fdata() replaces the deprecated get_data().
        image_gt = nibabel.funcs.as_closest_canonical(image_gt).get_fdata()
        image_gt = image_gt.reshape(image_gt.shape[:3])

        pred = pred_path.format(name)
        pred = os.path.expanduser(pred)
        image_pred = nibabel.load(pred)
        affine = image_pred.affine
        # Voxel spacing from the affine diagonal (assumes an axis-aligned affine).
        voxel = [affine[0, 0], affine[1, 1], affine[2, 2]]
        image_pred = image_pred.get_fdata()
        image_pred = image_pred.reshape(image_pred.shape[:3])

        score['names'].append(name)
        if metric_name == 'assd':
            score['lesion'].append(metric.assd(image_gt, image_pred, voxel))
        elif metric_name == 'rve':
            score['lesion'].append(metric.ravd(image_gt, image_pred))
        else:
            score['lesion'].append(metric.dc(image_gt, image_pred))

    print('Sample size: {}'.format(len(list(score.values())[0])))
    for label in score.keys():
        if label != 'names':
            print('Label: {}, {} mean: {}'.format(label, metric_name, round(np.mean(score[label]), 2)))
            print('Label: {}, {} std: {}'.format(label, metric_name, round(np.std(score[label]), 2)))

    return score
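A hedged usage sketch for `total_score`: `pred_path` and `gt_path` are expected to be format strings with a `{}` placeholder for each case name. The paths and names below are hypothetical, for illustration only:

scores = total_score(pred_path='~/results/pred_{}.nii.gz',
                     gt_path='~/data/gt_{}.nii.gz',
                     file_names=['case001', 'case002'],
                     metric_name='dice')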
Example #8
def get_scores(pred, label, vxlspacing):
	volscores = {}

	volscores['dice'] = metric.dc(pred, label)
	volscores['jaccard'] = metric.binary.jc(pred, label)
	volscores['voe'] = 1. - volscores['jaccard']
	volscores['rvd'] = metric.ravd(label, pred)

	if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
		volscores['assd'] = 0
		volscores['msd'] = 0
	else:
		evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing, mask_offset=[0., 0., 0.], reference_offset=[0., 0., 0.])
		volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()
		volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

	return volscores
Example #9
def calculate_validation_metrics(probas_pred,
                                 image_gt,
                                 class_labels=None,
                                 num_classes=5):
    classes = np.arange(probas_pred.shape[-1])
    # Determine valid classes (those that actually appear in image_gt);
    # some images may be missing some classes.
    classes = [c for c in classes if np.sum(image_gt == c) != 0]
    image_pred = probas_pred.argmax(-1)
    assert image_gt.shape == image_pred.shape
    accuracy = np.sum(image_gt == image_pred) / float(image_pred.size)
    class_metrics = {}
    y_true = convert_seg_flat_to_binary_label_indicator_array(
        image_gt.ravel(), num_classes).astype(int)[:, classes]
    y_pred = probas_pred.transpose(3, 0, 1, 2).reshape(num_classes, -1).transpose(1, 0)[:, classes]
    # Per-class AUC (average=None returns one score per class).
    scores = roc_auc_score(y_true, y_pred, average=None)
    for i, c in enumerate(classes):
        true_positives = metric.obj_tpr(image_gt == c, image_pred == c)
        false_positives = metric.obj_fpr(image_gt == c, image_pred == c)
        dc = metric.dc(image_gt == c, image_pred == c)
        hd = metric.hd(image_gt == c, image_pred == c)
        precision = metric.precision(image_gt == c, image_pred == c)
        recall = metric.recall(image_gt == c, image_pred == c)
        ravd = metric.ravd(image_gt == c, image_pred == c)
        assd = metric.assd(image_gt == c, image_pred == c)
        asd = metric.asd(image_gt == c, image_pred == c)
        label = c
        if class_labels is not None and c in class_labels.keys():
            label = class_labels[c]
        # Keys are padded with tabs so a printed report lines up.
        class_metrics[label] = {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'DICE\t\t': dc,
            'Hausdorff dist': hd,
            'precision\t': precision,
            'recall\t\t': recall,
            'rel abs vol diff': ravd,
            'avg surf dist symm': assd,
            'avg surf dist\t': asd,
            'roc_auc\t\t': scores[i]
        }
    return accuracy, class_metrics
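`convert_seg_flat_to_binary_label_indicator_array` is not defined in this snippet. Given how it is used (a flat label array turned into an (N, num_classes) indicator matrix), a minimal sketch of the assumed behavior:

import numpy as np

def convert_seg_flat_to_binary_label_indicator_array(seg_flat, num_classes):
    # One-hot encode a flat integer segmentation into shape
    # (num_voxels, num_classes). Assumed behavior, not the original code.
    return np.eye(num_classes)[seg_flat.astype(int)]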
Example #10
def calculate_metrics(mask1, mask2):
    try:
        true_positives = metric.obj_tpr(mask1, mask2)
        if mask2.sum() != 0:
            false_positives = metric.obj_fpr(mask1, mask2)
        else:
            false_positives = 0
        if mask1.sum() == 0 or mask2.sum() == 0:
            # Surface distances are undefined for empty masks; use a sentinel.
            hd = 999
            assd = 999
            asd = 999
        else:
            hd = 999  #metric.hd(mask1, mask2)
            assd = 999  #metric.assd(mask1, mask2)
            asd = 999  #metric.asd(mask1, mask2)
        dc = metric.dc(mask1, mask2)
        precision = metric.precision(mask1, mask2)
        recall = metric.recall(mask1, mask2)
        ravd = metric.ravd(mask1, mask2)
        return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
    except Exception:
        # Any metric failure yields a uniform error sentinel for every score.
        return (99999,) * 9
Example #11
for name in filenames:
    name_sp = name.split(sep='_')
    name_num = re.findall(r'\d+', name_sp[1])[0]
#    name_num = name[3:5]
    gt_nii = nb.load(os.path.join(gt_path, name_sp[0] + '_' + name_num + '.nii.gz'))
    gt = gt_nii.get_fdata()  # tumor ground truth
    vxlspacing = gt_nii.header.get_zooms()
    seg = nb.load(os.path.join(seg_path, name)).get_fdata()
    seg = np.squeeze(seg)
#    seg = np.uint8(seg>1)
    if np.max(seg) > 0:
#        gt = gt>1
#        seg = getLargestCC(seg>0)
        seg_dice = round(metric.dc(seg > 0, gt), 4)
        seg_RAVD = round(metric.ravd(seg, gt), 4)
        seg_ASSD = round(metric.binary.assd(seg, gt, vxlspacing), 4)
        seg_MSSD = round(metric.hd(seg, gt, vxlspacing), 4)
    else:
        seg_dice = 0.0
        seg_RAVD = 0.0
        seg_ASSD = 0.0
        seg_MSSD = 0.0

    seg_measures['image'].append(name)
    seg_measures['Dice'].append(seg_dice)
    seg_measures['RAVD'].append(seg_RAVD)
    seg_measures['ASSD'].append(seg_ASSD)
    seg_measures['MSSD'].append(seg_MSSD)
    print(name, seg_dice)
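The `seg_measures` accumulator used in the loop above is not initialized in this snippet; a minimal assumed setup that would precede the loop:

# Assumed initialization for the per-case accumulator.
seg_measures = {'image': [], 'Dice': [], 'RAVD': [], 'ASSD': [], 'MSSD': []}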
Example #12
def metric_3d(logits3d, labels3d, required=None, **kwargs):
    """
    Compute 3D metrics:

    * (Dice) Dice Coefficient

    * (VOE)  Volumetric Overlap Error

    * (RVD)  Relative Volume Difference

    * (ASSD) Average Symmetric Surface Distance

    * (RMSD) Root Mean Square Symmetric Surface Distance

    * (MSD)  Maximum Symmetric Surface Distance

    Parameters
    ----------
    logits3d: ndarray
        3D binary prediction, same shape as `labels3d`; it should be an int
        array or boolean array.
    labels3d: ndarray
        3D segmentation labels, shape [None, None, None]; it should be an int
        array or boolean array. If `logits3d` or `labels3d` has more than 3
        dimensions, `np.squeeze` is applied to remove extra singleton
        dimensions, so please make sure both variables still have 3 dimensions
        afterwards. For example, shapes [None, None, None, 1] or
        [1, None, None, None, 1] are allowed.
    required: str or list
        a string or a list of strings specifying which metrics to return; by
        default all the metrics listed above are returned. For example, with
        ```python
        metric_3d(logits3d, labels3d, required=["Dice", "VOE", "ASSD"])
        ```
        only these three metrics are returned.
    kwargs: dict
        sampling: list
            the pixel resolution or pixel size, given as an n-vector where n
            equals the number of dimensions of the segmentation (2D or 3D).
            The default is 1, meaning pixels are 1x1x1 mm in size.

    Returns
    -------
    The required metrics.

    Notes
    -----
    Thanks to the code snippet from @MLNotebook's blog.

    [Blog link](https://mlnotebook.github.io/post/surface-distance-function/).
    """
    metrics = ["Dice", "VOE", "RVD", "ASSD", "RMSD", "MSD"]
    need_dist_map = False

    if required is None:
        required = metrics
    elif isinstance(required, str):
        required = [required]

    # Validate the request; the last three metrics are surface-distance
    # based and need a distance map.
    for req in required:
        if req not in metrics:
            raise ValueError("Not supported metric: %s" % req)
        if (not need_dist_map) and req in metrics[3:]:
            need_dist_map = True

    if logits3d.ndim > 3:
        logits3d = np.squeeze(logits3d)
    if labels3d.ndim > 3:
        labels3d = np.squeeze(labels3d)

    assert logits3d.shape == labels3d.shape, (
        "Shape mismatch of logits3D and labels3D. \n"
        "Logits3D has shape %r while labels3D has "
        "shape %r" % (logits3d.shape, labels3d.shape))
    logits3d = logits3d.astype(bool)
    labels3d = labels3d.astype(bool)

    metrics_3d = {}
    sampling = kwargs.get("sampling", [1., 1., 1.])

    if need_dist_map:
        from utils.surface import Surface
        if np.count_nonzero(logits3d) == 0 or np.count_nonzero(labels3d) == 0:
            # Surface distances are undefined when either volume is empty.
            metrics_3d['ASSD'] = 0
            metrics_3d['RMSD'] = 0
            metrics_3d['MSD'] = 0
        else:
            eval_surf = Surface(logits3d,
                                labels3d,
                                physical_voxel_spacing=sampling,
                                mask_offset=[0., 0., 0.],
                                reference_offset=[0., 0., 0.])

            if "ASSD" in required:
                metrics_3d["ASSD"] = eval_surf.get_average_symmetric_surface_distance()
            if "MSD" in required:
                metrics_3d["MSD"] = eval_surf.get_maximum_symmetric_surface_distance()
            if "RMSD" in required:
                metrics_3d["RMSD"] = eval_surf.get_root_mean_square_symmetric_surface_distance()

    if required:
        if "Dice" in required:
            metrics_3d["Dice"] = mtr.dc(logits3d, labels3d)
        if "VOE" in required:
            metrics_3d["VOE"] = 1. - mtr.jc(logits3d, labels3d)
        if "RVD" in required:
            metrics_3d["RVD"] = mtr.ravd(logits3d, labels3d)

    return metrics_3d
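A minimal usage sketch for `metric_3d`, assuming `mtr` is `medpy.metric` and `utils.surface.Surface` is importable; the overlap-only call below avoids the surface-distance path entirely:

import numpy as np

pred = np.zeros((32, 32, 16), dtype=np.int32)
gt = np.zeros((32, 32, 16), dtype=np.int32)
pred[8:20, 8:20, 4:10] = 1
gt[10:22, 10:22, 4:10] = 1

# Only overlap metrics requested, so no distance maps are computed.
print(metric_3d(pred, gt, required=["Dice", "VOE", "RVD"]))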
Example #13
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()

    reference_segmentation = image  # the reference image in this case is the tumor mask
    label = 255
    # Init the signed Maurer distance map as the reference for surface metrics.
    reference_distance_map = sitk.SignedMaurerDistanceMap(
        reference_segmentation, squaredDistance=False, useImageSpacing=True)
    label_intensity_statistics_filter = sitk.LabelIntensityStatisticsImageFilter()

    ''' Calculate overlap metrics '''
    volscores = {}
    ref = sitk.GetArrayFromImage(image)  # convert the sitk image to a numpy array
    seg = sitk.GetArrayFromImage(segmentation)
    volscores['rvd'] = metric.ravd(seg, ref)
    volscores['dice'] = metric.dc(seg, ref)
    volscores['jaccard'] = metric.binary.jc(seg, ref)
    volscores['voe'] = 1. - volscores['jaccard']

    ''' Add the volume overlap metrics to the results array, indexed by the OverlapMeasures enum '''
    overlap_measures_filter.Execute(reference_segmentation, segmentation)
    overlap_results[0, OverlapMeasures.jaccard.value] = overlap_measures_filter.GetJaccardCoefficient()
    overlap_results[0, OverlapMeasures.dice.value] = overlap_measures_filter.GetDiceCoefficient()
    overlap_results[0, OverlapMeasures.volume_similarity.value] = overlap_measures_filter.GetVolumeSimilarity()
    overlap_results[0, OverlapMeasures.volumetric_overlap_error.value] = volscores['voe']
    overlap_results[0, OverlapMeasures.relative_vol_difference.value] = volscores['rvd']
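`OverlapMeasures` and `overlap_results` are defined elsewhere in the original source; the pattern mirrors the SimpleITK evaluation notebooks. A minimal assumed setup:

import enum
import numpy as np

# Assumed enum and results array, mirroring the SimpleITK notebook pattern.
class OverlapMeasures(enum.Enum):
    jaccard = 0
    dice = 1
    volume_similarity = 2
    volumetric_overlap_error = 3
    relative_vol_difference = 4

overlap_results = np.zeros((1, len(OverlapMeasures)))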