def test1results(model):
    """Threshold predictions on test set 1 and print per-frame metrics.

    Relies on module-level names: ``res1`` (model inputs), ``test1_y1`` /
    ``test1_y2`` / ``test1_y3`` (three reference label sets), and the
    metric functions ``dc``, ``recall`` and ``precision``.
    For each frame range it prints a blank line, then dc, recall and
    precision against each of the three references.
    """
    frames = [(0, 128), (128, 256), (256, 261), (261, 266),
              (266, 315), (315, 364), (364, 492), (492, 620)]
    test1resarray = []

    for start, stop in frames:
        pred = model.predict(res1[start:stop], batch_size=4)
        # Binarize the soft predictions at a 0.5 threshold.
        result = np.zeros(pred.shape)
        result[pred > 0.5] = 1
        test1resarray.append(result)

        references = (test1_y1[start:stop],
                      test1_y2[start:stop],
                      test1_y3[start:stop])

        print("")
        for metric_fn in (dc, recall, precision):
            for reference in references:
                print(metric_fn(result, reference))
    '''  
def test2results(model):
    
    frames = [(0,128),(128,256),(256,261),(261,268),(268,275),(275,282),(282,289)]
    test2resarray = []
    
    print("\nTEST 2 RESULTS: ")
    for item in frames:
        x = item[0]
        y = item[1]
        
        test2_pred = model.predict(res2[x:y], batch_size=4)
        test2_result = np.zeros(test2_pred.shape)
        #print test1_result.shape
        test2_result[test2_pred>0.5] = 1
        test2resarray.append(test2_result)
        
        a = test2_result
        b = test2_y1[x:y]
        c = test2_y2[x:y]
        d = test2_y3[x:y]
        
        print("")
        print(dc(a, b))
        print(dc(a, c))
        print(dc(a, d))

        print(recall(a, b))
        print(recall(a, c))
        print(recall(a, d))

        print(precision(a, b))
        print(precision(a, c))
        print(precision(a, d))
Exemple #3
0
    def test(self, data_provider):
        """Run inference over the test list, stitch per-slice predictions into
        per-volume 3D masks, and log a Dice score per volume.

        Volume boundaries are detected by comparing the image id parsed from
        consecutive entries of ``data_provider.test_data_list``.
        """
        logging.info('start testing.............')
        save_path = os.path.join(self.hps.model_path, 'model.ckpt')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ''' restore from file '''
            self.model.restore(sess, save_path)

            img_count = 0   # slices accumulated for the current volume
            dice_list = []  # one Dice score per completed volume
            for i in range(data_provider.test_data_len):
                # Image id parsed from the entry, e.g. "dir/name.ID.ext" -> ID.
                cur_im_id = data_provider.test_data_list[i].split(
                    ' ')[0].split('/')[-1].split('.')[1]

                img_count += 1
                x_test = data_provider.get_test_image(i)
                output = sess.run(self.model.prediction,
                                  feed_dict={self.model.x: x_test})
                # Per-slice class map, reshaped to (H, W).
                output = np.argmax(output, -1)
                output = np.reshape(output, (x_test.shape[1], x_test.shape[2]))
                if img_count == 1:
                    # First slice of a volume starts a new stack.
                    output_3D = output[:, :, np.newaxis]
                else:
                    output_new = output[:, :, np.newaxis]
                    output_3D = np.concatenate((output_3D, output_new), axis=2)

                    if i < data_provider.test_data_len - 1:
                        # Next entry belongs to a different image id: current
                        # volume is complete, so filter, save, score and reset.
                        if data_provider.test_data_list[i + 1].split(' ')[
                                0].split('/')[-1].split('.')[1] != cur_im_id:
                            output_3D = output_3D.astype(np.uint8)
                            output_3D = self.connected_filter(output_3D)
                            self.save_data(output_3D, cur_im_id)
                            path = os.path.join(
                                data_provider.liver_path,
                                'standard-segmentation-' + str(cur_im_id) +
                                '.nii')
                            # Reference mask: any positive voxel counts.
                            liver_region = nib.load(path).get_data() > 0
                            dice = metric.dc(output_3D, liver_region)
                            dice_list.append(dice)
                            logging.info(cur_im_id + ' dice is ' + str(dice))
                            img_count = 0
                            continue
                    if i == data_provider.test_data_len - 1:
                        # Last entry overall: flush the final volume and report
                        # the mean Dice over all volumes.
                        output_3D = output_3D.astype(np.uint8)
                        output_3D = self.connected_filter(output_3D)
                        self.save_data(output_3D, cur_im_id)
                        path = os.path.join(
                            data_provider.liver_path,
                            'standard-segmentation-' + str(cur_im_id) + '.nii')
                        liver_region = nib.load(path).get_data() > 0
                        dice = metric.dc(output_3D, liver_region)
                        dice_list.append(dice)
                        logging.info(cur_im_id + ' dice is ' + str(dice))
                        logging.info('dice per case is ' +
                                     str(np.mean(dice_list)))
                        break
def find_tp_and_fp(result, reference, connectivity=1):
    """Split connected components of ``result`` into false positives and
    collect bounding boxes of all reference objects.

    :param result: binary prediction array
    :param reference: binary reference array of the same shape
    :param connectivity: neighbourhood connectivity for component labelling
    :return: ``(fp_lists, tp_lists)`` — each entry is a bounding box encoded
             as ``[start_0, ..., start_{d-1}, stop_0, ..., stop_{d-1}]``
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the documented
    # replacement and behaves identically here.
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))

    assert result.shape == reference.shape
    struct = ndi.morphology.generate_binary_structure(result.ndim,
                                                      connectivity)

    # label distinct binary objects
    labeled_res, n_res = ndi.label(result, struct)
    labeled_ref, n_ref = ndi.label(reference, struct)

    slices = ndi.find_objects(labeled_res)

    fp_lists = []
    tp_lists = []
    for res_obj_id, slice_ in enumerate(slices):
        res_obj_id += 1  # component labels are 1-based; 0 is background
        res_obj_mask = labeled_res[slice_] == res_obj_id
        ref_obj_mask = labeled_ref[slice_].astype(
            bool)  # We don't distinguish different objects in reference
        iou = mtr.dc(res_obj_mask, ref_obj_mask)
        if iou < 0.1:
            # Barely any overlap with the reference: count as false positive.
            fp_lists.append([x.start
                             for x in slice_] + [x.stop for x in slice_])

    # Every reference object contributes one bounding box to tp_lists.
    slices = ndi.find_objects(labeled_ref)
    for slice_ in slices:
        tp_lists.append([x.start for x in slice_] + [x.stop for x in slice_])

    return fp_lists, tp_lists
Exemple #5
0
def scorer(pred, label, vxlspacing):
    """Compute volume-overlap and surface-distance scores for one case.

    :param pred: predicted binary volume
    :param label: reference binary volume
    :param vxlspacing: physical voxel spacing used for surface distances
    :return: dict with 'dice', 'jaccard', 'voe', 'rvd', 'assd', 'msd'
    """
    volscores = {}
    volscores['dice'] = metric.dc(pred, label)
    volscores['jaccard'] = metric.binary.jc(pred, label)
    volscores['voe'] = 1. - volscores['jaccard']
    volscores['rvd'] = metric.ravd(label, pred)

    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        # Surface distances are undefined when either mask is empty.
        volscores['assd'] = 0
        volscores['msd'] = 0
    else:
        surf = Surface(pred, label, physical_voxel_spacing=vxlspacing,
                       mask_offset=[0., 0., 0.],
                       reference_offset=[0., 0., 0.])
        volscores['assd'] = surf.get_average_symmetric_surface_distance()
        volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

    for name in ('Dice', 'Jaccard', 'VOE', 'RVD', 'ASSD', 'MSD'):
        logging.info("\t" + name + " " + str(volscores[name.lower()]))

    return volscores
Exemple #6
0
def get_scores(pred, label, vxlspacing):
    """Compute overlap and surface-distance scores for one volume.

    pred: HxWxZ (x, y, z) of boolean
    label: HxWxZ (e.g. (512,512,75))
    vxlspacing: 3-tuple of float (spacing)

    :return: dict with 'dice', 'jaccard', 'voe', 'rvd', 'assd', 'msd';
             'rvd' is None when it cannot be computed
    """
    volscores = {}

    volscores['dice'] = metric.dc(pred, label)
    try:
        jaccard = metric.binary.jc(pred, label)
    except ZeroDivisionError:
        jaccard = 0.0
    volscores['jaccard'] = jaccard
    volscores['voe'] = 1. - volscores['jaccard']
    try:
        rvd = metric.ravd(label, pred)
    except Exception:
        # ravd raises when the reference is empty; report None rather than
        # aborting the whole evaluation. (Was a bare except, which would
        # also have swallowed KeyboardInterrupt/SystemExit.)
        rvd = None
    volscores['rvd'] = rvd

    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        # Surface distances are undefined when either mask is empty.
        volscores['assd'] = 0
        volscores['msd'] = 0
    else:
        evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing,
                           mask_offset=[0., 0., 0.], reference_offset=[0., 0., 0.])
        volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()

        volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)

    return volscores
Exemple #7
0
def compute_segmentation_scores(prediction_mask, reference_mask):
    """
    Calculates per-object overlap scores from numpy arrays and returns a dict.

    Assumes that each object in the input mask has an integer label that
    defines object correspondence between prediction_mask and
    reference_mask. Label 0 is treated as background and skipped.

    :param prediction_mask: numpy.array, int
    :param reference_mask: numpy.array, int
    :return: dict with per-object lists for dice, jaccard, voe and rvd
             (objects with no overlap in either mask contribute nothing)
    """

    scores = {'dice': [], 'jaccard': [], 'voe': [], 'rvd': []}

    for obj_id in np.unique(prediction_mask):
        if obj_id == 0:
            continue  # 0 is background, not an object; skip

        # Limit processing to the bounding box containing both the prediction
        # and reference objects.
        target_mask = (reference_mask == obj_id) + (prediction_mask == obj_id)
        bounding_box = ndimage.find_objects(target_mask)[0]
        p = (prediction_mask == obj_id)[bounding_box]
        r = (reference_mask == obj_id)[bounding_box]
        if np.any(p) and np.any(r):
            dice = metric.dc(p, r)
            # Jaccard follows algebraically from Dice: j = d / (2 - d).
            jaccard = dice / (2. - dice)
            scores['dice'].append(dice)
            scores['jaccard'].append(jaccard)
            scores['voe'].append(1. - jaccard)
            scores['rvd'].append(metric.ravd(r, p))
    return scores
Exemple #8
0
def load_niftis_threshold_compute_dice(gt_file, pred_file, thresholds):
    """Load two NIfTI files, binarize on label 3, compute Dice, and apply a
    per-threshold fallback for small predictions.

    :param gt_file: path to the ground-truth NIfTI file
    :param pred_file: path to the predicted-segmentation NIfTI file
    :param thresholds: sequence of numeric voxel-count thresholds. (The
        previous annotation ``Tuple[list, tuple]`` described a 2-tuple of a
        list and a tuple, which did not match how the argument is iterated.)
    :return: ``(res_was_smaller, res_dice)`` dicts keyed by threshold
    """
    gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_file))
    pred = sitk.GetArrayFromImage(sitk.ReadImage(pred_file))
    mask_pred = pred == 3
    mask_gt = gt == 3
    num_pred = np.sum(mask_pred)

    num_gt = np.sum(mask_gt)
    dice = dc(mask_pred, mask_gt)

    res_dice = {}
    res_was_smaller = {}

    for t in thresholds:
        was_smaller = False

        if num_pred < t:
            was_smaller = True
            # Below the threshold the prediction counts as empty: score 1
            # only if the ground truth is also empty.
            dice_here = 1. if num_gt == 0 else 0.
        else:
            # dice is an immutable float, so the previous deepcopy was a no-op.
            dice_here = dice

        res_dice[t] = dice_here
        res_was_smaller[t] = was_smaller

    return res_was_smaller, res_dice
Exemple #9
0
def calculate_metric_percase(pred, gt, num_classes):
    """Metric computation for binary and multi-class segmentation.

    :param pred: predicted label map (not one-hot encoded)
    :param gt: ground-truth label map (not one-hot encoded)
    :param num_classes: number of classes; inferred from ``gt`` when None
    :return: Dice — a scalar for the binary case, a per-class list otherwise.
        NOTE(review): in the binary branch jc/hd/asd are computed but never
        returned; presumably leftover from an earlier version — confirm.
    """
    if num_classes is None:
        num_classes = len(np.unique(gt))  # NOTE: gt is a label map, not one-hot
    print('np.unique(gt):', np.unique(gt))
    if num_classes == 2:
        dice = metric.binary.dc(pred, gt)
        jc = metric.binary.jc(pred, gt)
        hd = metric.binary.hd95(pred, gt)
        asd = metric.binary.asd(pred, gt)
    elif num_classes > 2:
        # One-hot encode both maps and score each class channel separately.
        from keras.utils import to_categorical
        gt_onehot = to_categorical(gt, num_classes)
        pred_onehot = to_categorical(pred, num_classes)
        dice = []
        jc = []
        hd = []
        asd = []
        for k in range(num_classes):
            pred_k = pred_onehot[..., k]
            gt_k = gt_onehot[..., k]
            dice += [metric.dc(result=pred_k, reference=gt_k)]
            #jc += [metric.jc(result=pred_k, reference=gt_k)]
            #hd += [metric.hd95(result=pred_k, reference=gt_k)]
            #asd += [metric.asd(result=pred_k, reference=gt_k)]
    else:
        # Raised when fewer than 2 classes are detected (message in Chinese:
        # "pred and gt must not be one-hot encoded").
        raise ValueError("pred和gt不能是onehot编码")
    return dice  #, jc#, hd, asd
Exemple #10
0
 def evaluate(self,
              score_map,
              seg,
              spacing,
              connectivity=1,
              use_post_process=False):
     """Score a class-probability map against a reference segmentation.

     :param score_map: per-class score volume; argmaxed over axis 0 unless
         ``use_post_process`` routes it through ``self.post_process``
     :param seg: reference label map, converted to one-hot internally
     :param spacing: voxel spacing (only used by the commented-out
         surface-distance metrics)
     :param connectivity: likewise only used by the disabled metrics
     :param use_post_process: apply ``self.post_process`` instead of argmax
     :return: ``(res, prediction)`` — per-class dicts with a 'dice' entry
         (1 if both masks empty, 0 if exactly one is empty), and the final
         label-map prediction
     """
     seg = self.seg2one_hot(seg, self.num_class)
     res = dict()
     if use_post_process:
         prediction = self.post_process(score_map)
     else:
         prediction = np.argmax(score_map, axis=0)
     prediction = self.seg2one_hot(prediction, self.num_class)
     # Class 0 (background) is deliberately excluded from scoring.
     for i in range(1, self.num_class):
         res[i] = dict()
         if np.any(prediction[i]) and np.any(seg[i]):
             # res[i]['jaccard'] = metric.jc(prediction[i], seg[i])
             res[i]['dice'] = metric.dc(prediction[i], seg[i])
             # res[i]['hausdorff'] = metric.hd(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
             # res[i]['hausdorff95'] = metric.hd95(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
             # res[i]['mean_surface_distance'] = metric.asd(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
         elif not (np.any(prediction[i]) or np.any(seg[i])):
             # Both empty: perfect agreement by convention.
             # res[i]['jaccard'] = 1
             res[i]['dice'] = 1
             # res[i]['hausdorff'] = 0
             # res[i]['hausdorff95'] = 0
             # res[i]['mean_surface_distance'] = 0
         else:
             # Exactly one of the two masks is empty: no overlap possible.
             # res[i]['jaccard'] = 0
             res[i]['dice'] = 0
             # res[i]['hausdorff'] = np.inf
             # res[i]['hausdorff95'] = np.inf
             # res[i]['mean_surface_distance'] = np.inf
     prediction = np.argmax(prediction, axis=0)
     return res, prediction
Exemple #11
0
def get_scores(pred, label, vxlspacing):
    """Compute Dice, Jaccard and VOE for one volume.

    :param pred: predicted binary volume
    :param label: reference binary volume
    :param vxlspacing: voxel spacing (kept for interface compatibility; the
        surface-distance metrics that would use it are currently disabled)
    :return: dict with 'dice', 'jaccard' and 'voe'
    """
    volscores = {}

    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        # Empty prediction or reference: Dice/Jaccard are 0 and VOE is 1,
        # exactly what the formulas below would also produce (medpy's dc
        # returns 0.0 on empty masks). Returning early makes this guard
        # effective — previously it fell through and its assignments were
        # immediately overwritten.
        volscores['dice'] = 0
        volscores['jaccard'] = 0
        volscores['voe'] = 1.
        return volscores

    volscores['dice'] = metric.dc(pred, label)
    # Jaccard follows algebraically from Dice: j = d / (2 - d).
    volscores['jaccard'] = volscores['dice'] / (2. - volscores['dice'])
    volscores['voe'] = 1. - volscores['jaccard']

    # rvd / assd / msd computations were disabled in the original; restore
    # from version history if they are needed again.
    return volscores
def get_scores(pred, label, vxlspacing):
    """Compute only the Dice score; the remaining metrics are stubbed to 0.

    :param pred: predicted binary volume
    :param label: reference binary volume
    :param vxlspacing: voxel spacing (unused while the surface metrics are
        disabled; kept for interface compatibility)
    :return: dict with 'dice' plus zeroed 'jaccard', 'voe', 'rvd', 'assd'
        and 'msd' entries
    """
    volscores = {'dice': metric.dc(pred, label)}
    # Jaccard/VOE/RVD/ASSD/MSD are disabled; keep the keys so callers that
    # index them keep working.
    for stub_key in ('jaccard', 'voe', 'rvd', 'assd', 'msd'):
        volscores[stub_key] = 0

    return volscores
def compute_segmentation_scores(prediction_mask, reference_mask,
                                voxel_spacing):
    """Score a predicted mask against a reference mask.

    Both masks are binarized (> 0) before scoring. When either mask is
    empty, the overlap scores are zeroed and the distance / volume metrics
    receive the maximum penalty ``LARGE``, since they lose meaning there.

    :param prediction_mask: numpy.array, int
    :param reference_mask: numpy.array, int
    :param voxel_spacing: list with x, y and z spacing
    :return: dict of single-element lists: dice, jaccard, voe, rvd, assd,
             rmsd and msd
    """
    scores = {key: [] for key in
              ('dice', 'jaccard', 'voe', 'rvd', 'assd', 'rmsd', 'msd')}

    pred_fg = (prediction_mask > 0)
    ref_fg = (reference_mask > 0)

    if not (np.any(pred_fg) and np.any(ref_fg)):
        # There are no objects in the prediction, in the reference, or both.
        scores['dice'].append(0)
        scores['jaccard'].append(0)
        scores['voe'].append(1.)
        # Surface distance (and volume difference) metrics between the two
        # masks are meaningless when any one of the masks is empty. Assign
        # maximum penalty so averages over cases reflect the failure.
        for key in ('rvd', 'assd', 'rmsd', 'msd'):
            scores[key].append(LARGE)
        return scores

    dice = metric.dc(pred_fg, ref_fg)
    jaccard = dice / (2. - dice)
    scores['dice'].append(dice)
    scores['jaccard'].append(jaccard)
    scores['voe'].append(1. - jaccard)
    scores['rvd'].append(metric.ravd(ref_fg, pred_fg))

    surface = Surface(pred_fg,
                      ref_fg,
                      physical_voxel_spacing=voxel_spacing,
                      mask_offset=[0., 0., 0.],
                      reference_offset=[0., 0., 0.])
    scores['assd'].append(surface.get_average_symmetric_surface_distance())
    scores['rmsd'].append(
        surface.get_root_mean_square_symmetric_surface_distance())
    scores['msd'].append(surface.get_maximum_symmetric_surface_distance())

    return scores
def compute_dice_scores(ref: str, pred: str):
    """Compute kidney (label > 0) and tumor (label == 2) Dice for one case.

    :param ref: path to the reference segmentation image
    :param pred: path to the predicted segmentation image
    :return: ``(kidney_dice, tumor_dice, mean_dice)``; a class that is absent
             from both images scores NaN, which propagates into the mean
    """
    ref = sitk.GetArrayFromImage(sitk.ReadImage(ref))
    pred = sitk.GetArrayFromImage(sitk.ReadImage(pred))
    kidney_mask_ref = ref > 0
    kidney_mask_pred = pred > 0
    if np.sum(kidney_mask_pred) == 0 and kidney_mask_ref.sum() == 0:
        kidney_dice = np.nan
    else:
        kidney_dice = dc(kidney_mask_pred, kidney_mask_ref)

    tumor_mask_ref = ref == 2
    tumor_mask_pred = pred == 2
    if np.sum(tumor_mask_ref) == 0 and tumor_mask_pred.sum() == 0:
        tumor_dice = np.nan
    else:
        tumor_dice = dc(tumor_mask_ref, tumor_mask_pred)

    # np.mean is the arithmetic mean of the two scores; the original local
    # was misleadingly named "geometric_mean".
    mean_dice = np.mean((kidney_dice, tumor_dice))
    return kidney_dice, tumor_dice, mean_dice
def evaluate_case(file_pred: str, file_gt: str, regions):
    """Compute a Dice score per region for one prediction/ground-truth pair.

    :param file_pred: path to the predicted segmentation image
    :param file_gt: path to the ground-truth segmentation image
    :param regions: iterable of region specs passed to
        ``create_region_from_mask``
    :return: list with one Dice per region; NaN when the region is empty in
        both images
    """
    image_gt = sitk.GetArrayFromImage(sitk.ReadImage(file_gt))
    image_pred = sitk.GetArrayFromImage(sitk.ReadImage(file_pred))
    results = []
    for region in regions:
        mask_pred = create_region_from_mask(image_pred, region)
        mask_gt = create_region_from_mask(image_gt, region)
        if np.sum(mask_gt) == 0 and np.sum(mask_pred) == 0:
            results.append(np.nan)
        else:
            results.append(metric.dc(mask_pred, mask_gt))
    return results
Exemple #16
0
 def evaluate(self,
              score_map,
              seg,
              size,
              spacing,
              connectivity=1,
              use_post_process=True):
     """Resize score map and reference to ``size``, then score per class.

     :param score_map: per-class score volume; each channel is resized with
         cubic interpolation before argmax / post-processing
     :param seg: reference label map, resized with nearest-neighbour
         (order=0) so labels stay integral, then one-hot encoded
     :param size: target spatial size for both inputs
     :param spacing: voxel spacing (only used by the commented-out
         surface-distance metrics)
     :param connectivity: likewise only used by the disabled metrics
     :param use_post_process: route the score map through ``post_process``
         instead of a plain argmax
     :return: ``(res, prediction)`` — per-class dicts with a 'dice' entry
         (1 if both masks empty, 0 if exactly one is empty), and the final
         label-map prediction
     """
     seg = resize(seg,
                  size,
                  order=0,
                  mode='edge',
                  cval=0,
                  clip=True,
                  preserve_range=True,
                  anti_aliasing=False).astype(np.int8)
     seg = self.seg2one_hot(seg, self.num_class)
     # Resize each class channel independently, then restack.
     score_map = np.vstack([
         resize(score_map[i],
                size,
                order=3,
                mode='reflect',
                cval=0,
                clip=True,
                preserve_range=True,
                anti_aliasing=False).astype('float32')[np.newaxis]
         for i in range(len(score_map))
     ])
     res = dict()
     if use_post_process:
         prediction = post_process(score_map)
     else:
         prediction = np.argmax(score_map, axis=0)
     prediction = self.seg2one_hot(prediction, self.num_class)
     # Class 0 (background) is deliberately excluded from scoring.
     for i in range(1, self.num_class):
         res[i] = dict()
         if np.any(prediction[i]) and np.any(seg[i]):
             # res[i]['jaccard'] = metric.jc(prediction[i], seg[i])
             res[i]['dice'] = metric.dc(prediction[i], seg[i])
             # res[i]['hausdorff'] = metric.hd(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
             # res[i]['hausdorff95'] = metric.hd95(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
             # res[i]['mean_surface_distance'] = metric.asd(prediction[i], seg[i], voxelspacing=spacing, connectivity=connectivity)
         elif not (np.any(prediction[i]) or np.any(seg[i])):
             # Both empty: perfect agreement by convention.
             # res[i]['jaccard'] = 1
             res[i]['dice'] = 1
             # res[i]['hausdorff'] = 0
             # res[i]['hausdorff95'] = 0
             # res[i]['mean_surface_distance'] = 0
         else:
             # Exactly one of the two masks is empty: no overlap possible.
             # res[i]['jaccard'] = 0
             res[i]['dice'] = 0
             # res[i]['hausdorff'] = np.inf
             # res[i]['hausdorff95'] = np.inf
             # res[i]['mean_surface_distance'] = np.inf
     prediction = np.argmax(prediction, axis=0)
     return res, prediction
def Dice(output2, target):
    """Per-case tumor Dice between a network output and target labels.

    :param output2: tensor of per-class scores; argmaxed over axis 1
    :param target: label tensor with a singleton channel axis; lesion == 2
    :return: Dice score, or 1. when the lesion is absent from both the
        prediction and the reference
    """
    lesion_pred = np.argmax(output2.detach().cpu().numpy(), axis=1)
    lesion_true = np.squeeze(target.detach().cpu().numpy(), axis=1) == 2
    # Compute per-case (per patient volume) dice.
    if np.any(lesion_pred) or np.any(lesion_true):
        return metric.dc(lesion_pred, lesion_true)
    print('tumor_dice = 1')
    return 1.
 def calculate_metrics(mask1, mask2):
     """Compute a bundle of overlap and surface metrics between two masks.

     :param mask1: first binary mask (passed as the result to medpy metrics)
     :param mask2: second binary mask (passed as the reference)
     :return: tuple of (true_positives, false_positives, dc, hd, precision,
         recall, ravd, assd, asd). Any metric may raise on degenerate
         (e.g. empty) masks; no guarding is done here.
     """
     true_positives = metric.obj_tpr(mask1, mask2)
     false_positives = metric.obj_fpr(mask1, mask2)
     dc = metric.dc(mask1, mask2)
     hd = metric.hd(mask1, mask2)
     precision = metric.precision(mask1, mask2)
     recall = metric.recall(mask1, mask2)
     ravd = metric.ravd(mask1, mask2)
     assd = metric.assd(mask1, mask2)
     asd = metric.asd(mask1, mask2)
     return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
 def calculate_metrics(mask1, mask2):
     """Compute a bundle of overlap and surface metrics between two masks.

     :param mask1: first binary mask (passed as the result to medpy metrics)
     :param mask2: second binary mask (passed as the reference)
     :return: tuple of (true_positives, false_positives, dc, hd, precision,
         recall, ravd, assd, asd). Any metric may raise on degenerate
         (e.g. empty) masks; no guarding is done here.
     """
     true_positives = metric.obj_tpr(mask1, mask2)
     false_positives = metric.obj_fpr(mask1, mask2)
     dc = metric.dc(mask1, mask2)
     hd = metric.hd(mask1, mask2)
     precision = metric.precision(mask1, mask2)
     recall = metric.recall(mask1, mask2)
     ravd = metric.ravd(mask1, mask2)
     assd = metric.assd(mask1, mask2)
     asd = metric.asd(mask1, mask2)
     return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
def Dice(output1, output2, target):
    """Per-case liver and tumor Dice from two network heads.

    :param output1: liver-head scores; argmaxed over axis 1
    :param output2: lesion-head scores; argmaxed over axis 1
    :param target: label tensor with a singleton channel axis;
        liver is label >= 1, lesion is label == 2
    :return: ``(tumor_dice, liver_dice)``
    """
    pred_liver = np.argmax(output1.detach().cpu().numpy(), axis=1)
    pred_lesion = np.argmax(output2.detach().cpu().numpy(), axis=1)
    labels = np.squeeze(target.detach().cpu().numpy(), axis=1)

    true_liver = labels >= 1
    true_lesion = labels == 2

    # Compute per-case (per patient volume) dice. A lesion absent from both
    # the prediction and the reference counts as a perfect score.
    if np.any(pred_lesion) or np.any(true_lesion):
        tumor_dice = metric.dc(pred_lesion, true_lesion)
    else:
        tumor_dice = 1.
        print('tumor_dice = 1')

    # Without any predicted liver voxels the liver Dice is defined as zero.
    if np.any(pred_liver == 1):
        liver_dice = metric.dc(pred_liver, true_liver)
    else:
        liver_dice = 0
        print('liver_dice = 0')
    return tumor_dice, liver_dice
Exemple #21
0
def brats_eval(seg, gt):
    """BraTS-style Dice scores for whole tumor, enhancing tumor and tumor core.

    :param seg: predicted label map (whole tumor > 0, ET == 2, TC == 3)
    :param gt: reference label map with the same label convention
    :return: ``(wt_dice, et_dice, tc_dice)`` rounded to 4 decimals; an
             ET/TC region absent from the ground truth scores 1 when the
             prediction is also empty, else 0
    """
    def _region_dice(seg_mask, gt_mask):
        # All-or-nothing scoring when the region is missing from the ground
        # truth (shared by ET and TC; previously duplicated inline).
        if np.count_nonzero(gt_mask) == 0:
            return 0 if np.count_nonzero(seg_mask) > 0 else 1
        return round(metric.dc(seg_mask, gt_mask), 4)

    wt_dice = round(metric.dc(seg > 0, gt > 0), 4)
    et_dice = _region_dice(seg == 2, gt == 2)
    tc_dice = _region_dice(seg == 3, gt == 3)

    return wt_dice, et_dice, tc_dice
Exemple #22
0
def total_score(pred_path, gt_path, file_names, metric_name):
    """Accumulate a per-case lesion score over a list of cases and print
    summary statistics.

    :param pred_path: format string for prediction paths ('{}' <- case name)
    :param gt_path: format string for ground-truth paths
    :param file_names: iterable of case names substituted into both paths
    :param metric_name: 'assd', 'rve' (relative volume error) or anything
        else for Dice
    :return: dict with 'names' (case names) and 'lesion' (scores) lists
    """
    print(pred_path)

    # NOTE: the unused locals gt_to_pred / list_labels / thre from the
    # original were removed; they were never read.
    score = dict()
    score['names'] = []
    score['lesion'] = []

    for name in file_names:
        # Load the ground truth in canonical orientation, dropping trailing
        # singleton dimensions.
        ground_truth = os.path.expanduser(gt_path.format(name))
        image_gt = nibabel.load(ground_truth)
        image_gt = nibabel.funcs.as_closest_canonical(image_gt).get_data()
        image_gt = image_gt.reshape(image_gt.shape[:3])

        pred = os.path.expanduser(pred_path.format(name))
        image_pred = nibabel.load(pred)
        # Voxel sizes taken from the affine diagonal — assumes an
        # axis-aligned affine with no rotation component; TODO confirm for
        # oblique acquisitions.
        affine = image_pred.affine
        voxel = [affine[0, 0], affine[1, 1], affine[2, 2]]
        image_pred = image_pred.get_data()
        image_pred = image_pred.reshape(image_pred.shape[:3])

        score['names'].append(name)
        if metric_name == 'assd':
            score['lesion'].append(metric.assd(image_gt, image_pred, voxel))
        elif metric_name == 'rve':
            score['lesion'].append(metric.ravd(image_gt, image_pred))
        else:
            score['lesion'].append(metric.dc(image_gt, image_pred))

    print('Sample size: {}'.format(len(list(score.values())[0])))
    for label in score.keys():
        if label != 'names':
            print('Label: {}, {} mean: {}'.format(label, metric_name, round(np.mean(score[label]), 2)))
            print('Label: {}, {} std: {}'.format(label, metric_name, round(np.std(score[label]), 2)))

    return score
Exemple #23
0
def compute_BraTS_dice(ref, pred):
    """
    Dice between two binary integer numpy.ndarrays, using the BraTS
    convention for empty references.

    :param ref: binary reference array
    :param pred: binary prediction array
    :return: 1 if both arrays are empty, 0 if only the reference is empty,
             otherwise the Dice coefficient of pred against ref
    """
    num_ref = np.sum(ref)
    num_pred = np.sum(pred)

    if num_ref == 0:
        # Empty reference: all-or-nothing scoring.
        return 1 if num_pred == 0 else 0
    return dc(pred, ref)
Exemple #24
0
def calculate_validation_metrics(probas_pred,
                                 image_gt,
                                 class_labels=None,
                                 num_classes=5):
    """Compute overall accuracy plus a per-class metric dict for one image.

    :param probas_pred: per-voxel class probabilities with classes on the
        last axis
    :param image_gt: ground-truth label map of the same spatial shape
    :param class_labels: optional {class_index: display_name} mapping used
        as keys in the returned dict
    :param num_classes: total number of classes in the probability map
    :return: ``(accuracy, class_metrics)`` where class_metrics maps each
        class (label or index) to a dict of medpy metrics plus ROC AUC
    """
    classes = np.arange(probas_pred.shape[-1])
    # determine valid classes (those that actually appear in image_gt). Some images may miss some classes
    classes = [c for c in classes if np.sum(image_gt == c) != 0]
    image_pred = probas_pred.argmax(-1)
    assert image_gt.shape == image_pred.shape
    accuracy = np.sum(image_gt == image_pred) / float(image_pred.size)
    class_metrics = {}
    # Flatten to binary indicator arrays, restricted to the present classes,
    # so roc_auc_score gets one column per evaluated class.
    y_true = convert_seg_flat_to_binary_label_indicator_array(
        image_gt.ravel(), num_classes).astype(int)[:, classes]
    y_pred = probas_pred.transpose(3, 0, 1,
                                   2).reshape(num_classes,
                                              -1).transpose(1, 0)[:, classes]
    scores = roc_auc_score(y_true, y_pred, None)
    for i, c in enumerate(classes):
        # One-vs-rest binary masks for this class feed each medpy metric.
        true_positives = metric.obj_tpr(image_gt == c, image_pred == c)
        false_positives = metric.obj_fpr(image_gt == c, image_pred == c)
        dc = metric.dc(image_gt == c, image_pred == c)
        hd = metric.hd(image_gt == c, image_pred == c)
        precision = metric.precision(image_gt == c, image_pred == c)
        recall = metric.recall(image_gt == c, image_pred == c)
        ravd = metric.ravd(image_gt == c, image_pred == c)
        assd = metric.assd(image_gt == c, image_pred == c)
        asd = metric.asd(image_gt == c, image_pred == c)
        label = c
        if class_labels is not None and c in class_labels.keys():
            label = class_labels[c]
        # Keys contain tab characters to align a plain-text report when the
        # dict is printed.
        class_metrics[label] = {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'DICE\t\t': dc,
            'Hausdorff dist': hd,
            'precision\t': precision,
            'recall\t\t': recall,
            'rel abs vol diff': ravd,
            'avg surf dist symm': assd,
            'avg surf dist\t': asd,
            'roc_auc\t\t': scores[i]
        }
    return accuracy, class_metrics
def compute_typical_metrics(seg_gt, seg_pred, labels):
    """Merge the given labels into binary masks and compute standard scores.

    :param seg_gt: ground-truth label map
    :param seg_pred: predicted label map of the same shape
    :param labels: iterable of labels merged into one foreground mask
    :return: (precision, recall, fpr, false_omission_rate, dice, assd,
              vol_gt, vol_pred); any metric that cannot be computed is NaN
    """
    assert seg_gt.shape == seg_pred.shape
    mask_pred = np.zeros(seg_pred.shape, dtype=bool)
    mask_gt = np.zeros(seg_pred.shape, dtype=bool)

    for l in labels:
        mask_gt[seg_gt == l] = True
        mask_pred[seg_pred == l] = True

    vol_gt = np.sum(mask_gt)
    vol_pred = np.sum(mask_pred)

    try:
        # NOTE: the prediction is passed as confusion_matrix's first
        # (y_true) argument, so rows index the prediction and columns the
        # ground truth — the TN/FN/FP/TP extraction below matches that
        # orientation. The 2x2 indexing raises when a class is entirely
        # absent (1x1 matrix), which falls into the NaN branch.
        cm = confusion_matrix(
            mask_pred.astype(int).ravel(),
            mask_gt.astype(int).ravel())
        TN = cm[0][0]
        FN = cm[0][1]
        FP = cm[1][0]
        TP = cm[1][1]
        precision = TP / float(TP + FP)
        recall = TP / float(TP + FN)
        fpr = FP / float(FP + TN)
        false_omission_rate = FN / float(FN + TN)
    except Exception:
        # Was a bare except; narrowed so KeyboardInterrupt/SystemExit pass.
        precision = np.nan
        recall = np.nan
        fpr = np.nan
        false_omission_rate = np.nan

    try:
        dice = metric.dc(mask_pred, mask_gt)
        if np.sum(mask_gt) == 0:
            # Dice against an empty reference is not meaningful.
            dice = np.nan
    except Exception:
        dice = np.nan

    try:
        assd = metric.assd(mask_gt, mask_pred)
    except Exception:
        # assd raises on empty masks; best-effort NaN keeps batch runs alive.
        assd = np.nan

    return precision, recall, fpr, false_omission_rate, dice, assd, vol_gt, vol_pred
Exemple #26
0
def evaluate_verse_case(sitk_file_ref: str, sitk_file_test: str):
    """Per-vertebra Dice for one VerSe case.

    Labels 1..25 are scored; a label absent from the reference yields NaN,
    so only vertebrae present in the reference are effectively evaluated.

    :param sitk_file_ref: path to the reference segmentation
    :param sitk_file_test: path to the predicted segmentation
    :return: list of 25 Dice scores (or NaN), one per vertebra label
    """
    gt_npy = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_ref))
    pred_npy = sitk.GetArrayFromImage(sitk.ReadImage(sitk_file_test))
    dice_scores = []
    for label in range(1, 26):
        gt_mask = gt_npy == label
        if np.sum(gt_mask) > 0:
            score = metric.dc(pred_npy == label, gt_mask)
        else:
            score = np.nan
        dice_scores.append(score)
    return dice_scores
Exemple #27
0
 def calculate_metrics(mask1, mask2):
     """Best-effort bundle of overlap metrics between two binary masks.

     :param mask1: first binary mask (result argument for medpy metrics)
     :param mask2: second binary mask (reference argument)
     :return: (true_positives, false_positives, dc, hd, precision, recall,
         ravd, assd, asd); the surface metrics are disabled and stubbed to
         999, and on any computation failure every slot is the sentinel
         99999.
     """
     try:
         true_positives = metric.obj_tpr(mask1, mask2)
         if mask2.sum() != 0:
             false_positives = metric.obj_fpr(mask1, mask2)
         else:
             # obj_fpr is undefined against an empty reference.
             false_positives = 0
         if mask1.sum() == 0 or mask2.sum() == 0:
             hd = 999
             assd = 999
             asd = 999
         else:
             # Surface metrics are currently disabled; 999 is a sentinel.
             hd = 999  #metric.hd(mask1, mask2)
             assd = 999  #metric.assd(mask1, mask2)
             asd = 999  #metric.asd(mask1, mask2)
         dc = metric.dc(mask1, mask2)
         precision = metric.precision(mask1, mask2)
         recall = metric.recall(mask1, mask2)
         ravd = metric.ravd(mask1, mask2)
         return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
     except Exception:
         # Was a bare except; narrowed so KeyboardInterrupt/SystemExit
         # still propagate instead of being converted into sentinels.
         return 99999, 99999, 99999, 99999, 99999, 99999, 99999, 99999, 99999
Exemple #28
0
 def sample_network(self, itr):
     """Run the network on one validation sample, save prediction/GT/image
     volumes to the sample directory, and return the Dice score.

     :param itr: iteration number, used as the filename prefix
     :return: Dice between the argmaxed prediction and ground truth
     """
     # p_target_img, p_target_lab, p_atlas_imgses, p_atlas_labses=self.valid_sampler.get_file()
     # target_img, target_lab, atlas_imgses, atlas_labses=self.valid_sampler.get_data(p_target_img, p_target_lab, p_atlas_imgses, p_atlas_labses)
     target_img, target_lab, atlas_imgses, atlas_labses = self.valid_sampler.next_sample(
     )
     feed_dict = {self.ph_atlas: atlas_labses, self.ph_gt: target_lab}
     summary, out, gt = self.sess.run(
         [self.summary, self.output, self.ph_gt], feed_dict=feed_dict)
     # Collapse one-hot channels back to a label map before saving.
     out = np.argmax(out, axis=-1)
     sitk_write_lab(out[0, ...],
                    dir=self.args.sample_dir,
                    name=str(itr) + "pred")
     gt = np.argmax(gt, axis=-1)
     sitk_write_lab(gt[0, ...],
                    dir=self.args.sample_dir,
                    name=str(itr) + "gt")
     sitk_write_image(np.squeeze(target_img[0, ...]),
                      dir=self.args.sample_dir,
                      name=str(itr) + "img")
     # Score only the first element of the batch.
     dice = dc(out[0, ...], gt[0, ...])
     print("dc:%f" % (dice))
     return dice
Exemple #29
0
def apply_crf(imgvol, segvol, probvol, pred_name):
    """Run a 3D CRF over the probability volume, save the refined prediction
    and print its Dice against the reference segmentation.

    :param imgvol: intensity volume fed to the CRF
    :param segvol: reference segmentation (label 1 is the foreground scored)
    :param probvol: per-voxel probability map
    :param pred_name: output path for the saved .npy prediction

    Converted from Python 2 print statements to print() calls for
    consistency with the rest of the file.
    """
    print("Running CRF")
    crfparams = {
        'max_iterations': 5,
        'dynamic_z': True,
        'ignore_memory': True,
        'bilateral_x_std': 2.5318,
        'bilateral_y_std': 8.07058,
        'bilateral_z_std': 3.29843,
        'pos_x_std': 0.26038,
        'pos_y_std': 1.68586,
        'pos_z_std': 0.95493,
        'bilateral_intensity_std': 6.1507,
        'bilateral_w': 38.33788,
        'pos_w': 6.31136
    }
    pro = CRFProcessor.CRF3DProcessor(**crfparams)
    print(np.max(imgvol), np.min(imgvol))
    liver_pred = pro.set_data_and_run(imgvol, probvol)
    np.save(pred_name, liver_pred)

    _dice = metric.dc(liver_pred == 1, segvol == 1)
    print("Dice before CRF: " + str(_dice))
def calculate_metrics(pred, target):
    """Return (sensitivity, specificity, Dice) of *pred* against *target*."""
    sens = metric.sensitivity(pred, target)
    spec = metric.specificity(pred, target)
    dice = metric.dc(pred, target)
    # The original computed these values and silently discarded them
    # (implicit return None); return them so callers can actually use them.
    return sens, spec, dice
Exemple #31
0
def val_tumor_net(model):
    """Validate the tumor-segmentation network on the validation volumes.

    For each CT volume: mask out the liver with the GT liver label, clip and
    normalize intensities, invert so tumors appear bright, downsample, run
    sliding-block inference, threshold the sigmoid output and score it with
    Dice against the GT tumor label. Plots 'dice avg' and 'dice global'
    via ``vis`` and leaves *model* back in training mode. Returns None.
    """
    # Switch the model to evaluation mode
    model.eval()
    model.training = False

    with open('data/val_volumes_list.txt', 'rb') as f:
        volumes = pickle.load(f)[:16]

    # Accumulators for Dice avg (per-volume mean) and Dice global (pooled)
    total_dice, dice_intersection, dice_union = 0, 0, 0

    for volume in tqdm(volumes, total=len(volumes)):
        # Read the volume.nii file
        ct = sitk.ReadImage(
            os.path.join(opt.origin_train_root + '/ct', volume),
            sitk.sitkInt16)
        ct_array = sitk.GetArrayFromImage(
            ct)  # ndarray, shape (num_slices, 512, 512)

        # Liver mask taken from the GT segmentation
        liver_seg = sitk.ReadImage(
            os.path.join(opt.origin_train_root + '/seg',
                         volume.replace('volume', 'segmentation')),
            sitk.sitkInt8)
        liver_seg_array = sitk.GetArrayFromImage(liver_seg)
        liver_seg_array[liver_seg_array > 0] = 1

        # Element-wise multiply mask and CT to extract the liver region
        liver_array = liver_seg_array * ct_array

        # Clip gray values outside the configured threshold window
        liver_array[liver_array > opt.gray_upper] = opt.gray_upper
        liver_array[liver_array < opt.gray_lower] = opt.gray_lower

        # Normalize every slice in the block
        liver_array = liver_array.astype(np.float32)
        liver_array = liver_array / 200

        # For tumor recognition, invert intensities so the tumor region does
        # not blend into the background
        liver_array = 1 - liver_array  # invert; background still shares the tumor's intensity here
        liver_array = liver_array * liver_seg_array  # zero out background so only the tumor stays bright

        # Downsample the array with cubic interpolation
        liver_array = ndimage.zoom(liver_array, opt.zoom_scale, order=3)

        # If the CT has fewer than block_size slices, pad trailing slices so
        # the total depth reaches block_size (48 per the original note)
        too_small = False
        slice_num = liver_array.shape[0]
        if slice_num < opt.block_size:
            temp = np.ones(
                (opt.block_size, int(512 * opt.zoom_scale),
                 int(512 * opt.zoom_scale))) * (opt.gray_lower / 200.0)
            temp[0:slice_num] = liver_array
            liver_array = temp
            too_small = True

        # Split the CT volume into consecutive blocks of length block_size,
        # e.g. 0~47, 48~95, 96~143, ...
        start_slice, end_slice = 0, opt.block_size - 1
        count = np.zeros((liver_array.shape[0], int(
            512 * opt.zoom_scale), int(512 * opt.zoom_scale)),
                         dtype=np.int16)  # how many times each voxel was predicted
        probability_map = np.zeros(
            (liver_array.shape[0], int(
                512 * opt.zoom_scale), int(512 * opt.zoom_scale)),
            dtype=np.float32)  # accumulated per-voxel prediction values

        with torch.no_grad():
            while end_slice < liver_array.shape[0]:
                ct_tensor = torch.as_tensor(liver_array[start_slice:end_slice +
                                                        1]).float()
                if opt.use_gpu:
                    ct_tensor = ct_tensor.cuda(opt.device)
                ct_tensor = ct_tensor.unsqueeze(0).unsqueeze(
                    0)  # shape becomes: (1, 1, 16, 256, 256)
                output = model(ct_tensor)
                count[start_slice:end_slice + 1] += 1
                probability_map[start_slice:end_slice + 1] += np.squeeze(
                    output.cpu().detach().numpy())

                # Result already copied to a CPU ndarray; free the output
                # tensor to relieve GPU memory pressure
                del output

                # Advance to the next block interval
                start_slice += opt.block_size
                end_slice = start_slice + opt.block_size - 1

            # If the slice count exceeds block_size and is not divisible by
            # it, run one extra (overlapping) pass on the final block
            if slice_num > opt.block_size and slice_num % opt.block_size:
                end_slice = slice_num - 1
                start_slice = end_slice - opt.block_size + 1
                ct_tensor = torch.as_tensor(liver_array[start_slice:end_slice +
                                                        1]).float()
                if opt.use_gpu:
                    ct_tensor = ct_tensor.cuda(opt.device)
                ct_tensor = ct_tensor.unsqueeze(0).unsqueeze(
                    0)  # shape becomes: (1, 1, 48, 256, 256)

                output = model(ct_tensor)
                count[start_slice:end_slice + 1] += 1
                probability_map[start_slice:end_slice + 1] += np.squeeze(
                    output.cpu().detach().numpy())

                # Result already copied to a CPU ndarray; free the output
                # tensor to relieve GPU memory pressure
                del output

            # Threshold the sigmoid output: voxels whose accumulated value is
            # >= opt.threshold (0.7) per prediction become 1, the rest 0
            pred_seg = np.zeros_like(
                probability_map)  # all-zero matrix with probability_map's shape
            pred_seg[probability_map >= opt.threshold *
                     count] = 1  # some voxels were predicted twice — hence the count factor

            # Volumes shorter than block_size were padded above; strip the
            # fake trailing slices again
            if too_small:
                pred_seg = pred_seg[:slice_num]

            pred_seg = pred_seg.astype(np.uint8)
            tumor_seg = copy.deepcopy(pred_seg)

            # Load the annotated segmentation.nii and compute the metrics
            seg = sitk.ReadImage(
                os.path.join(opt.origin_train_root + '/seg',
                             volume.replace('volume', 'segmentation')),
                sitk.sitkInt8)
            seg_array = sitk.GetArrayFromImage(seg)

            seg_array[seg_array < 2] = 0
            seg_array[seg_array == 2] = 1
            seg_array = ndimage.zoom(seg_array, opt.zoom_scale,
                                     order=0)  # nearest-neighbour interpolation for labels

            dice = metric.dc(tumor_seg, seg_array)
            dice_intersection += (tumor_seg * seg_array).sum() * 2
            dice_union += tumor_seg.sum() + seg_array.sum()
            total_dice += dice

            del tumor_seg

    # Validation metrics are dice avg and dice global; plot them in a window
    vis.plot_two_line('Dice Coefficient',
                      total_dice / len(volumes),
                      dice_intersection / dice_union,
                      legend_name=['dice avg', 'dice global'])

    # Restore training mode
    model.train()
    model.training = True
def wdc(x):
    """Unpack the argument sequence *x* and forward it to dc.

    Presumably a helper so dc can be used with map-style APIs that pass a
    single tuple of arguments — TODO confirm against callers.
    """
    args = tuple(x)
    return dc(*args)