import numpy as np
from medpy import metric
# to_categorical is assumed to come from Keras; swap in a local one-hot helper if
# that is what the original project used.
from tensorflow.keras.utils import to_categorical


def calculate_metric_percase(pred, gt, num_classes):
    """Metric computation for binary and multi-class segmentation."""
    if num_classes is None:
        num_classes = len(np.unique(gt))  # note: gt is not one-hot encoded
        print('np.unique(gt):', np.unique(gt))
    if num_classes == 2:
        dice = metric.binary.dc(pred, gt)
        jc = metric.binary.jc(pred, gt)
        hd = metric.binary.hd95(pred, gt)
        asd = metric.binary.asd(pred, gt)
    elif num_classes > 2:
        gt_onehot = to_categorical(gt, num_classes)
        pred_onehot = to_categorical(pred, num_classes)
        dice = []
        jc = []
        hd = []
        asd = []
        for k in range(num_classes):
            pred_k = pred_onehot[..., k]
            gt_k = gt_onehot[..., k]
            dice += [metric.dc(result=pred_k, reference=gt_k)]
            jc += [metric.jc(result=pred_k, reference=gt_k)]
            hd += [metric.hd95(result=pred_k, reference=gt_k)]
            asd += [metric.asd(result=pred_k, reference=gt_k)]
    else:
        # pred and gt must be label maps, so a valid num_classes is always >= 2
        raise ValueError("num_classes must be >= 2; pred and gt must not be one-hot encoded")
    return dice, jc, hd, asd
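# Hedged usage sketch for calculate_metric_percase (not from the original source):
# two toy 3D label maps, using the numpy / medpy imports above.
if __name__ == '__main__':
    gt_toy = np.zeros((32, 32, 32), dtype=np.uint8)
    gt_toy[8:20, 8:20, 8:20] = 1
    pred_toy = np.zeros_like(gt_toy)
    pred_toy[10:22, 10:22, 10:22] = 1
    dice, jc, hd, asd = calculate_metric_percase(pred_toy, gt_toy, num_classes=2)
    print('dice=%.3f jaccard=%.3f hd95=%.2f asd=%.2f' % (dice, jc, hd, asd))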
def calculate_metrics(mask1, mask2):
    true_positives = metric.obj_tpr(mask1, mask2)
    false_positives = metric.obj_fpr(mask1, mask2)
    dc = metric.dc(mask1, mask2)
    hd = metric.hd(mask1, mask2)
    precision = metric.precision(mask1, mask2)
    recall = metric.recall(mask1, mask2)
    ravd = metric.ravd(mask1, mask2)
    assd = metric.assd(mask1, mask2)
    asd = metric.asd(mask1, mask2)
    return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
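# Hedged usage sketch for calculate_metrics, assuming numpy and medpy.metric are
# imported as above; mask_a / mask_b are illustrative toy masks only. Note that
# medpy's distance metrics raise if either mask is empty.
mask_a = np.zeros((64, 64), dtype=bool)
mask_a[10:30, 10:30] = True
mask_b = np.zeros((64, 64), dtype=bool)
mask_b[12:32, 12:32] = True
results = calculate_metrics(mask_a, mask_b)
print('obj_tpr=%.3f obj_fpr=%.3f dice=%.3f hd=%.2f' % results[:4])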
def avg_surface_distance(test=None, reference=None, confusion_matrix=None,
                         nan_for_nonexisting=True, voxel_spacing=None,
                         connectivity=1, **kwargs):
    if confusion_matrix is None:
        confusion_matrix = ConfusionMatrix(test, reference)

    test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence()

    if test_empty or test_full or reference_empty or reference_full:
        if nan_for_nonexisting:
            return float("NaN")
        else:
            return 0

    test, reference = confusion_matrix.test, confusion_matrix.reference

    return metric.asd(test, reference, voxel_spacing, connectivity)
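# avg_surface_distance relies on a ConfusionMatrix helper that is not defined in
# this file. The class below is an assumption about its minimal interface (test /
# reference attributes plus get_existence), just enough to make the function
# runnable; the original class may cache additional counts.
class ConfusionMatrix(object):
    def __init__(self, test=None, reference=None):
        self.test = np.asarray(test).astype(bool)
        self.reference = np.asarray(reference).astype(bool)

    def get_existence(self):
        # report whether either mask is entirely empty or entirely full
        test_empty = not self.test.any()
        test_full = self.test.all()
        reference_empty = not self.reference.any()
        reference_full = self.reference.all()
        return test_empty, test_full, reference_empty, reference_full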
from sklearn.metrics import roc_auc_score


def calculate_validation_metrics(probas_pred, image_gt, class_labels=None, num_classes=5):
    classes = np.arange(probas_pred.shape[-1])
    # determine valid classes (those that actually appear in image_gt); some images may miss some classes
    classes = [c for c in classes if np.sum(image_gt == c) != 0]
    image_pred = probas_pred.argmax(-1)
    assert image_gt.shape == image_pred.shape
    accuracy = np.sum(image_gt == image_pred) / float(image_pred.size)
    class_metrics = {}
    y_true = convert_seg_flat_to_binary_label_indicator_array(
        image_gt.ravel(), num_classes).astype(int)[:, classes]
    y_pred = probas_pred.transpose(3, 0, 1, 2).reshape(num_classes, -1).transpose(1, 0)[:, classes]
    scores = roc_auc_score(y_true, y_pred, average=None)
    for i, c in enumerate(classes):
        true_positives = metric.obj_tpr(image_gt == c, image_pred == c)
        false_positives = metric.obj_fpr(image_gt == c, image_pred == c)
        dc = metric.dc(image_gt == c, image_pred == c)
        hd = metric.hd(image_gt == c, image_pred == c)
        precision = metric.precision(image_gt == c, image_pred == c)
        recall = metric.recall(image_gt == c, image_pred == c)
        ravd = metric.ravd(image_gt == c, image_pred == c)
        assd = metric.assd(image_gt == c, image_pred == c)
        asd = metric.asd(image_gt == c, image_pred == c)
        label = c
        if class_labels is not None and c in class_labels.keys():
            label = class_labels[c]
        class_metrics[label] = {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'DICE\t\t': dc,
            'Hausdorff dist': hd,
            'precision\t': precision,
            'recall\t\t': recall,
            'rel abs vol diff': ravd,
            'avg surf dist symm': assd,
            'avg surf dist\t': asd,
            'roc_auc\t\t': scores[i]
        }
    return accuracy, class_metrics
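# convert_seg_flat_to_binary_label_indicator_array is not defined in this file.
# A plausible minimal implementation (an assumption, not the original helper) is a
# plain one-hot encoding of the flattened label map, shape (n_voxels, num_classes),
# which is what roc_auc_score expects as y_true above.
def convert_seg_flat_to_binary_label_indicator_array(seg_flat, num_classes):
    indicator = np.zeros((seg_flat.shape[0], num_classes), dtype=np.uint8)
    indicator[np.arange(seg_flat.shape[0]), seg_flat.astype(int)] = 1
    return indicator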
batch_y = Y_test[i_c:i_c + 1, :, :, :, :].copy()

# per-class Dice between one-hot ground truth and the hard prediction
for j_c in range(n_class):
    dice_c[i_c, j_c] = dk_seg.dice(batch_y[0, :, :, :, j_c] == 1, y_tr_pr_c == j_c)

y_t_c = np.argmax(batch_y[0, :, :, :, :], axis=-1)

#dk_aux.save_pred_thumbs(batch_x[0,:,:,:,0], y_t_c, y_tr_pr_c, False, i_c, i_eval, images_dir)
'''if i_eval==0:
    save_pred_mhds(batch_x[0,:,:,:,0], y_t_c, y_tr_pr_c, False, i_c, i_eval)
else:
    save_pred_mhds(None, None, y_tr_pr_c, False, i_c, i_eval)'''

# surface-distance metrics between ground truth and prediction
dice_c[i_c, n_class] = hd95(y_t_c, y_tr_pr_c)
dice_c[i_c, n_class + 1] = asd(y_t_c, y_tr_pr_c)
dice_c[i_c, n_class + 2] = assd(y_t_c, y_tr_pr_c)

# soft (probabilistic) foreground prediction used for the calibration metrics
y_tr_pr_soft = y_tr_pr_sum[:, :, :, 1] / (y_tr_pr_cnt + 1e-10)

#dk_aux.save_pred_soft_thumbs(batch_x[0,:,:,:,0], y_t_c, y_tr_pr_c, y_tr_pr_soft, False, i_c, i_eval, images_dir)

error_mask = dk_aux.seg_2_anulus(y_t_c, radius=2.0)

plot_save_path = None
ECE, MCE, ECE_curve = dk_aux.estimate_ECE_and_MCE(y_t_c, y_tr_pr_soft, plot_save_path=plot_save_path)
dice_c[i_c, n_class + 3] = ECE
dice_c[i_c, n_class + 4] = MCE

plot_save_path = None
ECE, MCE, ECE_curve = dk_aux.estimate_ECE_and_MCE_masked(y_t_c, y_tr_pr_soft, error_mask, plot_save_path=plot_save_path)
dice_c[i_c, n_class + 5] = ECE
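# dk_aux.estimate_ECE_and_MCE is an external helper. The sketch below shows a
# standard binned expected / maximum calibration error for a binary soft map; it
# is a hedged reference for what such a helper typically computes, not the dk_aux
# implementation itself.
def ece_mce_sketch(y_true, y_prob, n_bins=10):
    y_true = (np.asarray(y_true) > 0).ravel().astype(float)
    y_prob = np.asarray(y_prob).ravel()
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece, mce, curve = 0.0, 0.0, []
    for lo, hi in zip(bins[:-1], bins[1:]):
        # include the right edge only for the last bin
        in_bin = (y_prob >= lo) & ((y_prob <= hi) if hi == 1.0 else (y_prob < hi))
        if not in_bin.any():
            continue
        conf = y_prob[in_bin].mean()   # mean predicted probability in the bin
        acc = y_true[in_bin].mean()    # observed frequency of the positive class
        gap = abs(acc - conf)
        ece += in_bin.mean() * gap     # weighted by bin occupancy
        mce = max(mce, gap)
        curve.append((conf, acc))
    return ece, mce, curve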
print(' ')
# =============================================================================
# Hausdorff distance
# =============================================================================
distanceHD1 = mdm.hd(cHull_binary, cHull_Mbinary, connectivity=1)
print('Hausdorff distance', distanceHD1)
# =============================================================================
# Average surface distance
# =============================================================================
distanceASD = mdm.asd(cHull_binary, cHull_Mbinary, connectivity=1)
print('Average surface distance', distanceASD)
# =============================================================================
# Average symmetric surface distance
# =============================================================================
distanceASSD = mdm.assd(cHull_binary, cHull_Mbinary, connectivity=1)
print('Average symmetric surface distance', distanceASSD)
# =============================================================================
# Average surface distance between object surfaces
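# =============================================================================
# Hedged usage sketch (not from the original script): the calls above assume mdm
# is medpy.metric.binary imported under that alias. The toy volumes below only
# illustrate the same hd / asd / assd calls; they are not the original
# cHull_binary / cHull_Mbinary volumes.
# =============================================================================
import numpy as np
from medpy.metric import binary as mdm_sketch  # separate alias to avoid clashing with mdm
toy_a = np.zeros((40, 40, 40), dtype=bool)
toy_a[5:25, 5:25, 5:25] = True
toy_b = np.zeros((40, 40, 40), dtype=bool)
toy_b[8:28, 8:28, 8:28] = True
print('Hausdorff distance (toy)', mdm_sketch.hd(toy_a, toy_b, connectivity=1))
print('Average surface distance (toy)', mdm_sketch.asd(toy_a, toy_b, connectivity=1))
print('Average symmetric surface distance (toy)', mdm_sketch.assd(toy_a, toy_b, connectivity=1))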
lbl_p[..., idx] = utils.preprocess_lbl_slice(lbl[..., idx])

# predict
pred = []
for idx in xrange(img.shape[2]):
    net1.blobs['data'].data[0, 0, ...] = img_p[..., idx]
    pred.append(net1.forward()['prob'][0, 1] > 0.5)
pred = np.array(pred).transpose(1, 2, 0)

# create volume instance from medpy
v = volume.Volume(pred, lbl_p)

# calculate metrics as in the original paper
voe = v.get_volumetric_overlap_error()
rvd = v.get_relative_volume_difference()
asd = metric.asd(pred, lbl_p)
msd = metric.hd(pred, lbl_p)
dice = metric.dc(pred, lbl_p) * 100  # convert to percentage

perf_metrics.append([voe, rvd, asd, msd, dice])
print('subject %d: %s' % (idx_subject, str([voe, rvd, asd, msd, dice])))

perf_metrics = np.array(perf_metrics)
perf_metrics_mean = np.mean(perf_metrics, axis=0)
print('inference complete: mean of performance metrics')
print(perf_metrics_mean)

"""
# visualize the results
for idx in xrange(30, 100, 20):
    utils.imshow(img[..., idx], img_p[..., idx], lbl_p[..., idx], pred[..., idx])