def count_case_accuracy(pred_root, ground_truth_root, n_cls):
    """Compute per-case Dice, pixel accuracy, IoU, sensitivity and precision.

    Walks ``pred_root`` for ``.npy`` prediction volumes, loads the
    ground-truth volume with the same filename from ``ground_truth_root``,
    and records one row of per-class scores per case.

    Parameters
    ----------
    pred_root : str
        Directory tree containing predicted segmentation ``.npy`` files.
    ground_truth_root : str
        Directory containing ground-truth ``.npy`` files with matching names.
    n_cls : int
        Number of segmentation classes.

    Returns
    -------
    tuple of np.ndarray
        ``(PAs, IoUs, DSs, Sensitivitys, Precisions)``, each of shape
        ``(n_cases, n_cls)``.  PA/IoU/Dice are scaled to percentages;
        sensitivity and precision are left as fractions (as before).
    """
    # Accumulate rows in Python lists and stack once at the end —
    # np.vstack inside the loop copies the whole array every iteration
    # (quadratic in the number of cases).
    pa_rows, iou_rows, ds_rows = [], [], []
    sens_rows, prec_rows = [], []
    print('second observer accuracy on test dataset:')
    for (root, dirs, files) in os.walk(pred_root):
        for file in files:
            if 'npy' not in file:
                continue
            gt_seg = np.load(os.path.join(ground_truth_root, file))
            pred_seg = np.load(os.path.join(root, file))
            print('unique gt_seg:', np.unique(gt_seg))
            print('unique pred_seg', np.unique(pred_seg))

            MPA, PAs = MeanPixelAccuracy(gt_seg, pred_seg, n_cls=n_cls)
            MIoU, IoUs = MeanIntersectionoverUnion(gt_seg, pred_seg,
                                                   n_cls=n_cls)
            MDS, DSs = DiceScore(gt_seg, pred_seg, n_cls=n_cls)
            Precisions = precision_score(y_true=gt_seg.flatten(),
                                         y_pred=pred_seg.flatten(),
                                         average=None)
            Sensitivitys = recall_score(y_true=gt_seg.flatten(),
                                        y_pred=pred_seg.flatten(),
                                        average=None)

            pa_rows.append(PAs)
            iou_rows.append(IoUs)
            ds_rows.append(DSs)
            sens_rows.append(Sensitivitys)
            prec_rows.append(Precisions)
            print(file + '==========')

    # Empty dataset -> (0, n_cls) arrays, matching the original's shape
    # after its seed-row deletion.
    empty = np.zeros((0, n_cls))
    casewise_PAs = (np.vstack(pa_rows) if pa_rows else empty) * 100
    casewise_IoUs = (np.vstack(iou_rows) if iou_rows else empty) * 100
    casewise_DSs = (np.vstack(ds_rows) if ds_rows else empty) * 100
    casewise_Sensitivitys = np.vstack(sens_rows) if sens_rows else empty
    casewise_Precisions = np.vstack(prec_rows) if prec_rows else empty

    return (casewise_PAs, casewise_IoUs, casewise_DSs,
            casewise_Sensitivitys, casewise_Precisions)
# ---- Example #2 (separator marker from the scraped source; commented
# out so the file stays syntactically valid) ----
def ensemble_cm_dataset(saveDir,
                        five_fold_root,
                        fold_01234,
                        labels,
                        mode='hard'):
    """Ensemble five-fold predictions, print per-case metrics and plot
    dataset-level confusion matrices.

    For each ``.npy`` file found under the first fold directory, the
    corresponding predictions from all folds are ensembled, saved to
    ``saveDir``, and scored against the ground truth.

    Parameters
    ----------
    saveDir : str
        Directory where ensembled segmentations are written.
    five_fold_root : str
        Root directory containing one sub-directory per fold.
    fold_01234 : sequence of str
        Names of the fold sub-directories; the first one is walked to
        enumerate case files.
    labels : sequence of str
        Class labels for the confusion-matrix plots.
    mode : str, optional
        Ensembling strategy forwarded to ``ensemble`` (default 'hard').

    Returns
    -------
    (np.ndarray, np.ndarray)
        The raw and row-normalized confusion matrices.

    NOTE(review): ``ground_truth_root`` is read from module scope, not a
    parameter — confirm it is defined before calling.
    """
    y_true = np.array([])
    y_predicted = np.array([])
    print('cross validation soft emsemble:')
    for (root, dirs,
         files) in os.walk(os.path.join(five_fold_root, fold_01234[0])):
        for file in files:
            if 'npy' in file:
                # Collect this case's prediction file from every fold.
                # Renamed from ``files`` to stop shadowing the os.walk
                # variable being iterated by the enclosing loop.
                fold_files = [
                    os.path.join(five_fold_root, fold_k, file)
                    for fold_k in fold_01234
                ]

                ensemble_auto_seg = ensemble(fold_files, mode=mode)
                # Reorder axes to H, W, D.
                ensemble_auto_seg = np.transpose(ensemble_auto_seg,
                                                 (1, 2, 0))

                np.save(os.path.join(saveDir, file), ensemble_auto_seg)

                # Ground-truth filename is the last '_'-separated token.
                gt_file = file.split('_')[-1]
                gt_seg = np.load(os.path.join(ground_truth_root, gt_file))

                MPA, PAs = MeanPixelAccuracy(gt_seg,
                                             ensemble_auto_seg,
                                             n_cls=4)
                MIoU, IoUs = MeanIntersectionoverUnion(gt_seg,
                                                       ensemble_auto_seg,
                                                       n_cls=4)
                MDS, DSs = DiceScore(gt_seg, ensemble_auto_seg, n_cls=4)

                print(gt_file + '==========')
                print('PA:', PAs * 100)
                print('IoU:', IoUs * 100)
                print('Dice Score:', DSs * 100)

                # Accumulate flattened voxels for the dataset-level
                # confusion matrix.
                y_true = np.append(y_true, gt_seg.flatten())
                y_predicted = np.append(y_predicted,
                                        ensemble_auto_seg.flatten())

    cm = confusion_matrix(y_true, y_predicted)
    np.set_printoptions(precision=3)
    # BUGFIX: normalize each ROW by its own sum.  Without keepdims the
    # 1-D row-sum vector broadcast along the columns, so column j was
    # divided by the sum of row j instead.
    cn_normalized = cm.astype('float') / cm.sum(axis=1, keepdims=True)
    plot_confusion_matrix(cm.astype(int),
                          labels,
                          title='Confusion Matrix',
                          cmap=plt.cm.spring,
                          dtype='int')  #cmap=plt.cm.binary#plt.cm.spring
    plt.show()
    plot_confusion_matrix(cn_normalized,
                          labels,
                          title='Normalized Confusion Matrix',
                          cmap=plt.cm.spring,
                          dtype='float')  #cmap=plt.cm.binary#plt.cm.spring
    plt.show()

    return cm, cn_normalized
def count_slice_accuracy(predicted_root, ground_truth_root, n_cls):
    """Compute per-slice pixel accuracy, IoU and Dice for every test case.

    Walks ``predicted_root`` for ``.npy`` volumes, loads the matching
    ground-truth volume from ``ground_truth_root``, and scores each axial
    slice (last axis) separately.

    Parameters
    ----------
    predicted_root : str
        Directory tree containing predicted segmentation ``.npy`` files.
    ground_truth_root : str
        Directory containing ground-truth ``.npy`` files with matching names.
    n_cls : int
        Number of segmentation classes.

    Returns
    -------
    tuple of np.ndarray
        ``(PAs, IoUs, DSs)`` of shape ``(n_slices_total, n_cls)``, each
        scaled to percentages.
    """
    # Accumulate rows in lists; np.vstack inside the loop is quadratic.
    pa_rows, iou_rows, ds_rows = [], [], []
    print('second observer accuracy on test dataset:')
    for (root, dirs, files) in os.walk(predicted_root):
        for file in files:
            if 'npy' not in file:
                continue
            gt_seg = np.load(os.path.join(ground_truth_root, file))
            pred_seg = np.load(os.path.join(root, file))
            print('unique gt_seg:', np.unique(gt_seg))
            print('unique pred_seg', np.unique(pred_seg))
            for sl in range(gt_seg.shape[-1]):
                gt_seg_slice = gt_seg[:, :, sl]
                pred_seg_slice = pred_seg[:, :, sl]
                if len(np.unique(gt_seg_slice)) < n_cls:
                    # Not every slice contains all classes (e.g. bone /
                    # disc / nerve).  Stamp one pixel of each class into
                    # the first row of both slices so the per-class
                    # metrics stay defined.  NOTE: these slices are views,
                    # so this mutates the loaded volumes in memory
                    # (not on disk).
                    gt_seg_slice[0][0:n_cls] = range(n_cls)
                    pred_seg_slice[0][0:n_cls] = range(n_cls)
                MPA, PAs = MeanPixelAccuracy(gt_seg_slice,
                                             pred_seg_slice,
                                             n_cls=n_cls)
                MIoU, IoUs = MeanIntersectionoverUnion(gt_seg_slice,
                                                       pred_seg_slice,
                                                       n_cls=n_cls)
                MDS, DSs = DiceScore(gt_seg_slice,
                                     pred_seg_slice,
                                     n_cls=n_cls)
                pa_rows.append(PAs)
                iou_rows.append(IoUs)
                ds_rows.append(DSs)
            print(file + '==========')

    # Empty dataset -> (0, n_cls), matching the original's shape after
    # its seed-row deletion.
    empty = np.zeros((0, n_cls))
    slicewise_PAs = (np.vstack(pa_rows) if pa_rows else empty) * 100
    slicewise_IoUs = (np.vstack(iou_rows) if iou_rows else empty) * 100
    slicewise_DSs = (np.vstack(ds_rows) if ds_rows else empty) * 100

    return slicewise_PAs, slicewise_IoUs, slicewise_DSs
        DSs_testdataset = []
        for CT_filename, label_filename in zip(CT_files, label_files):
            print('label_filename:', label_filename)
            image, mask = readnpy2image_mask(
                os.path.join(test_dir, 'CT', CT_filename),
                os.path.join(test_dir, 'SegmentationLabel', label_filename))
            start_time = time.time()
            output_mask = bianli(image, mask, image_W, image_H, img_frame,
                                 n_cls)
            stop_time = time.time()
            print('bianli time:', stop_time - start_time)
            trueMask = np.array(mask).flatten().astype(int)
            predMask = np.array(output_mask).flatten().astype(int)

            PA = pixelAccuracy(trueMask, predMask, n_cls)
            MPA, PAs = MeanPixelAccuracy(trueMask, predMask, n_cls)
            MIoU, IoUs = MeanIntersectionoverUnion(trueMask, predMask, n_cls)
            MDS, DSs = DiceScore(trueMask, predMask, n_cls)

            print('PA=', PA * 100)
            print('MPA=', MPA * 100, 'PAs='******'MIoU=', MIoU * 100, 'IoUs=', IoUs * 100)
            print('Mean Dice Score=', MDS * 100, 'Dice Scores=', DSs * 100)

            PAs_testdataset.append(PAs)
            IoUs_testdataset.append(IoUs)
            DSs_testdataset.append(DSs)

            print(np.shape(label_masks))
            label_masks_k = np.squeeze(mask[0, 20, :, :, 0])
            print(np.shape(label_masks_k))
# ---- Example #5 (separator marker from the scraped source; commented
# out so the file stays syntactically valid) ----
# Script fragment: compare a second observer's manual segmentations
# against the ground truth case by case, then tally dataset-level
# confusion-matrix / F1 statistics for the automatic predictions.
# NOTE(review): this fragment references names defined elsewhere
# (sec_obs_root, ground_truth_root, auto_pred_root, gt_root, n_cls,
# labels, flatten_dataset, pixelAccuracy, ...) and appears to be stitched
# from more than one scraped example — it is not runnable in isolation.
y_true = np.array([])
y_predicted = np.array([])
print('second observer accuracy on test dataset:')
for (root,dirs,files) in os.walk(sec_obs_root):
    for file in files:
        if 'npy' in file:
            # Ground truth and second-observer volume share the filename.
            gt_seg = np.load(os.path.join(ground_truth_root,file))
            sec_obs_seg = np.load(os.path.join(root,file)) 
            
            
            print('unique gt_seg:',np.unique(gt_seg))
            print('unique sec_obs_seg',np.unique(sec_obs_seg))
            
            # Per-case agreement metrics (4 classes hard-coded here).
            PA = pixelAccuracy(gt_seg,sec_obs_seg,n_cls=4)
            MPA,PAs = MeanPixelAccuracy(gt_seg,sec_obs_seg,n_cls=4)
            MIoU, IoUs = MeanIntersectionoverUnion(gt_seg,sec_obs_seg,n_cls=4)
            MDS, DSs = DiceScore(gt_seg,sec_obs_seg,n_cls=4)
            
            print(file+'==========')
            print('PA:',PAs*100)
            print('IoU:',IoUs*100)
            print('Dice Score:',DSs*100)
            
#            print(np.where( (gt_seg.flatten()==sec_obs_seg.flatten())==False))
            
            # Accumulate flattened voxels for a dataset-level
            # confusion matrix.
            y_true = np.append(y_true,gt_seg.flatten())
            y_predicted = np.append(y_predicted,sec_obs_seg.flatten())
            assert y_true.shape==y_predicted.shape
            
    # NOTE(review): everything from here down runs INSIDE the os.walk
    # loop — presumably intended to run once after the walk; confirm
    # against the original script.
    cm = confusion_matrix(y_true,y_predicted)
    # =============================================================================
    #     # Draw the mask-comparison figure
    #     h,w = 270,180
    #     savefile_name = os.path.join(auto_pred_root, 'hard_ensemble_comparision_mask.png')
    #     draw_mask_compared_figure(pred_root=auto_pred_root,ground_truth_root=gt_root,n_cls=n_cls,h=h,w=w,savefile_name=savefile_name)
    # =============================================================================

    # Tally the soft-ensemble confusion matrix
    y_true, y_pred = flatten_dataset(pred_root=auto_pred_root,
                                     ground_truth_root=gt_root,
                                     n_cls=n_cls)
    #    cm = confusion_matrix(y_true,y_pred)
    #    plot_confusion_matrix(cm.astype(int),labels=labels,title='Confusion Matrix',cmap=plt.cm.spring,dtype='int')#cmap=plt.cm.binary#plt.cm.spring
    #    plt.show()
    #    cn_normalized = cm.astype('float')/cm.sum(axis=1)
    #    plot_confusion_matrix(cn_normalized,labels,title='Normalized Confusion Matrix',cmap=plt.cm.spring,dtype='float')#cmap=plt.cm.binary#plt.cm.spring
    #    plt.show()

    #
    # Mean pixel accuracy over the whole flattened dataset.
    MPA, PAs = MeanPixelAccuracy(y_true, y_pred, n_cls=n_cls)

    # Dataset-level precision and sensitivity
    #    dataset_Precisions_auto_pred = precision_score(y_true=y_true, y_pred=y_pred, average=None)
    #    dataset_Sensitivitys_auto_pred = recall_score(y_true=y_true, y_pred=y_pred, average=None)
    # Per-class and macro-averaged F1 of the automatic predictions.
    dataset_f1scores_auto_pred = f1_score(y_true=y_true,
                                          y_pred=y_pred,
                                          average=None)
    dataset_macro_f1scores_auto_pred = f1_score(y_true=y_true,
                                                y_pred=y_pred,
                                                average='macro')