Code example #1
File: review_reply.py  Project: yuruiqi/SHZL
def compare_small_tumor():
    small_casename = get_small_tumor_case()

    # Count high- and low-grade cases among small and large tumors
    train_data = pd.read_csv(r'F:\SHZL\model\3d\ISUP\train_data.csv')
    test_data = pd.read_csv(r'F:\SHZL\model\3d\ISUP\test_data.csv')

    small_train_data = train_data[train_data['CaseName'].isin(small_casename)]
    small_test_data = test_data[test_data['CaseName'].isin(small_casename)]

    n_small_train_high = small_train_data[small_train_data['label'] == 1].shape[0]
    n_small_train_low = small_train_data[small_train_data['label'] == 0].shape[0]
    n_small_test_high = small_test_data[small_test_data['label'] == 1].shape[0]
    n_small_test_low = small_test_data[small_test_data['label'] == 0].shape[0]
    print('small:\ntrain: high %d, low %d\ntest: high %d, low %d' % (n_small_train_high, n_small_train_low, n_small_test_high, n_small_test_low))

    large_train_data = train_data[~train_data['CaseName'].isin(small_casename)]
    large_test_data = test_data[~test_data['CaseName'].isin(small_casename)]

    n_large_train_high = large_train_data[large_train_data['label'] == 1].shape[0]
    n_large_train_low = large_train_data[large_train_data['label'] == 0].shape[0]
    n_large_test_high = large_test_data[large_test_data['label'] == 1].shape[0]
    n_large_test_low = large_test_data[large_test_data['label'] == 0].shape[0]
    print('large:\ntrain: high %d, low %d\ntest: high %d, low %d' % (n_large_train_high, n_large_train_low, n_large_test_high, n_large_test_low))

    # Small-tumor performance of the original model
    pred_result = pd.read_csv(r'F:\SHZL\model\3d\ISUP\united_model_7\lr\pred.csv')
    prob_result = pd.read_csv(r'F:\SHZL\model\3d\ISUP\united_model_7\lr\prob.csv')

    label = pred_result['label']
    prob = prob_result['n+a+v']
    prob_n_a = prob_result['n+a']

    small_label = pred_result[pred_result['CaseName'].isin(small_casename)]['label']
    small_prob = prob_result[prob_result['CaseName'].isin(small_casename)]['n+a+v']
    small_prob_n_a = prob_result[prob_result['CaseName'].isin(small_casename)]['n+a']

    large_label = pred_result[~pred_result['CaseName'].isin(small_casename)]['label']
    large_prob = prob_result[~prob_result['CaseName'].isin(small_casename)]['n+a+v']
    large_prob_n_a = prob_result[~prob_result['CaseName'].isin(small_casename)]['n+a']

    CS = BinaryClassification(is_show=False)

    CS.Run(prob_n_a.tolist(), label.tolist())
    print('all\n', CS._metric)

    CS.Run(small_prob_n_a.tolist(), small_label.tolist())
    print('small\n', CS._metric)

    CS.Run(large_prob_n_a.tolist(), large_label.tolist())
    print('large\n', CS._metric)
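All examples on this page share the same evaluation pattern: construct BinaryClassification, call Run with a list of predicted probabilities and a list of 0/1 labels, then read the _metric dictionary. A minimal sketch of that pattern on toy data; the import path is an assumption (none of the snippets here show it):

# Minimal sketch of the BinaryClassification pattern used throughout this page.
# The import path below is an assumption; the toy lists are illustrative only.
from MeDIT.Statistics import BinaryClassification

probs = [0.9, 0.2, 0.7, 0.4]   # predicted probabilities of the positive class
labels = [1, 0, 1, 0]          # ground-truth binary labels

cs = BinaryClassification(is_show=False)
cs.Run(probs, labels)
print(cs._metric)  # dict with keys such as 'accuracy', 'AUC', '95 CIs Lower' (see Example #3)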
Code example #2
File: review_reply.py  Project: yuruiqi/SHZL
def compare_model():
    all_feature_prob_result = pd.read_csv(r'F:\SHZL\model\3d\ISUP\united_model_7\lr\prob.csv')
    shape_result = pd.read_csv(r'F:\SHZL\model\3d\ISUP\review_reply\shape_model_result.csv')

    all_prob = all_feature_prob_result['n+a+v']

    label = shape_result['label'].tolist()
    label = [round(x) for x in label]
    united_pred = shape_result['united_pred'].tolist()
    united_prob = shape_result['united_prob'].tolist()

    CS = BinaryClassification(is_show=False)
    CS.Run(united_prob, label)
    print(CS._metric)
    # print('auc: ',classification_statistics.get_auc(united_prob, label, draw=False))
    print(stats.wilcoxon(all_prob, united_prob))
Code example #3
def examine_model(prob_csv):
    prob_data = pd.read_csv(prob_csv)
    label = prob_data['label'].values.tolist()
    model_names = prob_data.columns.tolist()
    model_names.remove('CaseName')
    model_names.remove('label')

    for model_name in model_names:
        model_prob = prob_data[model_name].values.tolist()
        CS = BinaryClassification(is_show=False)
        CS.Run(model_prob, label)
        print(
            '{}:\tacc: {:.3f},\tsen: {:.3f},\tspe: {:.3f},\tauc: {:.3f} ({:.3f}-{:.3f})'
            .format(model_name, CS._metric['accuracy'],
                    CS._metric['sensitivity'], CS._metric['specificity'],
                    CS._metric['AUC'], CS._metric['95 CIs Lower'],
                    CS._metric['95 CIs Upper']))
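A hypothetical call to examine_model; the path reuses the prob.csv from Example #1, and the expectation that the file holds a 'CaseName' column, a 'label' column, and one probability column per model is inferred from the column handling above:

# Hypothetical usage; the CSV layout (CaseName, label, one column per model)
# is an inference from how examine_model drops columns above.
examine_model(r'F:\SHZL\model\3d\ISUP\united_model_7\lr\prob.csv')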
Code example #4
File: united_model.py  Project: yuruiqi/SHZL
def print_result(name, train_label, prob_train, test_label, prob_test):
    # auc_train = roc_auc_score(train_label, prob_train)
    # auc_test = roc_auc_score(test_label, prob_test)
    # acc_train = accuracy_score(train_label, pred_train)
    # acc_test = accuracy_score(test_label, pred_test)
    # sen_train = recall_score(train_label, pred_train)
    # sen_test = recall_score(test_label, pred_test)
    # spe_train = specificity_score(train_label, pred_train)
    # spe_test = specificity_score(test_label, pred_test)
    #
    # space = ' '*len(name)
    #
    # print('{} \n'
    #       'training: auc:{:.3f},\tacc: {:.3f},\tsen: {:.3f},\tspe: {:.3f}\n'
    #       'testing : auc:{:.3f},\tacc: {:.3f},\tsen: {:.3f},\tspe: {:.3f}'
    #       .format(name, auc_train, acc_train,sen_train, spe_train, auc_test,acc_test, sen_test, spe_test))

    CS_train = BinaryClassification(is_show=False)
    CS_train.Run(prob_train.tolist(), train_label.tolist())
    print(name, 'train: ', CS_train._metric)

    CS_test = BinaryClassification(is_show=False)
    CS_test.Run(prob_test.tolist(), test_label.tolist())
    print(name, 'test: ', CS_test._metric)
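A hypothetical call to print_result with toy pandas Series; the .tolist() calls above only require array-like inputs, so the data below is illustrative:

import pandas as pd

# Toy inputs; print_result needs objects with a .tolist() method,
# e.g. pandas Series or numpy arrays, plus a model name for the printout.
train_label = pd.Series([1, 0, 1, 0, 1, 0])
prob_train = pd.Series([0.81, 0.32, 0.66, 0.45, 0.91, 0.12])
test_label = pd.Series([1, 0, 1, 0])
prob_test = pd.Series([0.72, 0.28, 0.55, 0.40])

print_result('n+a+v', train_label, prob_train, test_label, prob_test)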
Code example #5
File: test.py  Project: Cherishzyh/ProstrateECE
def EnsembleInferencebyCaseName(data_type, weights_list=None):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    input_shape = (1, 280, 280)
    model_folder = model_root + '/PAGNet0430'
    bc = BinaryClassification()

    ct_folder = os.path.join(data_root, 'ct_slice')
    atten_folder = os.path.join(data_root, 'atten_slice')
    label_path = os.path.join(data_root, '{}_label.csv'.format(data_type))
    info = pd.read_csv(label_path, index_col=0)
    case_name_list = info.index.tolist()

    cv_folder_list = [
        one for one in IterateCase(model_folder, only_folder=True, verbose=0)
    ]
    cv_pred_list, cv_label_list = [], []
    for cv_index, cv_folder in enumerate(cv_folder_list):
        model = ResNeXtOneInput(1, 2).to(device)
        if weights_list is None:
            one_fold_weights_list = [
                one
                for one in IterateCase(cv_folder, only_folder=False, verbose=0)
                if one.is_file()
            ]
            one_fold_weights_list = sorted(
                one_fold_weights_list, key=lambda x: os.path.getctime(str(x)))
            weights_path = one_fold_weights_list[-1]
        else:
            weights_path = weights_list[cv_index]

        print(weights_path.name)
        model.load_state_dict(torch.load(str(weights_path)))

        pred_list, label_list = [], []
        if cv_index == 0:
            case_list = []
        model.eval()
        for index, case_name in enumerate(case_name_list):
            ct_case = os.path.join(ct_folder, '{}.npy'.format(case_name))
            atten_case = os.path.join(atten_folder, '{}.npy'.format(case_name))
            label = torch.tensor(info.loc[case_name]['label'])

            ct, _ = ExtractBlock(np.load(ct_case),
                                 patch_size=input_shape,
                                 center_point=[-1, -1, -1])
            atten, _ = ExtractBlock(np.load(atten_case),
                                    patch_size=input_shape,
                                    center_point=[-1, -1, -1])
            ct = ct.astype(np.float32)
            atten = atten.astype(np.float32)

            inputs = [
                torch.from_numpy(ct[np.newaxis, ...]),
                torch.from_numpy(atten[np.newaxis, ...])
            ]

            inputs = MoveTensorsToDevice(inputs, device)
            outputs = MoveTensorsToDevice(label, device)

            model_pred = model(*inputs)
            preds = model_pred[:, 1]
            # pred_list.extend((1 - preds).cpu().data.numpy().squeeze().tolist())
            # label_list.extend((1 - outputs).cpu().data.numpy().astype(int).squeeze().tolist())
            # fcn_out_list.extend(model_pred[1].cpu().data.numpy().squeeze().tolist())

            pred_list.append((preds).cpu().data.numpy())
            label_list.append((outputs).cpu().data.numpy().astype(int))
            if cv_index == 0:
                case_list.append(case_name)

        # bc.Run(pred_list, label_list)

        cv_pred_list.append(pred_list)
        cv_label_list.append(label_list)
        # cv_fcn_out_list.append(fcn_out_list)

        del model, weights_path

    cv_pred = np.squeeze(np.array(cv_pred_list))
    cv_label = np.squeeze(np.array(cv_label_list))
    mean_pred = np.mean(cv_pred, axis=0)
    mean_label = np.mean(cv_label, axis=0)

    if not os.path.exists(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result'):
        os.mkdir(r'/home/zhangyihong/Documents/Kindey901/Model/Result')

    if data_type == 'test':
        np.save(r'/home/zhangyihong/Documents/Kindey901/Model/Result/test.npy',
                mean_pred)
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/test_label.npy',
            mean_label)
        df = pd.DataFrame({
            'CaseName': case_list,
            'label': mean_label,
            'pred': mean_pred
        })
        df.to_csv(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/test_result.csv',
            index=False)
    else:
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/train.npy',
            mean_pred)
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/train_label.npy',
            mean_label)
        df = pd.DataFrame({
            'CaseName': case_list,
            'label': mean_label,
            'pred': mean_pred
        })
        df.to_csv(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/train_result.csv',
            index=False)

    print(mean_label)
    bc.Run(mean_pred.tolist(), mean_label.astype(int).tolist())
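The ensembling step above averages per-case probabilities over the cross-validation folds; because every fold iterates the same case_name_list in the same order, the label rows are identical and averaging them returns the labels unchanged, which is why mean_label can safely be cast back to int. A minimal sketch of that step with toy arrays:

import numpy as np

# Rows are folds, columns are cases.
cv_pred = np.array([[0.8, 0.3, 0.6],
                    [0.7, 0.4, 0.5],
                    [0.9, 0.2, 0.7]])
cv_label = np.array([[1, 0, 1]] * 3)   # identical labels in every fold

mean_pred = cv_pred.mean(axis=0)                 # ensemble probability per case
mean_label = cv_label.mean(axis=0).astype(int)   # recovers the original labels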
Code example #6
File: test.py  Project: Cherishzyh/ProstrateECE
def ROCofModels(weights_list=None, data_type=['alltrain', 'test']):
    from Metric.classification_statistics import get_auc, draw_roc
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    input_shape = (192, 192)
    batch_size = 2
    model_folder = model_root + '/ResNeXt_CBAM_CV_20200814'
    bc = BinaryClassification()
    fpr_list, tpr_list, auc_list = [], [], []
    for type in data_type:
        spliter = DataSpliter()
        # sub_list = spliter.LoadName(data_root / '{}-name.csv'.format(data_type), contain_label=True)
        sub_list = spliter.LoadName(data_root + '/{}-name.csv'.format(type))

        if type == 'test':
            data = DataManager(sub_list=sub_list)
            data.AddOne(Image2D(data_root + '/T2Slice/Test',
                                shape=input_shape))
            data.AddOne(
                Image2D(data_root + '/AdcSlice/Test', shape=input_shape))
            data.AddOne(
                Image2D(data_root + '/DwiSlice/Test', shape=input_shape))
            data.AddOne(
                Image2D(data_root + '/DistanceMap/Test',
                        shape=input_shape,
                        is_roi=True))
            data.AddOne(Label(data_root + '/label.csv', label_tag='Negative'),
                        is_input=False)
            data_loader = DataLoader(data,
                                     batch_size=batch_size,
                                     shuffle=False)
        else:
            data = DataManager(sub_list=sub_list)
            data.AddOne(Image2D(data_root + '/T2Slice', shape=input_shape))
            data.AddOne(Image2D(data_root + '/AdcSlice', shape=input_shape))
            data.AddOne(Image2D(data_root + '/DwiSlice/', shape=input_shape))
            data.AddOne(
                Image2D(data_root + '/DistanceMap',
                        shape=input_shape,
                        is_roi=True))
            data.AddOne(Label(data_root + '/label.csv', label_tag='Negative'),
                        is_input=False)
            data_loader = DataLoader(data,
                                     batch_size=batch_size,
                                     shuffle=False)

        cv_folder_list = [
            one
            for one in IterateCase(model_folder, only_folder=True, verbose=0)
        ]
        cv_pred_list, cv_label_list = [], []

        for cv_index, cv_folder in enumerate(cv_folder_list):
            model = ResNeXt(3, 2).to(device)
            if weights_list is None:
                one_fold_weights_list = [
                    one for one in IterateCase(
                        cv_folder, only_folder=False, verbose=0)
                    if one.is_file()
                ]
                one_fold_weights_list = sorted(
                    one_fold_weights_list,
                    key=lambda x: os.path.getctime(str(x)))
                weights_path = one_fold_weights_list[-1]
            else:
                weights_path = weights_list[cv_index]

            print(weights_path.name)
            model.load_state_dict(torch.load(str(weights_path)))

            pred_list, label_list = [], []
            model.eval()
            for inputs, outputs in data_loader:
                inputs = MoveTensorsToDevice(inputs, device)
                outputs = MoveTensorsToDevice(outputs, device)

                preds = model(*inputs)[:, 1]
                pred_list.extend(
                    (1 - preds).cpu().data.numpy().squeeze().tolist())
                label_list.extend((
                    1 -
                    outputs).cpu().data.numpy().astype(int).squeeze().tolist())
                # pred_list.extend((preds).cpu().data.numpy().squeeze().tolist())
                # label_list.extend((outputs).cpu().data.numpy().astype(int).squeeze().tolist())

            fpr, tpr, auc = get_auc(pred_list, label_list)
            fpr_list.append(fpr)
            tpr_list.append(tpr)
            auc_list.append(auc)

            cv_pred_list.append(pred_list)
            cv_label_list.append(label_list)

            del model, weights_path

        cv_pred = np.array(cv_pred_list)
        cv_label = np.array(cv_label_list)
        mean_pred = np.mean(cv_pred, axis=0)
        mean_label = np.mean(cv_label, axis=0)

        fpr, tpr, auc = get_auc(mean_pred, mean_label)
        fpr_list.append(fpr)
        tpr_list.append(tpr)
        auc_list.append(auc)

    # draw_roc(fpr_list, tpr_list, auc_list, name_list=['cv0', 'cv1', 'cv2', 'cv3', 'cv4', 'alltrian'])
    name_list = [
        'model1', 'model2', 'model3', 'model4', 'model5', 'model combined'
    ]
    for idx in range(len(fpr_list)):
        label = name_list[idx] + ': ' + '%.3f' % auc_list[idx]
        plt.plot(fpr_list[idx], tpr_list[idx], label=label)

    plt.plot([0, 1], [0, 1], '--', color='r')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend()
    plt.show()
Code example #7
File: test.py  Project: Cherishzyh/ProstrateECE
def EnsembleInference(data_type, weights_list=None):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    input_shape = (192, 192)
    batch_size = 20
    model_folder = model_root + '/PAGNet0429'
    bc = BinaryClassification()

    spliter = DataSpliter()
    sub_list = spliter.LoadName(data_root + '/{}-name.csv'.format(data_type))

    # if data_type == 'test':
    #     data = DataManager()
    #     data.AddOne(Image2D(data_root + '/Test/T2Slice', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Test/AdcSlice', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Test/DwiSlice', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Test/DistanceMap', shape=input_shape, is_roi=True))
    #     data.AddOne(Label(data_root + '/ece.csv'), is_input=False)
    #     data_loader = DataLoader(data, batch_size=batch_size, shuffle=False)
    # else:
    #     data = DataManager()
    #     data.AddOne(Image2D(data_root + '/Train/T2Slice', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Train/AdcSlice', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Train/DwiSlice/', shape=input_shape))
    #     data.AddOne(Image2D(data_root + '/Train/DistanceMap', shape=input_shape, is_roi=True))
    #     data.AddOne(Label(data_root + '/ece.csv'), is_input=False)
    #     data_loader = DataLoader(data, batch_size=batch_size, shuffle=False)

    data = DataManager(sub_list=sub_list)
    data.AddOne(Image2D(data_root + '/ct_slice', shape=input_shape))
    data.AddOne(
        Image2D(data_root + '/atten_slice', shape=input_shape, is_roi=True))
    data.AddOne(Label(data_root + '/label.csv'), is_input=False)
    data_loader = DataLoader(data, batch_size=batch_size, shuffle=False)

    cv_folder_list = [
        one for one in IterateCase(model_folder, only_folder=True, verbose=0)
    ]
    cv_pred_list, cv_label_list = [], []
    cv_fcn_out_list = []
    for cv_index, cv_folder in enumerate(cv_folder_list):
        model = ResNeXtOneInput(1, 2).to(device)
        if weights_list is None:
            one_fold_weights_list = [
                one
                for one in IterateCase(cv_folder, only_folder=False, verbose=0)
                if one.is_file()
            ]
            one_fold_weights_list = sorted(
                one_fold_weights_list, key=lambda x: os.path.getctime(str(x)))
            weights_path = one_fold_weights_list[-1]
        else:
            weights_path = weights_list[cv_index]

        print(weights_path.name)
        model.load_state_dict(torch.load(str(weights_path)))

        pred_list, label_list = [], []
        fcn_out_list = []
        model.eval()
        for inputs, outputs in data_loader:
            inputs = MoveTensorsToDevice(inputs, device)
            outputs = MoveTensorsToDevice(outputs, device)

            model_pred = model(*inputs)
            preds = model_pred[:, 1]
            # pred_list.extend((1 - preds).cpu().data.numpy().squeeze().tolist())
            # label_list.extend((1 - outputs).cpu().data.numpy().astype(int).squeeze().tolist())
            # fcn_out_list.extend(model_pred[1].cpu().data.numpy().squeeze().tolist())

            pred_list.extend((preds).cpu().data.numpy().squeeze().tolist())
            label_list.extend(
                (outputs).cpu().data.numpy().astype(int).squeeze().tolist())

        # bc.Run(pred_list, label_list)

        cv_pred_list.append(pred_list)
        cv_label_list.append(label_list)
        # cv_fcn_out_list.append(fcn_out_list)

        del model, weights_path

    cv_pred = np.array(cv_pred_list)
    cv_label = np.array(cv_label_list)
    # cv_fcn = np.array(cv_fcn_out_list)
    mean_pred = np.mean(cv_pred, axis=0)
    mean_label = np.mean(cv_label, axis=0)
    # mean_fcn_out = np.mean(cv_fcn, axis=0)
    # if data_type == 'test':
    #     np.save(r'/home/zhangyihong/Documents/ProstateECE/NPYMaxPred/Result/PAGNet_test.npy', mean_pred)
    #     np.save(r'/home/zhangyihong/Documents/ProstateECE/NPYMaxPred/Result/PAGNet_test_label.npy', mean_label)
    # else:
    #     np.save(r'/home/zhangyihong/Documents/ProstateECE/NPYMaxPred/Result/PAGNet_train.npy', mean_pred)
    #     np.save(r'/home/zhangyihong/Documents/ProstateECE/NPYMaxPred/Result/PAGNet_label.npy', mean_label)
    # os.mkdir(r'/home/zhangyihong/Documents/Kindey901/Model/Result')
    if data_type == 'test':
        np.save(r'/home/zhangyihong/Documents/Kindey901/Model/Result/test.npy',
                mean_pred)
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/test_label.npy',
            mean_label)
    else:
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/train.npy',
            mean_pred)
        np.save(
            r'/home/zhangyihong/Documents/Kindey901/Model/Result/train_label.npy',
            mean_label)

    print(mean_label)
    bc.Run(mean_pred.tolist(), mean_label.astype(int).tolist())
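The snippets above are excerpts and omit the imports of their source files. A plausible preamble, reconstructed from the names actually used (the project-specific helpers are listed as comments because their module paths are not shown anywhere on this page), would be:

import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from scipy import stats

# Also required, from project-specific or third-party modules whose import
# paths are not shown in these snippets (assumptions, left as comments):
# BinaryClassification, IterateCase, ExtractBlock, MoveTensorsToDevice,
# DataManager, DataSpliter, Image2D, Label, DataLoader,
# ResNeXt, ResNeXtOneInput, get_small_tumor_case,
# plus the module-level constants model_root and data_root.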