Example #1
def convert_all_to_BraTS(input_folder, output_folder, expected_num_cases=125):
    for s in subdirs(input_folder, join=False):
        nii = subfiles(join(input_folder, s), suffix='.nii.gz', join=False)
        if len(nii) != expected_num_cases:
            print(s)
        else:
            target_dir = join(output_folder, s)
            convert_labels_back_to_BraTS_2018_2019_convention(join(input_folder, s), target_dir, num_processes=6)
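The conversion helper used above, convert_labels_back_to_BraTS_2018_2019_convention, is part of nnU-Net. As a hedged sketch of what such a conversion presumably does (assuming nnU-Net's internal BraTS label convention of 1 = edema, 2 = necrosis/non-enhancing tumor, 3 = enhancing tumor, which the BraTS 2018/2019 submission format stores as 2, 1 and 4 respectively), a per-case version could look like this; convert_case_sketch and convert_folder_sketch are illustrative names, not the nnU-Net API:

import os
import numpy as np
import SimpleITK as sitk

def convert_case_sketch(in_file, out_file):
    """Map an nnU-Net BraTS segmentation back to the official BraTS labels (assumed mapping)."""
    img = sitk.ReadImage(in_file)
    seg = sitk.GetArrayFromImage(img)
    new_seg = np.zeros_like(seg)
    new_seg[seg == 1] = 2  # edema: assumed internal label 1 -> BraTS 2
    new_seg[seg == 2] = 1  # necrosis / non-enhancing: assumed internal 2 -> BraTS 1
    new_seg[seg == 3] = 4  # enhancing tumor: assumed internal 3 -> BraTS 4
    out = sitk.GetImageFromArray(new_seg)
    out.CopyInformation(img)  # keep spacing, origin and direction
    sitk.WriteImage(out, out_file)

def convert_folder_sketch(input_folder, output_folder):
    os.makedirs(output_folder, exist_ok=True)
    for f in os.listdir(input_folder):
        if f.endswith('.nii.gz'):
            convert_case_sketch(os.path.join(input_folder, f), os.path.join(output_folder, f))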
Example #2
def score_and_postprocess_model_based_on_rank_then_aggregate():
    """
    As in BraTS 2017 - BraTS 2019, each participant is ranked for each of the X test cases. Each case includes
    3 regions of evaluation, and the metrics used to produce the rankings are the Dice Similarity Coefficient and
    the 95% Hausdorff distance. Thus, for the X cases included in BraTS 2020, each participant ends up with X*3*2
    rankings. The final ranking score is the average of all these rankings, normalized by the number of teams.
    https://zenodo.org/record/3718904

    -> let's optimize for this.

    Important: the outcome very much depends on the competing models. We need some references, but we only have
    our own, so let's hope this still works.
    :return:
    """
    base = "/media/fabian/Results/nnUNet/3d_fullres/Task082_BraTS2020"
    replace_with = 2
    num_processes = 24
    expected_num_cases_val = 125

    # use a separate output folder from the previous experiments to ensure we are not messing things up
    output_base_here = join(base, 'use_brats_ranking')
    maybe_mkdir_p(output_base_here)

    # collect cv niftis and compute metrics with evaluate_BraTS_folder to ensure we work with the same metrics as brats
    out = join(output_base_here, 'cv_results')
    experiments = subfolders(base, join=False, prefix='nnUNetTrainer')
    gt_dir = join(base, 'gt_niftis')

    experiments_with_full_cv = []
    for e in experiments:
        print(e)
        o = join(out, e)
        maybe_mkdir_p(o)
        try:
            collect_cv_niftis(join(base, e), o)
            if not isfile(join(o, 'results.csv')):
                evaluate_BraTS_folder(o, gt_dir, num_processes, strict=True)
            experiments_with_full_cv.append(e)
        except Exception as ex:
            print("\nERROR\n", e, ex, "\n")
            if isfile(join(o, 'results.csv')):
                os.remove(join(o, 'results.csv'))

    # rank the non-postprocessed models
    tmp = np.loadtxt(join(out, experiments_with_full_cv[0], 'results.csv'), dtype='str', delimiter=',')
    num_cases = len(tmp) - 1
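    # 6 metrics per case: Dice and HD95 for each of the three evaluation regions (whole tumor, tumor core,
    # enhancing tumor)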
    data_for_ranking = np.zeros((6, len(experiments_with_full_cv), num_cases))
    for i, e in enumerate(experiments_with_full_cv):
        scores = load_csv_for_ranking(join(out, e, 'results.csv'))
        for metric in range(6):
            data_for_ranking[metric, i] = scores[:, metric]

    final_ranks, average_rank, ranks = rank_algorithms(data_for_ranking)

    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], experiments_with_full_cv[t])

    # for each model, create output directories with different thresholds. evaluate ALL OF THEM (might take a while lol)
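    # the thresholds are presumably enhancing-tumor voxel counts: predictions whose enhancing tumor is smaller than
    # the threshold get that label replaced with `replace_with` (see apply_threshold_to_folder)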
    thresholds = np.arange(25, 751, 25)
    output_pp_tmp = join(output_base_here, 'cv_determine_pp_thresholds')
    for e in experiments_with_full_cv:
        input_folder = join(out, e)
        for t in thresholds:
            output_directory = join(output_pp_tmp, e, str(t))
            maybe_mkdir_p(output_directory)
            if not isfile(join(output_directory, 'results.csv')):
                apply_threshold_to_folder(input_folder, output_directory, t, replace_with, processes=16)
                evaluate_BraTS_folder(output_directory, gt_dir, num_processes)

    # load ALL the results!
    results = []
    experiment_names = []
    for e in experiments_with_full_cv:
        for t in thresholds:
            output_directory = join(output_pp_tmp, e, str(t))
            expected_file = join(output_directory, 'results.csv')
            if not isfile(expected_file):
                print(e, 'does not have a results file for threshold', t)
                continue
            results.append(load_csv_for_ranking(expected_file))
            experiment_names.append("%s___%d" % (e, t))
    all_results = np.concatenate([i[None] for i in results], 0).transpose((2, 0, 1))

    # concatenate with non postprocessed models
    all_results = np.concatenate((data_for_ranking, all_results), 1)
    experiment_names = experiments_with_full_cv + experiment_names

    final_ranks, average_rank, ranks = rank_algorithms(all_results)

    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], experiment_names[t])

    # for each model, print the non postprocessed model as well as the best postprocessed model. If there are
    # validation set predictions, apply the best threshold to the validation set
    pred_val_base = join(base, 'predVal_PP_rank')
    has_val_pred = []
    for e in experiments_with_full_cv:
        rank_nonpp = final_ranks[experiment_names.index(e)]
        avg_rank_nonpp = average_rank[experiment_names.index(e)]
        print(e, avg_rank_nonpp, rank_nonpp)
        predicted_val = join(base, 'predVal', e)

        pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
        if len(pp_models) > 0:
            ranks = [final_ranks[i] for i in pp_models]
            best_idx = np.argmin(ranks)
            best = experiment_names[pp_models[best_idx]]
            best_avg_rank = average_rank[pp_models[best_idx]]
            print(best, best_avg_rank, min(ranks))
            print('')
            # apply threshold to validation set
            best_threshold = int(best.split('___')[-1])
            if not isdir(predicted_val):
                print(e, 'has no valset predictions')
            else:
                files = subfiles(predicted_val, suffix='.nii.gz')
                if len(files) != expected_num_cases_val:
                    print(e, 'has missing val cases. found: %d expected: %d' % (len(files), expected_num_cases_val))
                else:
                    apply_threshold_to_folder(predicted_val, join(pred_val_base, e), best_threshold, replace_with, num_processes)
                    has_val_pred.append(e)
        else:
            print(e, 'not found in ranking')

    # apply nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold
    e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
    pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
    ranks = [final_ranks[i] for i in pp_models]
    best_idx = np.argmin(ranks)
    best = experiment_names[pp_models[best_idx]]
    best_avg_rank = average_rank[pp_models[best_idx]]
    best_threshold = int(best.split('___')[-1])
    predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
    has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')

    # apply nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
    e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
    pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
    ranks = [final_ranks[i] for i in pp_models]
    best_idx = np.argmin(ranks)
    best = experiment_names[pp_models[best_idx]]
    best_avg_rank = average_rank[pp_models[best_idx]]
    best_threshold = int(best.split('___')[-1])
    predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
    has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')

    # convert valsets
    output_converted = join(base, 'converted_valSet')
    for e in has_val_pred:
        expected_source_folder = join(base, 'predVal_PP_rank', e)
        if not isdir(expected_source_folder):
            print(e, 'has no predVal_PP_rank')
            raise RuntimeError()
        files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
        if len(files) != expected_num_cases_val:
            print(e, 'prediction not done, found %d files, expected %d' % (len(files), expected_num_cases_val))
            continue
        target_folder = join(output_converted, 'predVal_PP_rank', e)
        maybe_mkdir_p(target_folder)
        convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)

    # now load all the csvs for the validation set (obtained from evaluation platform) and rank our models on the
    # validation set
    flds = subdirs(output_converted, join=False)
    results_valset = []
    names_valset = []
    for f in flds:
        curr = join(output_converted, f)
        experiments = subdirs(curr, join=False)
        for e in experiments:
            currr = join(curr, e)
            expected_file = join(currr, 'Stats_Validation_final.csv')
            if not isfile(expected_file):
                print(f, e, "has not been evaluated yet!")
            else:
                res = load_csv_for_ranking(expected_file)[:-5]
                assert res.shape[0] == expected_num_cases_val
                results_valset.append(res[None])
                names_valset.append("%s___%s" % (f, e))
    results_valset = np.concatenate(results_valset, 0)  # experiments x cases x metrics
    # convert to metrics x experiments x cases
    results_valset = results_valset.transpose((2, 0, 1))
    final_ranks, average_rank, ranks = rank_algorithms(results_valset)
    for t in np.argsort(final_ranks):
        print(final_ranks[t], average_rank[t], names_valset[t])
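The rank-then-aggregate scheme described in the docstring can be sketched as follows. rank_algorithms_sketch is a hypothetical stand-in for the rank_algorithms used above; the real nnU-Net implementation may handle metric direction (Dice is better when higher, HD95 when lower) and ties differently, so this only illustrates the idea under those assumptions:

import numpy as np
from scipy.stats import rankdata

def rank_algorithms_sketch(data):
    """
    data: array of shape (metrics, algorithms, cases), already oriented so that lower values are better
    (e.g. negate Dice). Returns (final_ranks, average_rank, per_case_ranks).
    """
    num_metrics, num_algorithms, num_cases = data.shape
    ranks = np.zeros_like(data)
    for m in range(num_metrics):
        for c in range(num_cases):
            # rank all competing models on this metric/case (rank 1 = best)
            ranks[m, :, c] = rankdata(data[m, :, c])
    # average all metric/case rankings per model, normalized by the number of teams
    average_rank = ranks.mean(axis=(0, 2)) / num_algorithms
    # the final rank simply orders the models by their average rank
    final_ranks = rankdata(average_rank)
    return final_ranks, average_rank, ranks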
Example #3
def collect_and_prepare(base_dir, num_processes=12, clean=False):
    """
    Collect all cv niftis, compute BraTS metrics, determine enhancing tumor thresholds and summarize everything in csv files.
    :param base_dir:
    :param num_processes:
    :param clean: if True, recompute results even if summary/threshold files already exist
    :return:
    """
    out = join(base_dir, 'cv_results')
    out_pp = join(base_dir, 'cv_results_pp')
    experiments = subfolders(base_dir, join=False, prefix='nnUNetTrainer')
    regions = get_brats_regions()
    gt_dir = join(base_dir, 'gt_niftis')
    replace_with = 2

    failed = []
    successful = []
    for e in experiments:
        print(e)
        try:
            o = join(out, e)
            o_p = join(out_pp, e)
            maybe_mkdir_p(o)
            maybe_mkdir_p(o_p)
            collect_cv_niftis(join(base_dir, e), o)
            if clean or not isfile(join(o, 'summary.csv')):
                evaluate_regions(o, gt_dir, regions, num_processes)
            if clean or not isfile(join(o_p, 'threshold.pkl')):
                determine_brats_postprocessing(o, gt_dir, o_p, num_processes, thresholds=list(np.arange(0, 760, 10)), replace_with=replace_with)
            if clean or not isfile(join(o_p, 'summary.csv')):
                evaluate_regions(o_p, gt_dir, regions, num_processes)
            successful.append(e)
        except Exception as ex:
            print("\nERROR\n", e, ex, "\n")
            failed.append(e)

    # we are interested in the mean (nan is 1) column
    with open(join(base_dir, 'cv_summary.csv'), 'w') as f:
        f.write('name,whole,core,enh,mean\n')
        for e in successful:
            expected_nopp = join(out, e, 'summary.csv')
            expected_pp = join(out_pp, e, 'summary.csv')
            if isfile(expected_nopp):
                res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_noPP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))
            if isfile(expected_pp):
                res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
                as_numeric = [float(i) for i in res[1:]]
                f.write(e + '_PP,')
                f.write("%0.4f," % as_numeric[0])
                f.write("%0.4f," % as_numeric[1])
                f.write("%0.4f," % as_numeric[2])
                f.write("%0.4f\n" % np.mean(as_numeric))

    # this just crawls the folders and evaluates what it finds
    with open(join(base_dir, 'cv_summary2.csv'), 'w') as f:
        for folder in ['cv_results', 'cv_results_pp']:
            for ex in subdirs(join(base_dir, folder), join=False):
                print(folder, ex)
                expected = join(base_dir, folder, ex, 'summary.csv')
                if clean or not isfile(expected):
                    evaluate_regions(join(base_dir, folder, ex), gt_dir, regions, num_processes)
                if isfile(expected):
                    res = np.loadtxt(expected, dtype=str, skiprows=0, delimiter=',')[-2]
                    as_numeric = [float(i) for i in res[1:]]
                    f.write('%s__%s,' % (folder, ex))
                    f.write("%0.4f," % as_numeric[0])
                    f.write("%0.4f," % as_numeric[1])
                    f.write("%0.4f," % as_numeric[2])
                    f.write("%0.4f\n" % np.mean(as_numeric))

    # apply threshold to val set
    expected_num_cases = 125
    missing_valset = []
    has_val_pred = []
    for e in successful:
        if isdir(join(base_dir, 'predVal', e)):
            currdir = join(base_dir, 'predVal', e)
            files = subfiles(currdir, suffix='.nii.gz', join=False)
            if len(files) != expected_num_cases:
                print(e, 'prediction not done, found %d files, expected %d' % (len(files), expected_num_cases))
                continue
            output_folder = join(base_dir, 'predVal_PP', e)
            maybe_mkdir_p(output_folder)
            threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
            if threshold > 1000: threshold = 750  # don't make it too big!
            apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
            has_val_pred.append(e)
        else:
            print(e, 'has no valset predictions')
            missing_valset.append(e)

    # 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
    e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
    currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
    maybe_mkdir_p(output_folder)
    threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
    if threshold > 1000: threshold = 750  # don't make it too big!
    apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)

    # 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
    e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
    currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
    maybe_mkdir_p(output_folder)
    threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
    if threshold > 1000: threshold = 750  # don't make it too big!
    apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)

    # convert val set to brats labels for submission
    output_converted = join(base_dir, 'converted_valSet')

    for source in ['predVal', 'predVal_PP']:
        for e in has_val_pred + ['nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold']:
            expected_source_folder = join(base_dir, source, e)
            if not isdir(expected_source_folder):
                print(e, 'has no', source)
                raise RuntimeError()
            files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
            if len(files) != expected_num_cases:
                print(e, 'prediction not done, found %d files, expected %d' % (len(files), expected_num_cases))
                continue
            target_folder = join(output_converted, source, e)
            maybe_mkdir_p(target_folder)
            convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)

    summarize_validation_set_predictions(output_converted)
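The threshold-based postprocessing used throughout (determine_brats_postprocessing, apply_threshold_to_folder) implements the BraTS trick of discarding implausibly small enhancing-tumor predictions. A hedged per-case sketch, assuming the nnU-Net internal convention (enhancing tumor = label 3) and that replace_with=2 maps the removed voxels to necrosis/non-enhancing tumor; apply_threshold_to_case_sketch is an illustrative name, not the nnU-Net API:

import SimpleITK as sitk

def apply_threshold_to_case_sketch(in_file, out_file, threshold, replace_with, enh_label=3):
    """If the predicted enhancing tumor is smaller than `threshold` voxels, relabel it to `replace_with`."""
    img = sitk.ReadImage(in_file)
    seg = sitk.GetArrayFromImage(img)
    if (seg == enh_label).sum() < threshold:
        # tiny enhancing-tumor predictions are most likely false positives
        seg[seg == enh_label] = replace_with
    out = sitk.GetImageFromArray(seg)
    out.CopyInformation(img)
    sitk.WriteImage(out, out_file)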