all_experiments = list_folders(exp_folders)[-1:]
else:
    all_experiments = list_folders(exp_folders)
# NOTE(review): this fragment is truncated — the if/else building
# `all_experiments` precedes this view and the loop body is cut off
# mid-statement at the `curr_pred = jp(` call below.
# Outer loop: evaluate every experiment folder against every ground-truth
# rater name listed in `gt_list`.
for i_exp, exp in enumerate(all_experiments):
    print("Curr. exp: ", exp, "(", i_exp + 1, "/", len(all_experiments), ")")
    for gt_curr in gt_list:
        # Folder holding this experiment's per-fold results.
        experiment_folder = jp(exp_folders, exp)
        #gt_folder = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
        gt_name = gt_curr  # substring used to match GT filenames below
        patients = list_folders(gt_folder)
        # Post-processing configuration: presumably removes connected
        # components smaller than `min_area` voxels — TODO confirm against
        # the post-processing implementation (not visible here).
        post_processing = True
        post_processing_type = 'remove_small'
        min_area = 3
        folds = list_folders(experiment_folder)

        # compute_metrics(None, None, labels_only=True) returns only the
        # metric names, used here as DataFrame column labels.
        labels_for_df = compute_metrics(None, None, labels_only=True)
        global_df = pd.DataFrame(columns=labels_for_df)
        cnt_global = 0  # running row index into global_df
        for gt_patient in patients:  #For every patient
            print("Current patient: ", gt_patient)
            patient_df = pd.DataFrame(columns=labels_for_df)
            # All GT volumes for this patient whose filename contains gt_name.
            gt_timepoints = list_files_with_name_containing(
                jp(gt_folder, gt_patient), gt_name, "nii.gz")
            cnt_patient = 0  # running row index into patient_df
            for i_timepoint in range(len(gt_timepoints)):
                print("Current timepoint: ", i_timepoint + 1, "/",
                      len(gt_timepoints))
                curr_gt = gt_timepoints[i_timepoint]
                # Load the GT volume as uint8 labels (presumably a binary
                # lesion mask — TODO confirm).
                curr_gt_img = nib.load(curr_gt).get_fdata().astype(np.uint8)
                curr_pred = jp(
                    experiment_folder, "fold" + gt_patient, "results",
        init = 1
    # NOTE(review): fragment — the function header and the code that fills
    # `list_inference_slices`/`indexes` before this point are outside this
    # view; the deeper indent of `init = 1` above suggests a surrounding
    # if/else was cut off.
    # Collect the remaining (backward) sliding windows of
    # `desired_timepoints` consecutive timepoints; the slice pattern
    # indicates axis 1 of `all_slices` is the time axis.
    for i in range(init,num_backward):
        list_inference_slices.append(all_slices[:,i:i+desired_timepoints,:,:,:])
        if(num_timepoints%2==0): #even
            # Even timepoint count: attribute this window to index i+1.
            indexes.append(i+1)
        else:
            indexes.append(i)
    
    # Reorder the collected windows by their timepoint index so the caller
    # receives them in temporal order.
    sorted_indexes = np.argsort(indexes)
    sorted_list_inference_slices = [list_inference_slices[k] for k in sorted_indexes]

    return sorted_list_inference_slices


#Evaluate all images
# Empty results DataFrame whose columns are the metric names
# (labels_only=True returns just the labels, no computation).
columns = compute_metrics(None, None, labels_only=True)
df = pd.DataFrame(columns = columns)

test_images = list_folders(path_test)
i_row=0  # next row index to fill in df
# NOTE(review): cnt is printed as a progress counter but is never
# incremented in the visible lines — the increment is presumably in the
# truncated part of this loop; confirm.
cnt=0
for case in test_images:
    print(cnt+1, "/", len(test_images))
    scan_path = jp(path_test, case)
    # Temporal inference windows for this case, normalized to [0, 1]
    # ('zero_one'); out_size=(160, 200) presumably fixes the in-plane
    # size — TODO confirm inside get_inference_slices_time.
    inf_slices = get_inference_slices_time( the_path=path_test,
                                            the_case=case,
                                            input_data=options['input_data'], 
                                            normalize=True, 
                                            out_size = (160,200),
                                            norm_type = 'zero_one')
    
예제 #3
0
import os
from os.path import join as jp
import pandas as pd
import numpy as np
import nibabel as nib
import cc3d
from ms_segmentation.general.general import list_folders, list_files_with_name_containing, list_files_with_extension
from ms_segmentation.evaluation.metrics import compute_metrics

# Compute the per-metric standard deviation over all cases (rows) of each
# rater's results CSV, and save one std row per rater mask.
path_experiment = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\cross_validation\CROSS_VALIDATION_UNet3D_2020-06-25_07_07_15[chi-square_norm_train]'

masks =['mask1', 'mask2']

csv_files = list_files_with_extension(path_experiment, 'csv')

# Metric names only (no computation) — these become the output columns.
labels_for_df = compute_metrics(None, None, labels_only = True)
df_std = pd.DataFrame(columns = labels_for_df)

for row_idx, mask_name in enumerate(masks):
    # First CSV whose filename mentions this mask.
    results_file = [fi for fi in csv_files if mask_name in fi][0]
    # Drop the final row, which holds the per-column average.
    per_case = pd.read_csv(jp(path_experiment, results_file))[:-1]
    df_std.loc[row_idx] = list(per_case.std())

df_std.to_csv(jp(path_experiment, 'std_mask1_mask2.csv'), float_format = '%.5f', index = False)
예제 #4
0
    'CROSS_VALIDATION_UNet3D_2020-06-25_07_07_15[chi-square_norm_train]'
]
exp_l = [
    'CROSS_VALIDATION_UNetConvLSTM3D_2020-06-23_21_31_39[longitudinal_chisquare_normalization_new]',
    'CROSS_VALIDATION_UNetConvLSTM3D_2020-06-13_08_43_21[all_modalities_no_hist_matching_new]'
]
# Metrics to be melted into long format (one row per metric value).
metrics = ["DSC", "LFPR", "LTPR"]

# Display name -> experiment folder for the four compared setups.
# NOTE(review): both "CS" entries point at exp_cs[0]; by analogy with the
# "L" entries the second one was probably meant to be exp_cs[1] — the
# exp_cs list is truncated above, so confirm before changing.
cases = {
    "CS (min-max norm.)": jp(path_cs, exp_cs[0]),
    "CS (proposed norm.)": jp(path_cs, exp_cs[0]),
    "L (min-max norm.)": jp(path_l, exp_l[0]),
    "L (proposed norm.)": jp(path_l, exp_l[1])
}

col_names = compute_metrics(None, None, labels_only=True)
#total_df = pd.DataFrame(columns = col_names)

# Reshape each experiment's wide results table into long format: the rows
# are duplicated once per metric, with "metric"/"metric value" columns.
# NOTE(review): the loop body appears to continue past this fragment.
for i_case, (c_name, c_path) in enumerate(cases.items()):
    # Drop the last row, which holds the column averages.
    df = pd.read_csv(jp(c_path, "all_results_postprocessed_mask1.csv"))[:-1]
    built = dc(df)  # dc is presumably copy.deepcopy, imported above this view
    for i_m, m in enumerate(metrics):
        if i_m == 0:
            # First metric reuses the initial copy.
            built["metric"] = m
            built["metric value"] = df[m]
        else:
            temp_df = dc(df)
            temp_df["metric"] = m
            temp_df["metric value"] = temp_df[m]
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # pd.concat([built, temp_df]) is the modern replacement.
            built = built.append(temp_df)
    built["case"] = c_name
import os
import pandas as pd
import nibabel as nib
import numpy as np
from os.path import join as jp
from ms_segmentation.general.general import list_folders, list_files_with_extension
from ms_segmentation.evaluation.metrics import compute_metrics

# Compare two result folders volume-by-volume: `reference` is treated as
# the ground truth and `to_compare` as the prediction.
reference = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs\qwertz06'
to_compare = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs\qwertz09'
# Identifier like "qwertz06__qwertz09" labelling this comparison.
comparison_id = reference.split("\\")[-1] + "__" + to_compare.split("\\")[-1]
save_path = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images'

# Metric names only (no computation), used as DataFrame column labels.
columns = compute_metrics(None, None, labels_only=True)
df = pd.DataFrame(columns=columns)

list_ref = list_files_with_extension(reference, "nii")
list_to_compare = list_files_with_extension(to_compare, "nii")

# Pairing is by filename (`img` is reused for both folders below), so both
# folders must contain the same number of identically named files.
assert len(list_ref) == len(list_to_compare)
i_row = 0
for i, img in enumerate(list_ref):
    print(i + 1, "/", len(list_ref))
    # Load both volumes as uint8 label arrays.
    ref_labels = nib.load(jp(reference, img)).get_fdata().astype(np.uint8)
    to_compare_labels = nib.load(jp(to_compare,
                                    img)).get_fdata().astype(np.uint8)

    metrics = compute_metrics(ref_labels, to_compare_labels)
    df.loc[i_row] = list(metrics.values())
    i_row += 1