def get_result_name(the_paths, the_base):
    """Build the next result-folder name from the number of existing results.

    For every path in *the_paths*, the count of sub-folders inside its
    "ensembles" folder plus the count of its other folders (total folders
    minus the "ensembles" folder itself) is accumulated.  The returned name
    is *the_base* followed by that total + 1, zero-padded to two digits.
    """
    total_results = sum(
        len(list_folders(jp(path, "ensembles")))   # ensemble results
        + len(list_folders(path)) - 1              # non-ensemble results (minus "ensembles" dir)
        for path in the_paths
    )
    return the_base + str(total_results + 1).zfill(2)
import pandas as pd
import numpy as np
import nibabel as nib
import cc3d
from ms_segmentation.general.general import list_folders, list_files_with_name_containing
from ms_segmentation.evaluation.metrics import compute_metrics

# When True, only the most recent experiment folder is evaluated.
last_only = True

# Root folder holding one sub-folder per cross-validation experiment.
exp_folders = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\cross_validation'
# Folder with the ground-truth (training) images.
gt_folder = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'

# Names of the two expert annotations to evaluate against.
gt_list = ['mask1', 'mask2']

if last_only:
    all_experiments = list_folders(exp_folders)[-1:]  # keep only the last experiment
else:
    all_experiments = list_folders(exp_folders)
for i_exp, exp in enumerate(all_experiments):
    print("Curr. exp: ", exp, "(", i_exp + 1, "/", len(all_experiments), ")")
    for gt_curr in gt_list:  # evaluate against each annotator's mask
        experiment_folder = jp(exp_folders, exp)
        #gt_folder = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
        gt_name = gt_curr
        patients = list_folders(gt_folder)
        post_processing = True  # post-process predictions before scoring
        post_processing_type = 'remove_small'  # drop small connected components
        min_area = 3  # minimum component size (voxels) kept by post-processing
        folds = list_folders(experiment_folder)

        # Metric names used as dataframe columns (labels_only=True skips computation).
        labels_for_df = compute_metrics(None, None, labels_only=True)
import os
import cv2 as cv
import numpy as np
import nibabel as nib
from os.path import join as jp
from ms_segmentation.general.general import save_image, list_folders

path_base = r'D:\dev\s.tasconmorales\feature_maps'
experiments = list_folders(path_base)
batch_size = 32   # number of saved feature maps per batch
patch_slice = 16  # axial slice index extracted from every 3-D feature map

# Load one 2-D slice of every feature map, grouped by experiment.
all_fm = {}
for exp in experiments:
    slices = []
    for idx in range(batch_size):
        fm_path = jp(path_base, exp,
                     "inc_batch000_fm" + str(idx).zfill(3) + ".nii.gz")
        slices.append(nib.load(fm_path).get_fdata()[:, :, patch_slice])
    all_fm[exp] = slices

# Stitch each experiment's slices side by side (along columns).
for exp_name in all_fm:
    all_fm[exp_name] = np.concatenate(all_fm[exp_name], axis=1)

# Stack all experiments' strips on top of each other (along rows).
tot = np.concatenate(list(all_fm.values()), axis=0)


def bring_to_256_levels(input_im):
    """Linearly rescale *input_im* to the full 8-bit range [0, 255].

    Parameters
    ----------
    input_im : array-like
        Numeric array with arbitrary value range.

    Returns
    -------
    numpy.ndarray of numpy.uint8
        Rescaled image.  A constant input (max == min) maps to all zeros
        instead of dividing by zero, which the original code would do.
    """
    input_im = np.asarray(input_im)
    lo = np.min(input_im)
    hi = np.max(input_im)
    if hi == lo:  # constant image: avoid division by zero
        return np.zeros(input_im.shape, dtype=np.uint8)
    return (255 * (input_im - lo) / (hi - lo)).astype(np.uint8)
import numpy as np 
import nibabel as nib 
import os
import shutil
from os.path import join as jp
from ms_segmentation.general.general import list_folders, list_files_with_name_containing, create_folder

# Re-organise the ISBI test images from the original per-patient layout into
# a cross-sectional layout: one numbered case folder per (patient, timepoint).
# NOTE(review): fragment is truncated below — the timepoint loop body is cut.
path_base = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images'
path_cs = jp(path_base, 'cross_sectional')  # destination (cross-sectional) root
path_origin = jp(path_base, 'orig')         # source root with per-patient folders
patients = list_folders(path_origin)

# Running index used to name the generated case folders (01, 02, ...).
case_index = 1

for i_pat, pat in enumerate(patients):
    print("Patient: ", i_pat+1, "/", len(patients))
    path_pat = jp(path_origin, pat, "preprocessed")
    # One file per timepoint and modality.
    flair_images = list_files_with_name_containing(path_pat, "flair", "nii")
    mprage_images = list_files_with_name_containing(path_pat, "mprage", "nii")
    pd_images = list_files_with_name_containing(path_pat, "pd", "nii")
    t2_images = list_files_with_name_containing(path_pat, "t2", "nii")

    #check that all modalities have the same number of images
    assert len(flair_images) == len(mprage_images) == len(pd_images) == len(t2_images)

    #create patient folder
    create_folder(jp(path_cs, str(case_index).zfill(2)))

    for i_img in range(len(flair_images)):
        print("Timepoint: ", i_img+1)
        # create timepoint folder
# Esempio n. 5
# 0
import os
from os.path import join as jp
import numpy as np
import SimpleITK as sitk
from ms_segmentation.general.general import list_folders

# Convert every uncompressed NIfTI volume to .nii.gz, derive a brain mask
# from the FLAIR volume when one is missing, then delete the originals.
path_data = r'D:\dev\ms_data\Preprocessed-AnonymPatData'
cases_list = list_folders(path_data)
for case in cases_list:
    print("Current case: ", case)
    tps = list_folders(jp(path_data, case))
    for tp in tps:
        tp_dir = jp(path_data, case, tp)
        files_list = os.listdir(tp_dir)
        for file_ in files_list:
            if "gz" in file_:  # already compressed -> leave untouched
                continue
            src_path = jp(tp_dir, file_)
            # Re-save the image in compressed NIfTI format.
            img_itk = sitk.ReadImage(src_path)
            sitk.WriteImage(img_itk, src_path + ".gz")
            # Build a brain mask from FLAIR (foreground = any non-zero voxel)
            # unless a mask already exists in this timepoint folder.
            if "flair" in file_ and "brain_mask.nii.gz" not in files_list:
                mask = sitk.GetArrayFromImage(img_itk) > 0
                mask_itk = sitk.GetImageFromArray(mask.astype(np.uint8))
                mask_itk.CopyInformation(img_itk)
                sitk.WriteImage(mask_itk, jp(tp_dir, "brain_mask.nii.gz"))

            os.remove(src_path)  # drop the uncompressed original
import os
import nibabel as nib
import numpy as np
from os.path import join as jp
from ms_segmentation.general.general import save_image, list_folders, list_files_with_name_containing
from ms_segmentation.data_generation.patch_manager_3d import normalize_data

# Build "product" images (element-wise products of modalities) for every
# (patient, timepoint) case of the cross-sectional test set.
# NOTE(review): fragment truncated — the save step presumably follows.
path_base = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\cross_sectional'
cross_sectional = True  # data is organised as patient/timepoint folders

patients = list_folders(path_base)
if cross_sectional:
    for pat in patients:
        print("Patient ", pat)
        tps = list_folders(jp(path_base, pat))
        for tp in tps:
            print("TP: ", tp)
            # Load and normalize each modality (range per normalize_data —
            # presumably [0, 1], confirm against its definition).
            flair = normalize_data(
                nib.load(jp(path_base, pat, tp, 'flair.nii.gz')).get_fdata())
            t2 = normalize_data(
                nib.load(jp(path_base, pat, tp, 't2.nii.gz')).get_fdata())
            pd = normalize_data(
                nib.load(jp(path_base, pat, tp, 'pd.nii.gz')).get_fdata())
            t1 = normalize_data(
                nib.load(jp(path_base, pat, tp, 'mprage.nii.gz')).get_fdata())
            # Inverted T1 so that lesions are bright, as in the other modalities.
            t1_inv = normalize_data(np.max(t1) - t1)

            # Products of FLAIR with the other (lesion-bright) modalities.
            t2_flair = normalize_data(flair * t2)
            pd_flair = normalize_data(flair * pd)
            t1_inv_flair = normalize_data(flair * t1_inv)
            what = normalize_data(flair * (t2 + pd + t1_inv))
import os
from os.path import join as jp
import pandas as pd
import numpy as np
import nibabel as nib
import cc3d
from ms_segmentation.general.general import list_folders, list_files_with_name_containing
from ms_segmentation.evaluation.metrics import compute_metrics

last_only = True  # evaluate only last experiment

# Root with one folder per cross-validation experiment (cross-sectional set).
exp_folders = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\cross_validation'
# The two expert annotation files to evaluate against.
gt_list = ['mask1.nii.gz', 'mask2.nii.gz']
all_experiments = list_folders(exp_folders)
if last_only:
    all_experiments = all_experiments[-1:]  # keep only the most recent one
for i_exp, exp in enumerate(all_experiments):
    print("Curr. exp: ", exp, "(", i_exp + 1, "/", len(all_experiments), ")")
    for gt_curr in gt_list:  # evaluate against each annotator's mask
        experiment_folder = jp(exp_folders, exp)
        gt_folder = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\isbi_cs'  # ground-truth root
        gt_name = gt_curr
        patients = list_folders(gt_folder)
        post_processing = True  # post-process predictions before scoring
        post_processing_type = 'remove_small'  # drop small connected components
        min_area = 3  # minimum component size (voxels) kept by post-processing
        folds = list_folders(experiment_folder)

        # Metric names used as dataframe columns (labels_only=True skips computation).
        labels_for_df = compute_metrics(None, None, labels_only=True)
        global_df = pd.DataFrame(columns=labels_for_df)
        cnt_global = 0  # next free row index in global_df
# Esempio n. 8
# 0
import os
import shutil
from ms_segmentation.general.general import list_folders, create_folder, cls
from os.path import join as jp

# Flatten the cross-sectional (patient/timepoint) layout into a longitudinal
# one: each timepoint's files are copied into the patient folder with the
# timepoint id appended to the file name.
origin = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\histogram_matched'
destiny = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\histogram_matched'

# Image/mask files copied for every timepoint.
to_copy = ['flair', 'pd', 'mprage', 't2', 'brain_mask', 'mask1', 'mask2']

patients = list_folders(origin)
for pat in patients:
    create_folder(jp(destiny, pat))

    # Copy every timepoint of this patient into the flat destination folder.
    timepoints = list_folders(jp(origin, pat))
    for tp in timepoints:
        cls()
        print("Patient: ", pat)
        print("TP: ", tp)
        for modality in to_copy:
            src = jp(origin, pat, tp, modality + '.nii.gz')
            dst = jp(destiny, pat, modality + '_' + tp + '.nii.gz')
            shutil.copyfile(src, dst)
        if(num_timepoints%2==0): #even
            indexes.append(i+1)
        else:
            indexes.append(i)
    
    sorted_indexes = np.argsort(indexes)
    sorted_list_inference_patches = [list_inference_patches[k] for k in sorted_indexes]

    return sorted_list_inference_patches

# Process experiment
# NOTE(review): fragment — `experiment_type`, `path_experiments_cs`,
# `experiment_name` and `selection` are defined earlier in the full script.

if experiment_type == 'cs': # if experiment is cross-sectional
    # cross-sectional
    path_exp = jp(path_experiments_cs, experiment_name)
    folds = list_folders(path_exp)  # one sub-folder per cross-validation fold

    #Read log file to get parameters
    parameters_dict = parse_log_file(jp(path_exp, folds[0])) # take file of first fold as reference

    all_folds = []
    
    for f in folds:
        # `selection` flags which folds to process; skip de-selected ones.
        if selection[f] == False:
            continue
        fold_segmentations = []
        # Instantiate the model class named in the log file.
        # NOTE(review): eval() on log-file contents — acceptable for trusted
        # local logs, but unsafe if the log could come from untrusted sources.
        lesion_model = eval(parameters_dict["model_name"])(n_channels=len(eval(parameters_dict['input_data'])), n_classes=2, bilinear = False)
        lesion_model.cuda()

        # try to load the weights
# Training configuration (the `options` dict is created earlier in the script).
options['norm_type'] = 'zero_one'  # intensity normalization scheme
options['batch_size'] = 16
options['patience'] = 20  #Patience for the early stopping
options['gpu_use'] = True
options['num_epochs'] = 200
options['optimizer'] = 'adam'
options[
    'patch_sampling'] = 'mask'  # (mask, balanced or balanced+roi or non-uniform)
options['loss'] = 'dice'  # (dice, cross-entropy, categorical-cross-entropy)
options['resample_each_epoch'] = False  # if True, patches are re-sampled every epoch

# Data locations for the ISBI cross-sectional dataset.
path_base = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS'
path_data = jp(path_base, 'isbi_cs')
options['path_data'] = path_data
path_res = jp(path_base, "cross_validation")  # cross-validation results root
all_patients = list_folders(path_data)

# NOTE(review): `debug` is defined earlier — when set, a fixed dummy
# experiment name is used instead of a timestamped one.
if (debug):
    experiment_name = "dummy_experiment_CV_3DUNet"
else:
    experiment_name, curr_date, curr_time = get_experiment_name(
        the_prefix="CROSS_VALIDATION_UNet3D")

# Visualize feature maps
# Captured layer outputs, keyed by layer name.
activation = {}


def get_activation(name):
    """Return a forward hook that stores a layer's detached output.

    Parameters
    ----------
    name : str
        Key under which the layer output is stored in ``activation``.

    Returns
    -------
    callable
        Hook with signature ``(model, input, output)`` suitable for
        ``module.register_forward_hook``.  The original code did not
        return the inner closure, so the factory yielded ``None``.
    """
    def hook(model, input, output):
        activation[name] = output.detach()

    return hook
# Esempio n. 11
# 0
## Script to rename product images that contain "flair" in their names so that when listing files they are not listed
# twice.

import os
from os.path import join as jp
from ms_segmentation.general.general import list_folders, list_files_with_name_containing

path_images = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
patients = list_folders(path_images)

# Old name fragment -> new name fragment for every file to be renamed.
changes ={'pd_times_fl': 'p_times_fl','t2_times_fl': 't_two_times_fl'}
for pat in patients:  # one rename pass per patient
    print("patient: ", pat)
    for old_tag, new_tag in changes.items():
        matching_files = list_files_with_name_containing(jp(path_images, pat), old_tag, "nii.gz")
        for tp_idx, old_path in enumerate(matching_files):  # one file per timepoint
            print("timepoint: ", tp_idx+1)
            os.rename(old_path, old_path.replace(old_tag, new_tag))
            


# Esempio n. 12
# 0
    its values matches the cumulative density function of the template.
    """
    src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
                                                           return_inverse=True,
                                                           return_counts=True)
    tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)

    # calculate normalized quantiles for each array
    src_quantiles = np.cumsum(src_counts) / source.size
    tmpl_quantiles = np.cumsum(tmpl_counts) / template.size

    interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
    return interp_a_values[src_unique_indices].reshape(source.shape)


# For each patient, load the first timepoint of every modality as the
# matching reference (presumably for the histogram-matching helper defined
# earlier in this file — confirm).  Fragment truncated below (t2 reference cut).
patients = list_folders(path_data)

for pat in patients:  # for each patient
    print("Patient:", pat)
    timepoints = list_folders(jp(path_data, pat))
    create_folder(jp(path_new_data, pat))
    #take first time point as reference for each modality
    ref_flair = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0],
                    'flair.nii.gz')).get_fdata())
    ref_mprage = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0],
                    'mprage.nii.gz')).get_fdata())
    ref_pd = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0], 'pd.nii.gz')).get_fdata())
    ref_t2 = normalize_data(
import os
import SimpleITK as sitk
from os.path import join as jp
import pandas as pd
from ms_segmentation.general import general as misc

# Inventory of the original INSPIRATION dataset: walk the
# center/patient/timepoint/modality hierarchy and record modalities that
# contain more than one image.  Fragment truncated below.
path_orig = r'D:\dev\ms_data\Inspiration\INSPIRATION-org'
path_proc = r'D:\dev\ms_data\Inspiration\INSPIRATION'  # processed root (presumably used further below — truncated)

# first, process original images
centers = misc.list_folders(path_orig)
columns = ['Center', 'Patient', 'Time Point', 'Modality', 'Size']
df_orig = pd.DataFrame(columns=columns)
i_orig = 0  # next free row index in df_orig
for i_c, c in enumerate(centers):  # for each study center
    patients = misc.list_folders(jp(path_orig,
                                    c))  # list of patients for center c
    for i_pat, pat in enumerate(patients):  #for every patient
        timepoints = misc.list_folders(jp(path_orig, c,
                                          pat))  #list of timepoints
        for tp in timepoints:
            modalities = misc.list_folders(jp(
                path_orig, c, pat, tp))  # List of available modalities
            for mod in modalities:
                imgs = misc.list_files_with_extension(
                    jp(path_orig, c, pat, tp, mod),
                    "nii.gz")  # List nii.gz images
                if len(imgs
                       ) > 1:  # If more than one image for a certain modality
                    df_orig.loc[i_orig] = [c, pat, tp, mod, 'multiple']
                    i_orig += 1
# Esempio n. 14
# 0
    #plot_learning_curve(train_accs, val_accs, the_title="Accuracy plot", measure = "Accuracy", early_stopping = False, filename = jp(path_results, "acc_plot.png"))                  
    plot_learning_curve(train_jaccs, val_jaccs, the_title="Jaccard plot", measure = "Jaccard", early_stopping = False, filename = jp(path_results, "jaccard_plot.png"))
else:
    try:
        lesion_model.load_state_dict(torch.load(jp(path_models,"checkpoint.pt")))
    except:
        raise ValueError("No model found")




#Evaluate all test images
columns = columns = compute_metrics(None, None, labels_only=True)
df = pd.DataFrame(columns = columns)

test_images = list_folders(path_test)
i_row=0
cnt=0
for case in test_images:
    print(cnt+1, "/", len(test_images))
    path_timepoints = jp(path_test, case)

    list_images = get_dictionary_with_paths_cs([case], path_test, options['input_data']) 
    list_rois = get_dictionary_with_paths_cs([case], path_test, [options['brain_mask']])
    patch_half = tuple([idx // 2 for idx in options['patch_size']])


    timepoints = list_folders(path_timepoints)    
    
    # get candidate voxels
    all_infer_patches, all_coordenates = get_inference_patches( path_test=path_test,
import os
from os.path import join as jp
import pandas as pd
import numpy as np
import nibabel as nib
import SimpleITK as sitk
from ms_segmentation.general.general import list_folders, list_files_with_name_containing, create_folder

# Binarise tissue probability maps per patient/timepoint and copy the
# spatial metadata of a reference image onto them.
# NOTE(review): fragment truncated below (the WriteImage call is cut).
path_masks = r'D:\dev\s.tasconmorales\test_segm\PROCESS\INPUT\mri'
path_target = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
path_copy = r'D:\dev\s.tasconmorales\wm_gm_masks'

patients = list_folders(path_masks)

for pat in patients:
    print('Patient:', pat)
    create_folder(jp(path_copy, pat))
    # Number of timepoints = number of mprage images for this patient.
    n_timepoints = len(
        list_files_with_name_containing(jp(path_target, pat), 'mprage',
                                        'nii.gz'))
    #read reference image (provides origin/spacing/direction for the masks)
    ref = sitk.ReadImage(jp(path_masks, pat, 'ref.nii.gz'))
    #Read each mask
    for tp in range(n_timepoints):
        print('Time-point:', tp + 1)
        # p1mprage_XX.nii — presumably the GM probability map (SPM naming); confirm.
        gm = sitk.ReadImage(
            jp(path_masks, pat, 'p1mprage_' + str(tp + 1).zfill(2) + '.nii'))
        gm_np = sitk.GetArrayFromImage(gm)
        # Threshold the probability map at 0.5 to obtain a binary mask.
        gm_new = sitk.GetImageFromArray((gm_np > 0.5).astype(np.uint8))
        gm_new.CopyInformation(ref)
        sitk.WriteImage(
import os
from copy import deepcopy as dcopy
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from skimage.feature import local_binary_pattern as lbp
from ms_segmentation.general.general import save_image, list_folders, create_folder, list_files_with_name_containing
from ms_segmentation.data_generation.patch_manager_3d import normalize_data
from scipy.optimize import fmin

# Set-up for chi-square-based histogram analysis of the longitudinal test
# images.  NOTE(review): fragment truncated — the processing loop follows
# in the full script.
path_data = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\longitudinal'
path_histograms = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_histograms'
path_write = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_images'
create_folder(path_write)
create_folder(path_histograms)

patients = list_folders(path_data)

modalities = ["flair", "mprage", "pd", "t2"]  # modalities to analyse
#tissues = ["wm", "gm", "mask1"]
tissues = ["brain_mask"]  # masks used to select voxels for the histograms
to_analyze = {'preprocessed': 'nii'}  # sub-folder -> file extension to analyse
colors = ['b', 'g', 'r', 'c', 'm']  # matplotlib plot colors
ref = []           # reference data (presumably filled later — truncated here)
wm_masks_ref = []  # reference WM masks (presumably filled later)
#f = lambda x: np.sum((np.histogram(fl_0[fl_0>0].ravel(), 256, [0,1], density=True)[0] - np.histogram(x[0]*fl_1[fl_1>0].ravel(), 256, [0,1], density=True)[0]**2)/(np.histogram(fl_0[fl_0>0].ravel(), 256, [0,1], density=True)[0] + 0.00001))
image_format = "nii.gz"

# One list per modality (flair, mprage, pd, t2).
all_histograms = [[], [], [], []]
all_histograms_aligned = [[], [], [], []]
all_histograms_wm = [[], [], [], []]
all_histograms_gm = [[], [], [], []]
                        the_title="Jaccard plot",
                        measure="Jaccard",
                        early_stopping=False,
                        filename=jp(path_results, "jaccard_plot.png"))
else:
    try:
        lesion_model.load_state_dict(
            torch.load(jp(path_models, "checkpoint.pt")))
    except:
        raise ValueError("No model found")

#Evaluate all images
columns = ['Case', 'DSC', 'HD']
df = pd.DataFrame(columns=columns)

test_images = list_folders(path_test)
i_row = 0
cnt = 0
for case in test_images:
    print(cnt + 1, "/", len(test_images))
    scan_path = jp(path_test, case)
    inf_slices = get_inference_slices(scan_path=scan_path,
                                      input_data=options['input_data'],
                                      normalize=True,
                                      norm_type='zero_one')

    probs = get_probs(inf_slices, lesion_model, device, options['num_classes'],
                      options)
    labels = np.transpose(np.argmax(probs, axis=1).astype(np.uint8), (1, 2, 0))

    labels_gt = nib.load(jp(path_test, case,
# Esempio n. 18
# 0
import os
import numpy as np
import nibabel as nib
from os.path import join as jp
from ms_segmentation.general.general import save_image, list_folders, create_log, create_folder, list_files_with_extension

# Build an ensemble from several experiments' segmentation results.
# The odd-count assertion below suggests voxel-wise majority voting —
# the voting step itself is past this visible fragment.
path_base = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs'
path_ensembles = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs\ensembles'
experiments_to_ensemble = ['qwertz06', 'qwertz09', 'qwertz11']

# Name the new ensemble folder after the number of existing ensembles.
num_ens = len(list_folders(path_ensembles))
name_folder_new_ensemble = "Ensemble_" + str(num_ens + 1).zfill(2)
create_folder(jp(path_ensembles, name_folder_new_ensemble))

# Check that listed experiments exist
all_experiments = list_folders(path_base)
all_experiments = [x for x in all_experiments if x in experiments_to_ensemble]

# An odd count guarantees a strict majority on every voxel vote.
assert len(all_experiments) % 2 != 0  # number of experiments must be odd

# Load every segmentation of every experiment:
# one (num_images, *volume_shape) array per experiment.
all_images = []
for i_exp, exp in enumerate(all_experiments):
    print("Current experiment: ", exp, i_exp + 1, "/", len(all_experiments))
    all_images_names = list_files_with_extension(jp(path_base, exp), "nii")
    all_images_curr_exp = []
    for img in all_images_names:
        all_images_curr_exp.append(
            nib.load(jp(path_base, exp, img)).get_fdata().astype(np.uint8))
    all_images.append(np.stack(all_images_curr_exp))  # (61, volume size)

# Shape: (num_experiments, num_images, *volume_shape).
all_images_np = np.stack(all_images)