import os
import shutil
import nibabel as nib
import numpy as np
from scipy import ndimage
from os.path import join as jp
from copy import deepcopy as dcopy
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from skimage.feature import local_binary_pattern as lbp
from scipy.optimize import fmin
from ms_segmentation.general.general import save_image, list_folders, create_folder, list_files_with_name_containing, get_experiment_name
from ms_segmentation.data_generation.patch_manager_3d import normalize_data

path_data = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\longitudinal'
path_histograms = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_histograms'
path_write = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_images'
create_folder(path_write)
create_folder(path_histograms)

patients = list_folders(path_data)

modalities = ["flair", "mprage", "pd", "t2"]
#tissues = ["wm", "gm", "mask1"]
tissues = ["brain_mask"]
to_analyze = {'preprocessed': 'nii'}
colors = ['b', 'g', 'r', 'c', 'm']
ref = []
wm_masks_ref = []
#f = lambda x: np.sum((np.histogram(fl_0[fl_0>0].ravel(), 256, [0,1], density=True)[0] - np.histogram(x[0]*fl_1[fl_1>0].ravel(), 256, [0,1], density=True)[0]**2)/(np.histogram(fl_0[fl_0>0].ravel(), 256, [0,1], density=True)[0] + 0.00001))
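# A clearer version of the commented-out chi-square objective above (assumptions: fl_0 is the
# reference image and fl_1 the moving image, both already scaled to [0, 1]; the single parameter
# x[0] is a global intensity scale that can be optimized with scipy.optimize.fmin).
def chi_square_objective(x, fl_0, fl_1, n_bins=256, eps=1e-5):
    h_ref, _ = np.histogram(fl_0[fl_0 > 0].ravel(), n_bins, [0, 1], density=True)
    h_mov, _ = np.histogram(x[0] * fl_1[fl_1 > 0].ravel(), n_bins, [0, 1], density=True)
    return np.sum(((h_ref - h_mov) ** 2) / (h_ref + eps))

# Usage sketch: best_scale = fmin(chi_square_objective, x0=[1.0], args=(fl_0, fl_1))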
image_format = "nii.gz"

all_histograms = [[], [], [], []]
path_data = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS'
#path_data = r'D:\dev\INS1'
path_train = jp(path_data, "train")
path_test = jp(path_data, "test")
#path_test = r'D:\dev\ISBI_CS\test'
#path_test = r'D:\dev\INS_Test\test'
debug = True
if debug:
    experiment_name = "dummy_experiment_2DUNetConvGRU"
else:
    experiment_name, curr_date, curr_time = get_experiment_name(
        the_prefix="unet2dconvGRU")
#experiment_name = 'unet3d_2020-03-24_16_08_04[ISBI1-17]'
path_results = jp(path_data, experiment_name)
create_folder(path_results)
path_models = jp(path_results, "models")
create_folder(path_models)
path_segmentations = jp(path_results, "results")
create_folder(path_segmentations)

# Visualize one subject
#img, img_header = load(jp(path_train, '00001', 'FLAIR_masked.nii.gz'))
#mask, mask_header = load(jp(path_train, '00001', 'mask_FLAIR.nii.gz'))
#shim_overlay_slice(img, mask, 25, alpha=0.4)
#shim(img, 16)

# Define options for training
options = {}
# training data path
options['training_path'] = path_train
#path_test_cs = r'D:\dev\ms_data\Challenges\ISBI2015\sample1'
path_test_cs = r'D:\dev\ms_data\Preprocessed-AnonymPatData\cross_sectional'    # ATTENTION!
if path_test_cs.split("\\")[-1] == "histogram_matched":
    print("Histogram matched test dataset is being used")

#path_test_l = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\longitudinal'    
path_test_l = r'D:\dev\ms_data\Preprocessed-AnonymPatData\normalized_wm'
path_experiments_cs = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\cross_validation'
path_experiments_l = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\cross_validation'
#path_results = r'D:\dev\ms_data\Challenges\ISBI2015\res'
experiment_type = 'cs'  # assumed default: 'cs' = cross-sectional, otherwise longitudinal
if experiment_type == 'cs':
    path_results = r'D:\dev\ms_data\Preprocessed-AnonymPatData\results'
else:
    path_results = r'D:\dev\ms_data\Preprocessed-AnonymPatData\results'

create_folder(path_results)
use_gpu = True

def get_result_name(the_paths, the_base):
    accum = 0
    for p in the_paths:
        num_ensembles = len(list_folders(jp(p, "ensembles")))
        num_non_ensembles = len(list_folders(p)) - 1 # minus ensembles folder
        total = num_ensembles + num_non_ensembles
        accum += total
    new_name = the_base + str(accum+1).zfill(2)
    return new_name


experiment_name_folder = get_result_name([r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs', r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_l'], "qwertz")
create_folder(jp(path_results, experiment_name_folder))
case_index = 1

for i_pat, pat in enumerate(patients):
    print("Patient: ", i_pat+1, "/", len(patients))
    path_pat = jp(path_origin, pat, "preprocessed")
    flair_images = list_files_with_name_containing(path_pat, "flair", "nii")
    mprage_images = list_files_with_name_containing(path_pat, "mprage", "nii")
    pd_images = list_files_with_name_containing(path_pat, "pd", "nii")
    t2_images = list_files_with_name_containing(path_pat, "t2", "nii")

    #check that all modalities have the same number of images
    assert len(flair_images) == len(mprage_images) == len(pd_images) == len(t2_images)

    #create patient folder
    create_folder(jp(path_cs, str(case_index).zfill(2)))

    for i_img in range(len(flair_images)):
        print("Timepoint: ", i_img+1)
        # create timepoint folder and copy all four modalities into it
        path_tp = jp(path_cs, str(case_index).zfill(2), str(i_img+1).zfill(2))
        create_folder(path_tp)
        shutil.copyfile(flair_images[i_img], jp(path_tp, "flair.nii"))
        shutil.copyfile(mprage_images[i_img], jp(path_tp, "mprage.nii"))
        shutil.copyfile(pd_images[i_img], jp(path_tp, "pd.nii"))
        shutil.copyfile(t2_images[i_img], jp(path_tp, "t2.nii"))
    
    case_index += 1

#change extension
for i_pat, pat in enumerate(patients):
Example #5
from os.path import join as jp
import pandas as pd
import numpy as np
from copy import deepcopy as dc
import nibabel as nib
import matplotlib.pyplot as plt
import seaborn as sns
from ms_segmentation.general.general import create_folder, list_folders, list_files_with_name_containing, list_files_with_extension
from ms_segmentation.evaluation.metrics import compute_metrics

sns.set(style="whitegrid")

path_cs = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\cross_validation'
path_l = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\cross_validation'
path_gt = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
path_results = r'D:\dev\ms_data\Challenges\ISBI2015\boxplots'
create_folder(path_results)

exp_cs = [
    'CROSS_VALIDATION_UNet3D_2020-06-04_14_54_25[all_modalities_new]',
    'CROSS_VALIDATION_UNet3D_2020-06-25_07_07_15[chi-square_norm_train]'
]
exp_l = [
    'CROSS_VALIDATION_UNetConvLSTM3D_2020-06-23_21_31_39[longitudinal_chisquare_normalization_new]',
    'CROSS_VALIDATION_UNetConvLSTM3D_2020-06-13_08_43_21[all_modalities_no_hist_matching_new]'
]
metrics = ["DSC", "LFPR", "LTPR"]

cases = {
    "CS (min-max norm.)": jp(path_cs, exp_cs[0]),
    "CS (proposed norm.)": jp(path_cs, exp_cs[1]),
    "L (min-max norm.)": jp(path_l, exp_l[1]),
    "L (proposed norm.)": jp(path_l, exp_l[0]),
}
Example #6
import numpy as np
import nibabel as nib
from os.path import join as jp
from ms_segmentation.general.general import list_folders, create_folder
from ms_segmentation.data_generation.patch_manager_3d import normalize_data


def histogram_matching(source, template):
    # map the intensities of `source` so that its histogram matches that of `template`
    # (name and signature of this helper are assumptions)
    src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
                                                           return_inverse=True,
                                                           return_counts=True)
    tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)

    # calculate normalized quantiles for each array
    src_quantiles = np.cumsum(src_counts) / source.size
    tmpl_quantiles = np.cumsum(tmpl_counts) / template.size

    interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
    return interp_a_values[src_unique_indices].reshape(source.shape)


patients = list_folders(path_data)

for pat in patients:  # for each patient
    print("Patient:", pat)
    timepoints = list_folders(jp(path_data, pat))
    create_folder(jp(path_new_data, pat))
    #take first point as reference
    ref_flair = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0],
                    'flair.nii.gz')).get_fdata())
    ref_mprage = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0],
                    'mprage.nii.gz')).get_fdata())
    ref_pd = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0], 'pd.nii.gz')).get_fdata())
    ref_t2 = normalize_data(
        nib.load(jp(path_data, pat, timepoints[0], 't2.nii.gz')).get_fdata())
    brain_mask_ref = nib.load(
        jp(path_data, pat, timepoints[0], 'brain_mask.nii.gz')).get_fdata()
    #mask1 = nib.load(jp(path_data, pat, timepoints[0], 'mask1.nii.gz')).get_fdata()
    #mask2 = nib.load(jp(path_data, pat, timepoints[0], 'mask2.nii.gz')).get_fdata()
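    # Sketch (assumptions): match every later time-point to the first one and write the result
    # under path_new_data; only FLAIR is shown here, the other modalities would be analogous.
    # File names and the reuse of the reference affine are assumptions.
    for tp in timepoints[1:]:
        flair_nib = nib.load(jp(path_data, pat, tp, 'flair.nii.gz'))
        flair_tp = normalize_data(flair_nib.get_fdata())
        brain_mask_tp = nib.load(jp(path_data, pat, tp, 'brain_mask.nii.gz')).get_fdata()
        matched = np.zeros_like(flair_tp)
        matched[brain_mask_tp > 0] = histogram_matching(flair_tp[brain_mask_tp > 0],
                                                        ref_flair[brain_mask_ref > 0])
        create_folder(jp(path_new_data, pat, tp))
        nib.save(nib.Nifti1Image(matched, flair_nib.affine),
                 jp(path_new_data, pat, tp, 'flair.nii.gz'))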
Example #7
# Ensemble of different models

import nibabel as nib
import numpy as np
import os
from os.path import join as jp
from ms_segmentation.general.general import save_image, list_folders, create_folder

path_base = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\cross_validation'
ensemble_name = 'aensemble2'
create_folder(jp(path_base, ensemble_name))
indexes_experiments = [13, 14, 15]
all_experiments = list_folders(path_base)
to_consider = [all_experiments[i] for i in indexes_experiments]
num_folds = 5
tp_amounts = []

data = {exp: [] for exp in to_consider}  # one list of fold predictions per experiment
# read images:
for exp in to_consider:  # for each of the selected experiments
    for f in range(num_folds):  # for each fold
        timepoints = []
        images = os.listdir(
            jp(path_base, exp, "fold" + str(f + 1).zfill(2), "results",
               str(f + 1).zfill(2)))  # List all images for current fold
        for img in images:
            timepoints.append(
                nib.load(
                    jp(path_base, exp, "fold" + str(f + 1).zfill(2), "results",
                       str(f + 1).zfill(2), img)).get_fdata().astype(np.uint8))
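        data[exp].append(np.stack(timepoints, axis=0))  # assumption: keep one stack per fold

# Sketch (assumptions): fuse the selected experiments by majority vote, fold by fold and
# image by image, and write the fused masks into the new ensemble folder. The output file
# names and the identity affine are assumptions; save_image's signature is not shown in this
# fragment, so nibabel is used directly.
for f in range(num_folds):
    n_images = data[to_consider[0]][f].shape[0]
    out_dir = jp(path_base, ensemble_name, "fold" + str(f + 1).zfill(2))
    create_folder(out_dir)
    for i in range(n_images):
        votes = np.stack([data[exp][f][i] for exp in to_consider], axis=0)
        fused = (votes.sum(axis=0) > len(to_consider) // 2).astype(np.uint8)
        nib.save(nib.Nifti1Image(fused, np.eye(4)),
                 jp(out_dir, str(i + 1).zfill(2) + ".nii.gz"))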
Example #8
from os.path import join as jp
import pandas as pd
import numpy as np
import nibabel as nib
import SimpleITK as sitk
from ms_segmentation.general.general import list_folders, list_files_with_name_containing, create_folder

path_masks = r'D:\dev\s.tasconmorales\test_segm\PROCESS\INPUT\mri'
path_target = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\isbi_train'
path_copy = r'D:\dev\s.tasconmorales\wm_gm_masks'

patients = list_folders(path_masks)

for pat in patients:
    print('Patient:', pat)
    create_folder(jp(path_copy, pat))
    n_timepoints = len(
        list_files_with_name_containing(jp(path_target, pat), 'mprage',
                                        'nii.gz'))
    #read ref image
    ref = sitk.ReadImage(jp(path_masks, pat, 'ref.nii.gz'))
    #Read each mask
    for tp in range(n_timepoints):
        print('Time-point:', tp + 1)
        gm = sitk.ReadImage(
            jp(path_masks, pat, 'p1mprage_' + str(tp + 1).zfill(2) + '.nii'))
        gm_np = sitk.GetArrayFromImage(gm)
        gm_new = sitk.GetImageFromArray((gm_np > 0.5).astype(np.uint8))
        gm_new.CopyInformation(ref)
        sitk.WriteImage(
            gm_new,
            jp(path_copy, pat, 'gm_' + str(tp + 1).zfill(2) + '.nii.gz'))  # output name is an assumption
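        # Sketch (assumption): the white-matter probability maps follow the same SPM-style
        # naming with a 'p2mprage_' prefix; they are binarized and written analogously.
        wm = sitk.ReadImage(
            jp(path_masks, pat, 'p2mprage_' + str(tp + 1).zfill(2) + '.nii'))
        wm_new = sitk.GetImageFromArray(
            (sitk.GetArrayFromImage(wm) > 0.5).astype(np.uint8))
        wm_new.CopyInformation(ref)
        sitk.WriteImage(
            wm_new,
            jp(path_copy, pat, 'wm_' + str(tp + 1).zfill(2) + '.nii.gz'))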
Example #9
import os
import shutil
from ms_segmentation.general.general import list_folders, create_folder, cls
from os.path import join as jp

origin = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_CS\histogram_matched'
destiny = r'D:\dev\ms_data\Challenges\ISBI2015\ISBI_L\histogram_matched'

to_copy = ['flair', 'pd', 'mprage', 't2', 'brain_mask', 'mask1', 'mask2']

patients = list_folders(origin)
for pat in patients:
    create_folder(jp(destiny, pat))

    #Copy all timepoints from origin to destiny
    timepoints = list_folders(jp(origin, pat))
    for tp in timepoints:
        cls()
        print("Patient: ", pat)
        print("TP: ", tp)
        for elem in to_copy:
            shutil.copyfile(jp(origin, pat, tp, elem + '.nii.gz'),
                            jp(destiny, pat, elem + '_' + tp + '.nii.gz'))
Example #10
import os
import numpy as np
import nibabel as nib
from os.path import join as jp
from ms_segmentation.general.general import save_image, list_folders, create_log, create_folder, list_files_with_extension

path_base = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs'
path_ensembles = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\results_cs\ensembles'
experiments_to_ensemble = ['qwertz06', 'qwertz09', 'qwertz11']

num_ens = len(list_folders(path_ensembles))
name_folder_new_ensemble = "Ensemble_" + str(num_ens + 1).zfill(2)
create_folder(jp(path_ensembles, name_folder_new_ensemble))

# Check that listed experiments exist
all_experiments = list_folders(path_base)
all_experiments = [x for x in all_experiments if x in experiments_to_ensemble]

assert len(all_experiments) % 2 != 0  # number of experiments must be odd

all_images = []
for i_exp, exp in enumerate(all_experiments):
    print("Current experiment: ", exp, i_exp + 1, "/", len(all_experiments))
    all_images_names = list_files_with_extension(jp(path_base, exp), "nii")
    all_images_curr_exp = []
    for img in all_images_names:
        all_images_curr_exp.append(
            nib.load(jp(path_base, exp, img)).get_fdata().astype(np.uint8))
    all_images.append(np.stack(all_images_curr_exp))  # (61, volume size)

all_images_np = np.stack(all_images)
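# Sketch (assumptions): per-image majority vote across the (odd number of) selected experiments,
# written into the new ensemble folder under the original file names. This assumes every
# experiment folder contains identically named images in the same order; an identity affine is
# used because save_image's signature is not shown in this fragment.
for i_img, img_name in enumerate(all_images_names):
    votes = all_images_np[:, i_img]  # shape: (n_experiments,) + volume shape
    fused = (votes.sum(axis=0) > len(all_experiments) // 2).astype(np.uint8)
    nib.save(nib.Nifti1Image(fused, np.eye(4)),
             jp(path_ensembles, name_folder_new_ensemble, img_name))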
Example #11
import os
from os.path import join as jp
import shutil
from ms_segmentation.general.general import list_folders, create_folder

path_origin = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_images_longit'
path_destiny = r'D:\dev\ms_data\Challenges\ISBI2015\Test_Images\chi_square_images_cs'
create_folder(path_destiny)
patients = list_folders(path_origin)

def get_name(the_string):
    if "flair" in the_string:
        return "flair_norm.nii.gz"
    elif "brain_mask" in the_string:
        return "brain_mask.nii.gz"
    elif "pd" in the_string:
        return "pd_norm.nii.gz"
    elif "mprage" in the_string:
        return "mprage_norm.nii.gz"
    elif "t2" in the_string:
        return "t2_norm.nii.gz"
    elif "mask1" in the_string:
        return "mask1.nii.gz"
    elif "mask2" in the_string:
        return "mask2.nii.gz"
    else:
        raise Exception("Unknown image type")

for pat in patients:
    create_folder(jp(path_destiny, pat))
    all_files = os.listdir(jp(path_origin, pat))
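    # Sketch (assumptions): each file name ends in a two-digit time-point suffix
    # (e.g. flair_01.nii.gz); files are copied into one sub-folder per time-point
    # using the canonical names produced by get_name.
    for f in all_files:
        tp = f.split('.')[0].split('_')[-1]  # assumed two-digit time-point suffix
        create_folder(jp(path_destiny, pat, tp))
        shutil.copyfile(jp(path_origin, pat, f),
                        jp(path_destiny, pat, tp, get_name(f)))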