Esempio n. 1
0
def validate(model, df, input_shape, output_shape, n_tiles, n_classes):
    """Evaluate `model` on all (preprocessed, label) pairs of `df`.

    Runs `feedforward` on each preprocessed volume, takes the argmax class
    per voxel and returns the per-class Dice coefficients averaged over the
    dataframe (array of length `n_classes`).
    """
    per_image_scores = []
    for image_fp, label_fp in zip(df["preprocessed"], df["label"]):
        image = load_nifti(image_fp)
        label = load_nifti(label_fp)
        probs = feedforward(model, image, input_shape, output_shape, n_tiles,
                            n_classes)
        prediction = np.int32(np.argmax(probs, axis=0))
        per_image_scores.append(
            dice_coefficients(prediction, label, labels=range(n_classes)))
    return np.mean(np.array(per_image_scores), axis=0)
Esempio n. 2
0
    def fit_normalization(self, num_sample=None, show_progress=False):
        """
        Calculate the voxel-wise mean and std across the dataset for normalization.

        Results are stored in `self.mean` and `self.std` (used by __getitem__).

        Args:
            num_sample (int or None): If None (default), calculate the values across the complete dataset,
                                      otherwise sample a number of images.
            show_progress (bool): Show a progress bar during the calculation.
        """

        if num_sample is None:
            num_sample = len(self)

        image_shape = self.image_shape()
        all_struct_arr = np.zeros(
            (num_sample, image_shape[0], image_shape[1], image_shape[2]))

        sampled_filenames = np.random.choice(self.filenames,
                                             num_sample,
                                             replace=False)
        if show_progress:
            sampled_filenames = tqdm_notebook(sampled_filenames)

        for i, filename in enumerate(sampled_filenames):
            # BUG FIX: `mask` was a free (undefined here) name; use the
            # dataset's own mask, consistent with __getitem__.
            struct_arr = utils.load_nifti(filename, mask=self.mask)
            all_struct_arr[i] = struct_arr

        self.mean = all_struct_arr.mean(0)
        self.std = all_struct_arr.std(0)
def check_masks_to_be_binary(masks_fps):
    """Return filepaths of masks that are not truly binary.

    A mask is considered wrong if its dtype is not np.uint8 or its set of
    unique values is not exactly {0, 1}. A warning is printed for each
    violation; each offending filepath is returned at most once (previously
    a file failing both checks was appended twice).
    """
    wrong_masks = []

    # check that masks are truly binary
    with tqdm.tqdm(total=len(masks_fps)) as pbar:
        for fp in masks_fps:
            pbar.set_description(os.path.basename(fp))
            mask, mask_data = utils.load_nifti(fp)

            is_wrong = False

            if mask_data.dtype != np.uint8:
                is_wrong = True
                print(
                    f'WARNING! "{os.path.basename(fp)}" has {mask_data.dtype} dtype'
                )

            unique_values = np.unique(mask_data)
            if not np.array_equal(unique_values, [0, 1]):
                is_wrong = True
                print(
                    f'WARNING! "{os.path.basename(fp)}" is not binary. unique values: {unique_values}'
                )

            # append at most once per file
            if is_wrong:
                wrong_masks.append(fp)

            pbar.update()

    return wrong_masks
def add_raw_masks(masks_raw_dp,
                  masks_out_dp,
                  postfix: str = 'resegm2_fixed_bin'):
    """add raw (not thresholded) masks into dataset"""

    print(f'\nadd_raw_masks()')
    print(f'masks_raw_dp: {masks_raw_dp}')
    print(f'masks_out_dp: {masks_out_dp}')

    raw_fps = utils.get_nii_gz_filepaths(masks_raw_dp)
    print(f'# of raw masks to add: {len(raw_fps)}')

    os.makedirs(masks_out_dp, exist_ok=True)

    with tqdm.tqdm(total=len(raw_fps)) as pbar:
        for raw_fp in raw_fps:
            pbar.set_description(os.path.basename(raw_fp))

            # threshold the raw mask and wrap it back into a Nifti image
            nifti_raw, mask_data = utils.load_nifti(raw_fp)
            thresholded = preprocessing.threshold_mask(mask_data)
            nifti_new = utils.change_nifti_data(thresholded,
                                                nifti_raw,
                                                is_scan=False)

            image_id = utils.parse_image_id_from_filepath(raw_fp)
            out_fp = os.path.join(masks_out_dp,
                                  f'{image_id}_{postfix}.nii.gz')
            utils.store_nifti_to_file(nifti_new, out_fp)

            pbar.update()
Esempio n. 5
0
def mask_matiere_blanche(filename):
    """Build a white-matter binary mask from an FA (fractional anisotropy) map.

    Voxels with FA > 0.15 are set to 1, all others to 0. Returns a float64
    array with the same shape as the FA volume (matching the original
    np.zeros-based mask dtype).
    """
    fa = utils.load_nifti(filename)
    data = fa.get_data()
    # Vectorized threshold replaces the original O(n^3) triple Python loop.
    return (data > 0.15).astype(np.float64)
Esempio n. 6
0
    def _init_info(self):
        """Collect scan/mask file paths and record each image's shape in self._info."""
        paths_dict = utils.get_files_dict(self._scans_dp,
                                          self._masks_dp,
                                          ids=self._img_ids)

        for cur_id, paths in paths_dict.items():
            # skip ids not requested by the caller
            if self._img_ids is not None and cur_id not in self._img_ids:
                continue
            scan_nifti, _ = utils.load_nifti(paths['scan_fp'], load_data=False)
            mask_nifti, _ = utils.load_nifti(paths['mask_fp'], load_data=False)
            assert scan_nifti.shape == mask_nifti.shape, (
                f'scan shape != mask shape. '
                f'id: {cur_id}. '
                f'scan shape: {scan_nifti.shape}, '
                f'mask shape: {mask_nifti.shape}')
            paths['shape'] = scan_nifti.shape

        self._info = paths_dict
Esempio n. 7
0
def validate(model):
    """Validate `model` on the global validation dataframe `df_val`.

    Each volume is tiled into overlapping patches (centers spaced so the
    first/last patch is flush with the borders), the network output is
    stitched into a full class-probability volume, and per-class Dice
    coefficients are averaged over all validation images.

    Uses module-level `df_val`, `args`, `dataset` and `xp`.
    """
    dice_coefs = []
    for image_path, label_path in zip(df_val["image"], df_val["label"]):
        image = load_nifti(image_path)
        label = load_nifti(label_path)
        centers = [[], [], []]
        for img_len, len_out, center, n_tile in zip(image.shape,
                                                    args.output_shape, centers,
                                                    args.n_tiles):
            assert img_len < len_out * n_tile, "{} must be smaller than {} x {}".format(
                img_len, len_out, n_tile)
            stride = int((img_len - len_out) / (n_tile - 1))
            # BUG FIX: use floor division so centers (and the slice bounds
            # built from them below) are ints — float slice indices raise
            # TypeError on Python 3.
            center.append(len_out // 2)
            for i in range(n_tile - 2):
                center.append(center[-1] + stride)
            center.append(img_len - len_out // 2)
        output = np.zeros((dataset["n_classes"], ) + image.shape[:-1])
        for x, y, z in itertools.product(*centers):
            patch = crop_patch(image, [x, y, z], args.input_shape)
            patch = np.expand_dims(patch, 0)
            patch = xp.asarray(patch)
            slices_out = [
                slice(center - len_out // 2, center + len_out // 2)
                for len_out, center in zip(args.output_shape, [x, y, z])
            ]
            slices_in = [
                slice((len_in - len_out) // 2, len_in - (len_in - len_out) // 2)
                for len_out, len_in in zip(args.output_shape,
                                           args.input_shape)
            ]
            # accumulate the network's class scores into the stitched volume
            output[slice(None), slices_out[0], slices_out[1],
                   slices_out[2]] += chainer.cuda.to_cpu(
                       model(patch).data[0,
                                         slice(None), slices_in[0],
                                         slices_in[1], slices_in[2]])
        y = np.argmax(output, axis=0).astype(np.int32)
        dice_coefs.append(
            dice_coefficients(y, label, labels=range(dataset["n_classes"])))
    dice_coefs = np.array(dice_coefs)
    return np.mean(dice_coefs, axis=0)
Esempio n. 8
0
def fix_resegm_masks(resegm_filenames, fixed_dp):
    """Fix re-segmentation masks and store them as '<name>_fixed.nii.gz'.

    For every file in `resegm_filenames.values()` the data is flipped along
    axis 0 and the affine diagonal is made non-negative via
    `utils.diagonal_abs`. `fixed_dp` is removed and recreated from scratch.
    """
    if os.path.isdir(fixed_dp):
        print(f'removing directory with fixed scans: {fixed_dp}')
        shutil.rmtree(fixed_dp)
    os.makedirs(fixed_dp)

    pat = r'([^/]+)(.nii.gz)$'
    for fn in tqdm.tqdm(resegm_filenames.values()):
        # BUG FIX: the voxel data is used below (np.flip), so it must
        # actually be loaded — the original passed load_data=False, which
        # elsewhere in this codebase returns the image without its data.
        img, img_data = utils.load_nifti(fn)
        new_affine = utils.diagonal_abs(img.affine)
        new_data = np.flip(img_data, axis=0)
        new_nii = nibabel.Nifti1Image(new_data, new_affine)
        new_fp = os.path.join(fixed_dp,
                              '_fixed'.join(re.search(pat, fn).groups()))
        new_nii.to_filename(new_fp)
Esempio n. 9
0
    def __getitem__(self, idx):
        """Return the image as a numpy array and the label."""
        label = self.labels[idx]

        struct_arr = utils.load_nifti(self.filenames[idx], mask=self.mask)
        # TODO: Try normalizing each image to mean 0 and std 1 here.
        #struct_arr = (struct_arr - struct_arr.mean()) / (struct_arr.std() + 1e-10)
        # small epsilon prevents division by zero for constant voxels
        normalized = (struct_arr - self.mean) / (self.std + 1e-10)
        # add (empty) channel dimension and convert to a torch tensor
        tensor = torch.FloatTensor(normalized[None])

        if self.transform is not None:
            tensor = self.transform(tensor)

        return tensor, label
Esempio n. 10
0
from sklearn.model_selection import train_test_split
import multiprocessing
import json

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torchvision

from tabulate import tabulate

# Binary brain mask used to cut out the skull.
mask = utils.load_nifti('data/icbm_mask.nii.gz')

# ------------------------- ADNI data tables -----------------------------------

# Ritter/Haynes lab file system at BCCN Berlin.
#ADNI_DIR = '/analysis/share/ADNI'

# Local.
ADNI_DIR = 'data/ADNI'

# Filepaths for 3 Tesla scans.
table_3T = os.path.join(
    ADNI_DIR,
    'ADNI_tables/customized/DxByImgClean_CompleteAnnual2YearVisitList_3T.csv')
# Directory with the preprocessed 2-year 3T images.
image_dir_3T = os.path.join(ADNI_DIR, 'ADNI_2Yr_3T_preprocessed')
# Image IDs excluded from the dataset (presumably corrupted files — verify).
corrupt_images_3T = ['67380']
Esempio n. 11
0
    def store_as_numpy_dataset(self, out_dp: str, zoom_factor: float):
        """
        Convert Nifti images to numpy nd.arrays and store them to .npy files
        to save time on probably time-expensive zoom.

        For every entry in `self._info` the scan is intensity-clipped, cast
        to np.int16 if needed, zoomed along x and y together with its mask,
        and stored both as .npy arrays and as processed Nifti files. The
        original shapes are pickled to 'numpy/shapes.pickle' for later use
        by NumpyDataset.

        Args:
            out_dp (str): output directory. Removed and recreated if it exists.
            zoom_factor (float): zoom factor applied along the x and y axes.

        Raises:
            ValueError: if a mask fails binary validation before or after zooming.
        """
        print(const.SEPARATOR)
        print('NiftiDataset.store_as_numpy_dataset():')
        print(f'\nout_dp: {out_dp}')
        print(f'zoom_factor: {zoom_factor}')

        if os.path.isdir(out_dp):
            print(
                f'\noutput dir "{out_dp}" already exists. \nwill remove and create a new one.'
            )
            shutil.rmtree(out_dp)

        numpy_scans_dp = os.path.join(out_dp, 'numpy', 'scans')
        numpy_masks_dp = os.path.join(out_dp, 'numpy', 'masks')
        nifti_dp = os.path.join(out_dp, 'nifti')
        os.makedirs(numpy_scans_dp, exist_ok=True)
        os.makedirs(numpy_masks_dp, exist_ok=True)
        os.makedirs(nifti_dp, exist_ok=True)

        # store shapes dict for NumpyDataset
        shapes_dict = {k: v['shape'] for (k, v) in self._info.items()}
        shapes_dict_fp = os.path.join(out_dp, 'numpy', 'shapes.pickle')
        with open(shapes_dict_fp, 'wb') as fout:
            pickle.dump(shapes_dict, fout)

        # process and store scans with masks
        with tqdm.tqdm(total=len(self._info),
                       unit='scan',
                       bar_format=const.TQDM_BAR_FORMAT) as pbar:
            for cur_id, cur_info in self._info.items():
                pbar.set_description(
                    f'image: {cur_id}. shape: {cur_info["shape"]}')

                scan_img, scan_data = utils.load_nifti(cur_info['scan_fp'])
                mask_img, mask_data = utils.load_nifti(cur_info['mask_fp'])

                # check raw mask
                mask_is_ok, msg = utils.validate_binary_mask(mask_data)
                if not mask_is_ok:
                    raise ValueError(f'id: "{cur_id}". {msg}')

                # clip scan
                scan_data = preprocessing.clip_intensities(scan_data)

                # convert scan to np.int16 after clipping intensities if needed
                if scan_data.dtype != np.int16:
                    print(
                        f'\nWARNING: scan {cur_id} has {scan_data.dtype} dtype.\n'
                        f'will convert to np.int16')
                    scan_data = scan_data.astype(np.int16)

                # zoom scan and mask
                scan_data = preprocessing.zoom_volume_along_x_y(
                    scan_data, zoom_factor)
                mask_data = preprocessing.zoom_volume_along_x_y(
                    mask_data, zoom_factor)

                # check mask after all transformations
                # NOTE(review): this re-check guards against the zoom producing
                # non-binary mask values
                mask_is_ok, msg = utils.validate_binary_mask(mask_data)
                if not mask_is_ok:
                    raise ValueError(f'id: "{cur_id}". {msg}')

                # store numpy arrays to files
                utils.store_npy(os.path.join(numpy_scans_dp, f'{cur_id}.npy'),
                                scan_data)
                utils.store_npy(os.path.join(numpy_masks_dp, f'{cur_id}.npy'),
                                mask_data)

                # also store processed images to Nifti
                scan_img_new = utils.change_nifti_data(data_new=scan_data,
                                                       nifti_original=scan_img,
                                                       is_scan=True)
                mask_img_new = utils.change_nifti_data(data_new=mask_data,
                                                       nifti_original=mask_img,
                                                       is_scan=False)
                utils.store_nifti_to_file(
                    scan_img_new, os.path.join(nifti_dp, f'{cur_id}.nii.gz'))
                utils.store_nifti_to_file(
                    mask_img_new,
                    os.path.join(nifti_dp, f'{cur_id}_autolungs.nii.gz'))

                pbar.update()
Esempio n. 12
0
        # On suit la direction principale la plus alignee jusqu'a sortir du masque
        streamline.extend(build_streamline(brain_mask, pos_depart, 0.5, [0.0, 0.0, 0.0]))

        # On inverse la streamline pour continuer de l'autre cote
        streamline.reverse()

        # On suit la direction principale dans l'autre sens
        streamline.extend(build_streamline(brain_mask, pos_depart, -0.5, [0.0, 0.0, 0.0]))

        # On ajoute la streamline au reseau
        if len(streamline) > 0:
            streamlines.append(streamline)

    return np.array(streamlines)


if __name__ == "__main__":

    # Load the peaks image and the mask, and set the number of streamlines
    nb_streamline = 10000
    direc_max = utils.load_nifti("_peaks.nii").get_data()
    brain_mask = mask_matiere_blanche('_tensor_fa.nii.gz')

    # To work around interpolation issues, make sure the border is black
    brain_mask[:, :, 0] = 0

    # Run the tractography and save the streamlines
    lines = tracto(brain_mask, direc_max, nb_streamline)
    utils.save_streamlines(lines, "tp3_data\\_streamline.trk")

Esempio n. 13
0
    fig.add_subplot(122)
    plt.title("RGB")
    plt.axis('off')
    plt.imshow(rgb[:, rgb.shape[1]/2, :], cmap=cm.gray)

    #sauvegarde
    plt.savefig(os.path.dirname(os.path.realpath(__file__)) + "\\.." + "\\Latex\\Images\\" + filename + ".png")
    plt.show()

if __name__ == "__main__":

    # loading image and gradient table
    bvals_filename="tp3_data\\bvals2000"
    bvecs_filename="tp3_data\\bvecs2000"

    img = utils.load_nifti("dwi2000.nii.gz")
    bvals, bvecs = read_bvals_bvecs(bvals_filename, bvecs_filename)
    gtab = dp.data.gradient_table(bvals, bvecs)
    data = img.get_data()
    print('data.shape (%d, %d, %d, %d)' % data.shape)

    # Creating the brain mask via median Otsu thresholding
    # NOTE(review): positional args are presumably median_radius=3, numpass=1 —
    # confirm against the dipy version in use
    maskdata, mask = median_otsu(data, 3, 1, False, vol_idx=range(10, 50), dilate=2)
    print('maskdata.shape (%d, %d, %d, %d)' % maskdata.shape)

    # Creating tensor model and fitting it to the masked data
    tenmodel = dti.TensorModel(gtab)
    tenfit = tenmodel.fit(maskdata)

    # Computing anisotropy measures
    print('Computing anisotropy measures (FA, RGB)')
def main():
    """CLI entry point: load a trained VoxResNet, run it over every test
    image listed in the input json and save the per-voxel class
    probabilities (normalized to sum to 1) as Nifti files."""
    parser = argparse.ArgumentParser(
        description="calculate class probabilities with VoxResNet")
    parser.add_argument("--input_file", "-i", type=str,
                        help="input json file of test dataset")
    parser.add_argument("--output_suffix", "-o", type=str,
                        help="result of the segmentation")
    parser.add_argument("--model", "-m", type=str,
                        help="a file containing parameters of trained VoxResNet")
    parser.add_argument("--input_shape", type=int, nargs="*", action="store",
                        default=[80, 80, 80],
                        help="input patch shape of VoxResNet, default=[80, 80, 80]")
    parser.add_argument("--output_shape", type=int, nargs="*", action="store",
                        default=[60, 60, 60],
                        help="output patch shape of VoxResNet, default=[60, 60, 60]")
    parser.add_argument("--gpu", "-g", default=-1, type=int,
                        help="negative value indicates no gpu, default=-1")
    parser.add_argument("--n_tiles", type=int, nargs="*", action="store",
                        default=[5, 5, 5],
                        help="number of tiles along each axis")
    args = parser.parse_args()
    print(args)

    with open(args.input_file) as f:
        dataset = json.load(f)
    test_df = pd.DataFrame(dataset["data"])

    # build the network and restore trained parameters
    model = VoxResNet(dataset["in_channels"], dataset["n_classes"])
    chainer.serializers.load_npz(args.model, model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    for image_path, subject in zip(test_df["image"], test_df["subject"]):
        image, affine = load_nifti(image_path, with_affine=True)
        probs = feedforward(model, image, args.input_shape, args.output_shape,
                            args.n_tiles, dataset["n_classes"])

        # normalize so class probabilities sum to one per voxel
        probs /= np.sum(probs, axis=0, keepdims=True)

        out_fp = os.path.join(os.path.dirname(image_path),
                              subject + args.output_suffix)
        nib.save(
            nib.Nifti1Image(np.float32(probs).transpose(1, 2, 3, 0), affine),
            out_fp)
Esempio n. 15
0
from sklearn.model_selection import train_test_split
import multiprocessing
import json

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torchvision

from tabulate import tabulate

# Binary brain mask used to cut out the skull.
mask = utils.load_nifti('data/binary_brain_mask.nii.gz')

# ------------------------- ADNI data tables -----------------------------------

# Ritter/Haynes lab file system at BCCN Berlin.
#ADNI_DIR = '/analysis/share/ADNI'

# Local.
ADNI_DIR = 'data/ADNI'

# Filepaths for 3 Tesla scans.
table_3T = os.path.join(
    ADNI_DIR,
    'ADNI_tables/customized/DxByImgClean_CompleteAnnual2YearVisitList_3T.csv')
# Directory with the preprocessed 2-year 3T images.
image_dir_3T = os.path.join(ADNI_DIR, 'ADNI_2Yr_3T_preprocessed')
corrupt_images_3T = [
Esempio n. 16
0
def main():
    """CLI entry point: segment every test image with a trained VoxResNet.

    Each volume is tiled into overlapping patches (centers spaced so the
    first/last patch is flush with the borders), the per-patch class scores
    are stitched into a full volume, and the per-voxel argmax label map is
    saved as a Nifti file next to the input image.
    """
    parser = argparse.ArgumentParser(description="segment with VoxResNet")
    parser.add_argument("--input_file",
                        "-i",
                        type=str,
                        help="input json file of test dataset")
    parser.add_argument(
        "--output_suffix",
        "-o",
        type=str,
        default="_segTRI_predict.nii.gz",
        help="result of the segmentation, default=_segTRI_predict.nii.gz")
    parser.add_argument(
        "--model",
        "-m",
        type=str,
        help="a file containing parameters of trained VoxResNet")
    parser.add_argument(
        "--input_shape",
        type=int,
        nargs="*",
        action="store",
        default=[80, 80, 80],
        help="input patch shape of VoxResNet, default=[80, 80, 80]")
    parser.add_argument(
        "--output_shape",
        type=int,
        nargs="*",
        action="store",
        default=[60, 60, 60],
        help="output patch shape of VoxResNet, default=[60, 60, 60]")
    parser.add_argument("--gpu",
                        "-g",
                        default=-1,
                        type=int,
                        help="negative value indicates no gpu, default=-1")
    parser.add_argument(
        "--n_tiles",
        type=int,
        nargs="*",
        action="store",
        default=[5, 5, 5],
        help="number of tiles along each axis, default=[5, 5, 5]")
    args = parser.parse_args()
    print(args)

    with open(args.input_file) as f:
        dataset = json.load(f)
    test_df = pd.DataFrame(dataset["data"])

    vrn = VoxResNet(dataset["in_channels"], dataset["n_classes"])
    chainer.serializers.load_npz(args.model, vrn)

    # pick cupy on GPU, numpy on CPU
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        vrn.to_gpu()
        xp = chainer.cuda.cupy
    else:
        xp = np

    for image_path, subject in zip(test_df["image"], test_df["subject"]):
        image, affine = load_nifti(image_path, with_affine=True)
        centers = [[], [], []]
        for img_len, len_out, center, n_tile in zip(image.shape,
                                                    args.output_shape, centers,
                                                    args.n_tiles):
            assert img_len < len_out * n_tile, "{} must be smaller than {} x {}".format(
                img_len, len_out, n_tile)
            stride = int((img_len - len_out) / (n_tile - 1))
            # BUG FIX: use floor division so centers (and the slice bounds
            # built from them below) are ints — float slice indices raise
            # TypeError on Python 3.
            center.append(len_out // 2)
            for i in range(n_tile - 2):
                center.append(center[-1] + stride)
            center.append(img_len - len_out // 2)
        output = np.zeros((dataset["n_classes"], ) + image.shape[:-1])
        for x, y, z in itertools.product(*centers):
            patch = crop_patch(image, [x, y, z], args.input_shape)
            patch = np.expand_dims(patch, 0)
            patch = xp.asarray(patch)
            slices_out = [
                slice(center - len_out // 2, center + len_out // 2)
                for len_out, center in zip(args.output_shape, [x, y, z])
            ]
            slices_in = [
                slice((len_in - len_out) // 2, len_in - (len_in - len_out) // 2)
                for len_out, len_in in zip(args.output_shape,
                                           args.input_shape)
            ]
            # accumulate the network's class scores into the stitched volume
            output[slice(None), slices_out[0], slices_out[1],
                   slices_out[2]] += chainer.cuda.to_cpu(
                       vrn(patch).data[0,
                                       slice(None), slices_in[0], slices_in[1],
                                       slices_in[2]])
        y = np.argmax(output, axis=0)
        nib.save(
            nib.Nifti1Image(np.int32(y), affine),
            os.path.join(os.path.dirname(image_path),
                         subject + args.output_suffix))
Esempio n. 17
0
            pos_depart = (np.random.randint(brain_mask.shape[0]), np.random.randint(brain_mask.shape[1]), np.random.randint(brain_mask.shape[2]))
        streamline.append(pos_depart)

        # On suit la direction principale la plus alignee jusqu'a sortir du masque
        streamline.extend(build_streamline(brain_mask, pos_depart, 0.5, [0.0, 0.0, 0.0]))

        # On inverse la streamline pour continuer de l'autre cote
        streamline.reverse()

        # On suit la direction principale dans l'autre sens]
        streamline.extend(build_streamline(brain_mask, pos_depart, -0.5, [0.0, 0.0, 0.0]))

        # Si on a trouve une streamline on l'ajoute a la liste finale
        if len(streamline) > 0:
            streamlines.append(np.array(streamline))

    return np.array(streamlines)

if __name__ == "__main__":

    # Load the fODF peaks image and the mask, and set the number of streamlines
    nb_streamline = 30000
    direc_max = utils.load_nifti("_fodfpeaks.nii.gz").get_data()
    brain_mask = mask_matiere_blanche('_tensor_fa.nii.gz')

    # To work around interpolation issues, make sure the border is black
    brain_mask[:, :, 0] = 0

    # Run the tractography and save the streamlines
    lines= tracto(brain_mask, direc_max, nb_streamline)
    utils.save_streamlines(lines, "tp3_data\\_streamline_fodf.trk")
Esempio n. 18
0
                              tensorboard_callback, cp_callback, stop_callback
                          ])

                # evaluate on the test and val sets
                test_loss = model.evaluate(test_dataset)
                #cc_mean_test[layernum][kernum][filtnum] = test_loss[2]
                cc_mean_test_1D[count] = test_loss[2]

                val_loss = model.evaluate(valid_dataset)
                #cc_mean_val[layernum][kernum][filtnum] = val_loss[2]
                cc_mean_val_1D[count] = val_loss[2]

                # predict on test data
                predicted_batch = model.predict(test_dataset)
                test_batch = load_nifti(TEST_SUBIDS,
                                        rootpath=DATA_PATH,
                                        labelname=TASK_FILE_NAME,
                                        labelnum=TASK_FILE_NUM)
                save_prediction(predicted_batch=predicted_batch,
                                rootpath=DATA_PATH,
                                outpath=OUT_PATH,
                                labelname=TASK_FILE_NAME,
                                labelnum=TASK_FILE_NUM,
                                template_subID=TEST_SUBIDS[0],
                                subIDs=TEST_SUBIDS)
                cc = act_pred_corr(predicted_batch, test_batch)

                print(np.mean(np.diagonal(cc)))
            # cc_norm = normalize(cc,axis=0)
            # cc_norm = normalize(cc_norm,axis=1)
            # plt.subplot(1, 2, 1)
            # plt.imshow(cc,cmap="jet")
Esempio n. 19
0
    def segment_scans(self,
                      checkpoint_fp: str,
                      scans_dp: str,
                      postfix: str,
                      ids: List[str] = None,
                      output_dp: str = None):
        """
        Segment .nii.gz scans with the net restored from `checkpoint_fp` and
        store the predicted masks as '<id>_<postfix>.nii.gz' under `output_dp`.

        :param checkpoint_fp:   path to .pth file with net's params dict
        :param scans_dp:    path directory with .nii.gz scans.
                            will check that scans do not have any postfixes in their filenames.
        :param postfix:     postfix of segmented filenames
        :param ids:    list of image ids to consider. if None segment all scans under `scans_dp`
        :param output_dp:   path to directory to store results of segmentation
        """
        utils.check_var_to_be_iterable_collection(ids)

        print(const.SEPARATOR)
        print('Pipeline.segment_scans()')

        output_dp = output_dp or const.SEGMENTED_DN
        print(f'will store segmented masks under "{output_dp}"')
        os.makedirs(output_dp, exist_ok=True)

        print(f'postfix: {postfix}')

        self.load_net_from_weights(checkpoint_fp)
        scans_fps = utils.get_nii_gz_filepaths(scans_dp)
        print(f'# of .nii.gz files under "{scans_dp}": {len(scans_fps)}')

        # filter filepaths to scans: keep only files without a filename
        # postfix and (when `ids` is given) with a wanted image id
        scans_fps_filtered = []
        for fp in scans_fps:
            img_id, img_postfix = utils.parse_image_id_from_filepath(
                fp, get_postfix=True)
            if img_postfix != '' or ids is not None and img_id not in ids:
                continue
            scans_fps_filtered.append(fp)
        print(f'# of scans left after filtering: {len(scans_fps_filtered)}')

        print('\nstarting segmentation...')
        time_start_segmentation = time.time()

        with tqdm.tqdm(total=len(scans_fps_filtered)) as pbar:
            for fp in scans_fps_filtered:
                cur_id = utils.parse_image_id_from_filepath(fp)
                pbar.set_description(cur_id)

                scan_nifti, scan_data = utils.load_nifti(fp)

                # clip intensities as during training
                scan_data_clipped = preprocessing.clip_intensities(scan_data)

                segmented_data = mu.segment_single_scan(
                    scan_data_clipped, self.net, self.device)
                # wrap the prediction back into a Nifti image based on the source scan
                segmented_nifti = utils.change_nifti_data(segmented_data,
                                                          scan_nifti,
                                                          is_scan=False)

                out_fp = os.path.join(output_dp, f'{cur_id}_{postfix}.nii.gz')
                utils.store_nifti_to_file(segmented_nifti, out_fp)

                pbar.update()

        print(
            f'\nsegmentation ended. elapsed time: {utils.get_elapsed_time_str(time_start_segmentation)}'
        )
        utils.print_cuda_memory_stats(self.device)
Esempio n. 20
0
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
import nibabel as nib
import numpy as np
from dipy.segment.mask import median_otsu
import utils


if __name__ == "__main__":
    # Compare median_otsu parameter choices on a b0 volume and display the result.
    img = utils.load_nifti('b0.nii.gz')
    data = img.get_data()

    # b0_mask, mask = median_otsu(data, 2, 1)
    # b0_mask, mask = median_otsu(data, 3, 1)
    # b0_mask, mask = median_otsu(data, 1, 2)
    # b0_mask, mask = median_otsu(data, 1, 3)
    # b0_mask, mask = median_otsu(data, 3, 2)
    # b0_mask, mask = median_otsu(data, 2, 3)
    b0_mask, mask1 = median_otsu(data, 1, 1)
    b0_mask, mask2 = median_otsu(data, 4, 2)
    # retained parameters
    b0_mask, mask = median_otsu(data, 2, 2)

    fig = plt.figure()
    fig.add_subplot(221)
    plt.title("Image")
    plt.axis('off')
    # BUG FIX: use floor division — a float index raises TypeError on Python 3
    plt.imshow(data[data.shape[0] // 2, :, :], cmap=cm.gray)

    fig.add_subplot(222)
Esempio n. 21
0
 def image_shape(self):
     """Shape of an MRI volume, taken from the first file of the dataset."""
     first_image = utils.load_nifti(self.filenames[0], mask=mask)
     return first_image.shape
with open(args.input_file) as f:
    dataset = json.load(f)
test_df = pd.DataFrame(dataset["data"])

# build the network and restore trained parameters
vrn = VoxResNet(dataset["in_channels"], dataset["n_classes"])
chainer.serializers.load_npz(args.model, vrn)

# pick cupy arrays on GPU, numpy on CPU
if args.gpu >= 0:
    chainer.cuda.get_device(args.gpu).use()
    vrn.to_gpu()
    xp = chainer.cuda.cupy
else:
    xp = np

for image_path, subject in zip(test_df["image"], test_df["subject"]):
    image, affine = load_nifti(image_path, with_affine=True)
    # evenly spaced patch centers along each axis (first and last flush
    # with the volume borders)
    centers = [[], [], []]
    for img_len, len_out, center, n_tile in zip(image.shape, args.output_shape,
                                                centers, args.n_tiles):
        assert img_len < len_out * n_tile, "{} must be smaller than {} x {}".format(
            img_len, len_out, n_tile)
        stride = int((img_len - len_out) / (n_tile - 1))
        # NOTE(review): on Python 3 `len_out / 2` yields a float center and
        # slice bounds built from it need ints — confirm target Python version
        center.append(len_out / 2)
        for i in range(n_tile - 2):
            center.append(center[-1] + stride)
        center.append(img_len - len_out / 2)
    output = np.zeros((dataset["n_classes"], ) + image.shape[:-1])
    for x, y, z in itertools.product(*centers):
        patch = crop_patch(image, [x, y, z], args.input_shape)
        patch = np.expand_dims(patch, 0)
        patch = xp.asarray(patch)
Esempio n. 23
0
 def get_raw_image(self, idx):
     """Return the image at index idx as stored on disk — not normalized,
     no color channel added, and no transform applied."""
     filepath = self.filenames[idx]
     return utils.load_nifti(filepath, mask=self.mask)
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
import nibabel as nib
import numpy as np
import dipy.reconst.dti as dti
import utils

if __name__ == "__main__":

    # loading image and gradient table
    bvals, bvecs = read_bvals_bvecs("tp3_data\\bvals2000", "tp3_data\\bvecs2000")
    gtab = gradient_table(bvals, bvecs)
    img = utils.load_nifti("dwi2000.nii.gz")

    ##### use a brain mask when fitting the tensor model
    tenmodel = dti.TensorModel(gtab)
    brain_mask = nib.load("tp3_data\\_mask.nii.gz").get_data()
    tenfit = tenmodel.fit(img.get_data(), mask=brain_mask)

    ##### save the principal orientations (first eigenvector per voxel)
    # NOTE(review): the (112, 112, 60, 15) shape is hard-coded — confirm it
    # matches the DWI volume dimensions
    peaks_fiberNav = np.zeros((112, 112, 60, 15), dtype='float32')
    peaks_fiberNav[:, :, :, 0:3] = tenfit.evecs[..., 0].astype(np.float32)
    nib.save(nib.Nifti1Image(peaks_fiberNav, img.get_affine()), "tp3_data\\_peaks")

    ##### save the tensors
    from dipy.reconst.dti import lower_triangular
    tensor_vals = lower_triangular(tenfit.quadratic_form)
    # reorder the lower-triangular values before writing to file
    correct_order = [0, 1, 3, 2, 4, 5]
    tensor_vals_reordered = tensor_vals[..., correct_order]
    fiber_tensors = nib.Nifti1Image(tensor_vals_reordered.astype(np.float32), img.get_affine())
    nib.save(fiber_tensors, "tp3_data\\_tensor")