Example #1
def save_multilabel_img_as_multiple_files_endings_OLD(Config,
                                                      img,
                                                      affine,
                                                      path,
                                                      multilabel=True):
    '''
    multilabel True:    save beginnings as label 1 and endings as label 2 in a single 3D labelmap (no fourth dimension)
    multilabel False:   save beginnings and endings combined into one binary mask
    '''
    # bundles = exp_utils.get_bundle_names("20")[1:]
    bundles = exp_utils.get_bundle_names(Config.CLASSES)[1:]
    for idx, bundle in enumerate(bundles):
        data = img[:, :, :, (idx * 2):(idx * 2) + 2] > 0

        multilabel_img = np.zeros(data.shape[:3])

        if multilabel:
            multilabel_img[data[:, :, :, 0]] = 1
            multilabel_img[data[:, :, :, 1]] = 2
        else:
            multilabel_img[data[:, :, :, 0]] = 1
            multilabel_img[data[:, :, :, 1]] = 1

        img_seg = nib.Nifti1Image(multilabel_img, affine)
        exp_utils.make_dir(join(path, "endings"))
        nib.save(img_seg, join(path, "endings", bundle + ".nii.gz"))
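A minimal numpy-only sketch of the labelling logic described in the docstring above; the toy array below is hypothetical and not part of the original code:

import numpy as np

# Toy endings block for one bundle: channel 0 = beginnings, channel 1 = endings (hypothetical data)
data = np.zeros((4, 4, 4, 2))
data[0, 0, 0, 0] = 1   # one "beginning" voxel
data[3, 3, 3, 1] = 1   # one "ending" voxel
data = data > 0

# multilabel=True: beginnings become label 1, endings become label 2 in a single 3D labelmap
multilabel_img = np.zeros(data.shape[:3])
multilabel_img[data[:, :, :, 0]] = 1
multilabel_img[data[:, :, :, 1]] = 2

# multilabel=False: beginnings and endings are merged into one binary mask
binary_img = np.zeros(data.shape[:3])
binary_img[data[:, :, :, 0]] = 1
binary_img[data[:, :, :, 1]] = 1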
Example #2
def save_multilabel_img_as_multiple_files_endings(classes, img, affine, path):
    bundles = dataset_specific_utils.get_bundle_names(classes)[1:]
    for idx, bundle in enumerate(bundles):
        img_seg = nib.Nifti1Image(img[:, :, :, idx], affine)
        exp_utils.make_dir(join(path, "endings_segmentations"))
        nib.save(img_seg,
                 join(path, "endings_segmentations", bundle + ".nii.gz"))
Example #3
def create_preprocessed_files(subject):

    # Estimate bounding box from this file and then apply it to all other files
    bb_file = "12g_125mm_peaks"

    # todo: adapt
    # filenames_data = ["12g_125mm_peaks", "90g_125mm_peaks", "270g_125mm_peaks",
    #                   "12g_125mm_bedpostx_peaks_scaled", "90g_125mm_bedpostx_peaks_scaled",
    #                   "270g_125mm_bedpostx_peaks_scaled"]
    # filenames_seg = ["bundle_masks_72", "bundle_masks_dm", "endpoints_72_ordered",
    #                  "bundle_peaks_Part1", "bundle_peaks_Part2", "bundle_peaks_Part3", "bundle_peaks_Part4",
    #                  "bundle_masks_autoPTX_dm", "bundle_masks_autoPTX_thr001"]

    filenames_data = ["bundle_uncertainties"]
    filenames_seg = []

    print("idx: {}".format(subjects.index(subject)))
    exp_utils.make_dir(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject))

    # Get bounding box
    data = nib.load(
        join(C.NETWORK_DRIVE, DATASET_FOLDER, subject,
             bb_file + ".nii.gz")).get_data()
    _, _, bbox, _ = data_utils.crop_to_nonzero(np.nan_to_num(data))

    for idx, filename in enumerate(filenames_data):
        path = join(C.NETWORK_DRIVE, DATASET_FOLDER, subject,
                    filename + ".nii.gz")
        if os.path.exists(path):
            img = nib.load(path)
            data = img.get_data()
            affine = img.affine
            data = np.nan_to_num(data)

            # Add channel dimension if it does not exist yet
            if len(data.shape) == 3:
                data = data[..., None]

            data, _, _, _ = data_utils.crop_to_nonzero(data, bbox=bbox)

            # np.save(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject, filename + ".npy"), data)
            nib.save(
                nib.Nifti1Image(data, affine),
                join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject,
                     filename + ".nii.gz"))
        else:
            print("skipping file: {}-{}".format(subject, idx))
            raise IOError("File missing")

    for filename in filenames_seg:
        img = nib.load(
            join(C.NETWORK_DRIVE, DATASET_FOLDER, subject,
                 filename + ".nii.gz"))
        data = img.get_data()
        data, _, _, _ = data_utils.crop_to_nonzero(data, bbox=bbox)
        # np.save(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject, filename + ".npy"), data)
        nib.save(
            nib.Nifti1Image(data, img.affine),
            join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject,
                 filename + ".nii.gz"))
Example #4
def main():

    args = sys.argv[1:]
    out_dir = args[0]

    exp_utils.make_dir(join(out_dir, "endings_segmentations"))
    exp_utils.make_dir(join(out_dir, "TOM_trackings"))

    affine = np.array([[-1., 0., 0., 90], [0., 1., 0., 126], [0., 0., 1., -72],
                       [0., 0., 0., 1.]])
    offset = np.array([90, 126, -72])
    spacing = abs(affine[0, 0])

    data = [
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
        [[0.5, 0.9, 0.5], [0.9, 0.5, 0.9], [0.5, 0.9, 0.5]],
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
    ]
    data = np.array(data)
    data[0, 0, 0] = 0.1
    data[2, 2, 2] = 0.3
    data[0, 2, 2] = 0.4
    img = nib.Nifti1Image(data, affine)
    nib.save(img, join(out_dir, "toy_FA.nii.gz"))

    mask = np.zeros((3, 3, 3))
    mask[0, 0, 0] = 1
    img = nib.Nifti1Image(mask, affine)
    nib.save(img, join(out_dir, "endings_segmentations", "toy_b.nii.gz"))

    # sl1 = np.array([[0., 0., 0.], [2., 2., 2.]])
    sl2 = np.array([[0., 2., 2.], [0., 0., 0.]])
    streamlines = [sl2]

    # Have to subtract 0.5 to move from the convention "0 mm is in the voxel corner" to the convention
    # "0 mm is in the voxel center". We have to do this because nifti uses the convention "0 mm is in the
    # voxel center" (streamlines are in world space, but the edge of the first voxel of the nifti is not at
    # 0,0,0 but at -0.5,-0.5,-0.5). If we do not apply this, results will be displayed incorrectly in image
    # viewers (e.g. MITK) and dipy functions (e.g. near_roi) will give wrong results.
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    streamlines = list(transform_streamlines(streamlines, affine))

    streamlines = fiber_utils.invert_streamlines(streamlines,
                                                 data,
                                                 affine,
                                                 axis="y")

    # This is equivalent to applying the -0.5 before the transforms
    #  -> have to change the sign for each axis that has a minus sign in the affine or is inverted
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, 0.5 * spacing, axis="x")
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, 0.5 * spacing, axis="y")
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, -0.5 * spacing, axis="z")

    fiber_utils.save_streamlines_as_trk_legacy(
        join(out_dir, "TOM_trackings", "toy.trk"), streamlines, affine,
        data.shape)
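A small worked example of the half-voxel shift discussed in the comment above, using nibabel's apply_affine and the same toy affine; this is only an illustration, not part of the original pipeline:

import numpy as np
from nibabel.affines import apply_affine

affine = np.array([[-1., 0., 0., 90], [0., 1., 0., 126], [0., 0., 1., -72],
                   [0., 0., 0., 1.]])

point_corner_convention = np.array([0., 0., 0.])         # "0 mm is in the voxel corner"
point_center_convention = point_corner_convention - 0.5  # shift to "0 mm is in the voxel center"

print(apply_affine(affine, point_center_convention))
# -> [ 90.5 125.5 -72.5], the world position of the corner of voxel (0, 0, 0),
#    i.e. offset by half a voxel along each axis from the voxel center at [90., 126., -72.]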
Example #5
def save_multilabel_img_as_multiple_files(Config,
                                          img,
                                          affine,
                                          path,
                                          name="bundle_segmentations"):
    bundles = exp_utils.get_bundle_names(Config.CLASSES)[1:]
    for idx, bundle in enumerate(bundles):
        img_seg = nib.Nifti1Image(img[:, :, :, idx], affine)
        exp_utils.make_dir(join(path, name))
        nib.save(img_seg, join(path, name, bundle + ".nii.gz"))
Example #6
def create_preprocessed_files(subject):

    # todo: adapt
    # filenames_data = ["12g_125mm_peaks", "90g_125mm_peaks", "270g_125mm_peaks", "125mm_bedpostx_tensor"]
    # filenames_seg = ["bundle_masks_72", "bundle_peaks_Part1", "bundle_masks_dm",
    #                  "bundle_masks_autoPTX_dm", "bundle_masks_autoPTX_thr001"]

    # filenames_data = ["270g_125mm_peaks", "90g_125mm_peaks", "12g_125mm_peaks",
    #                   "12g_125mm_bedpostx_peaks_scaled", "90g_125mm_bedpostx_peaks_scaled",
    #                   "270g_125mm_bedpostx_peaks_scaled"]

    # filenames_data = ["270g_125mm_bedpostx_peaks_scaled", "32g_125mm_bedpostx_peaks_scaled"]
    # filenames_seg = ["bundle_masks_autoPTX_dm", "bundle_masks_autoPTX_thr001"]
    filenames_data = [
        "270g_125mm_bedpostx_peaks_scaled", "32g_125mm_bedpostx_peaks_scaled"
    ]
    filenames_seg = []

    print("idx: {}".format(subjects.index(subject)))
    exp_utils.make_dir(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject))

    for idx, filename in enumerate(filenames_data):
        path = join(C.NETWORK_DRIVE, DATASET_FOLDER, subject,
                    filename + ".nii.gz")
        if os.path.exists(path):
            img = nib.load(path)
            data = img.get_data()
            affine = img.affine
            data = np.nan_to_num(data)

            if idx == 0:
                data, _, bbox, _ = dataset_utils.crop_to_nonzero(data)
            else:
                data, _, _, _ = dataset_utils.crop_to_nonzero(data, bbox=bbox)

            # if idx > 0:
            # np.save(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject, filename + ".npy"), data)
            nib.save(
                nib.Nifti1Image(data, affine),
                join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject,
                     filename + ".nii.gz"))
        else:
            print("skipping file: {}-{}".format(subject, idx))
            raise IOError("File missing")

    for filename in filenames_seg:
        data = nib.load(
            join(C.NETWORK_DRIVE, DATASET_FOLDER, subject,
                 filename + ".nii.gz")).get_data()
        data, _, _, _ = dataset_utils.crop_to_nonzero(data, bbox=bbox)
        # np.save(join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject, filename + ".npy"), data)
        nib.save(
            nib.Nifti1Image(data, affine),
            join(C.DATA_PATH, DATASET_FOLDER_PREPROC, subject,
                 filename + ".nii.gz"))
Example #7
def save_multilabel_img_as_multiple_files_peaks(Config, img, affine, path, name="TOM"):
    bundles = exp_utils.get_bundle_names(Config.CLASSES)[1:]
    for idx, bundle in enumerate(bundles):
        data = img[:, :, :, (idx*3):(idx*3)+3]

        if Config.FLIP_OUTPUT_PEAKS:
            data[:, :, :, 2] *= -1  # flip z axis for correct view in MITK
            filename = bundle + "_f.nii.gz"
        else:
            filename = bundle + ".nii.gz"

        img_seg = nib.Nifti1Image(data, affine)
        exp_utils.make_dir(join(path, name))
        nib.save(img_seg, join(path, name, filename))
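A numpy-only sketch of the channel layout assumed above: bundle idx occupies channels 3*idx to 3*idx + 2 of the concatenated peak image, and the z component is the third of those channels (toy shapes, hypothetical data):

import numpy as np

n_bundles = 2
img = np.random.rand(5, 5, 5, n_bundles * 3)    # concatenated (x, y, z) peak components per bundle

idx = 1                                         # second bundle
data = img[:, :, :, (idx * 3):(idx * 3) + 3]    # its 3-channel peak field
data[:, :, :, 2] *= -1                          # flip the z component (as done for MITK above)

Note that the slice is a view, so the in-place flip also modifies the original array; the same holds for the function above.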
Example #8
def save_fusion_nifti_as_npy():

    # Can always leave this the same (for 270g and 32g)
    class Config:
        DATASET = "HCP"
        RESOLUTION = "1.25mm"
        FEATURES_FILENAME = "270g_125mm_peaks"
        LABELS_TYPE = np.int16
        LABELS_FILENAME = "bundle_masks"
        DATASET_FOLDER = "HCP"

    DIFFUSION_FOLDER = "32g_25mm"
    subjects = get_all_subjects()

    print("\n\nProcessing Data...")
    for s in subjects:
        print("processing data subject {}".format(s))
        start_time = time.time()
        data = nib.load(
            join(C.NETWORK_DRIVE, "HCP_fusion_" + DIFFUSION_FOLDER,
                 s + "_probmap.nii.gz")).get_data()
        print("Done Loading")
        data = np.nan_to_num(data)
        data = dataset_utils.scale_input_to_unet_shape(data, Config.DATASET,
                                                       Config.RESOLUTION)
        # cut one pixel at the end, because in scale_input_to_world_shape we output 146 -> one too many at the end
        data = data[:-1, :, :-1, :]
        exp_utils.make_dir(
            join(C.NETWORK_DRIVE, "HCP_fusion_npy_" + DIFFUSION_FOLDER, s))
        np.save(
            join(C.NETWORK_DRIVE, "HCP_fusion_npy_" + DIFFUSION_FOLDER, s,
                 DIFFUSION_FOLDER + "_xyz.npy"), data)
        print("Took {}s".format(time.time() - start_time))

        print("processing seg subject {}".format(s))
        start_time = time.time()
        # seg = ImgUtils.create_multilabel_mask(Config, s, labels_type=Config.LABELS_TYPE)
        seg = nib.load(
            join(C.NETWORK_DRIVE, "HCP_for_training_COPY", s,
                 Config.LABELS_FILENAME + ".nii.gz")).get_data()
        if Config.RESOLUTION == "2.5mm":
            seg = img_utils.resize_first_three_dims(seg, order=0, zoom=0.5)
        seg = dataset_utils.scale_input_to_unet_shape(seg, Config.DATASET,
                                                      Config.RESOLUTION)
        np.save(
            join(C.NETWORK_DRIVE, "HCP_fusion_npy_" + DIFFUSION_FOLDER, s,
                 "bundle_masks.npy"), seg)
        print("Took {}s".format(time.time() - start_time))
Example #9
def save_multilabel_img_as_multiple_files_peaks(flip_output_peaks,
                                                classes,
                                                img,
                                                affine,
                                                path,
                                                name="TOM"):
    bundles = dataset_specific_utils.get_bundle_names(classes)[1:]
    for idx, bundle in enumerate(bundles):
        data = img[:, :, :, (idx * 3):(idx * 3) + 3]

        if flip_output_peaks:
            data[:, :, :, 2] *= -1  # flip z axis for correct view in MITK
            filename = bundle + "_f.nii.gz"
        else:
            filename = bundle + ".nii.gz"

        img_seg = nib.Nifti1Image(data, affine)
        exp_utils.make_dir(join(path, name))
        nib.save(img_seg, join(path, name, filename))
Example #10
def peaks2fixel(peaks_file_in, fixel_dir_out):
    """
    Convert a TOM peak file to the MRtrix fixel format. The result can then be converted to spherical
    harmonics using fixel2sh.

    Args:
        peaks_file_in: path to a peak image of shape (x, y, z, 3) (only 1 peak allowed per voxel)
        fixel_dir_out: output directory for the fixel files (created if it does not exist)

    Returns:
        Void
    """
    exp_utils.make_dir(fixel_dir_out)

    peaks_img = nib.load(peaks_file_in)
    peaks = peaks_img.get_data()
    s = peaks.shape

    directions = []
    index = np.zeros(list(s[:3]) + [2])
    amplitudes = []

    idx_ctr = 0
    for x in range(s[0]):
        for y in range(s[1]):
            for z in range(s[2]):
                peak = peaks[x, y, z]
                peak_len = np.linalg.norm(peak)
                if peak_len > 0:
                    peak_normalized = peak / (peak_len + 1e-20)
                    directions.append(peak_normalized)
                    amplitudes.append(peak_len)
                    index[x, y, z] = [1, idx_ctr]
                    idx_ctr += 1

    nib.save(nib.Nifti2Image(np.array(directions), np.eye(4)),
             join(fixel_dir_out, "directions.nii.gz"))
    nib.save(nib.Nifti2Image(index, peaks_img.affine),
             join(fixel_dir_out, "index.nii.gz"))
    nib.save(nib.Nifti2Image(np.array(amplitudes), np.eye(4)),
             join(fixel_dir_out, "amplitudes.nii.gz"))
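A short self-contained sketch of the index structure built above: for every voxel it stores [fixel count, offset of the voxel's first fixel in the flat directions/amplitudes lists]; since only one peak per voxel is allowed here, the count is either 0 or 1. The values below are hypothetical:

import numpy as np

index = np.zeros((2, 2, 2, 2))           # last dim: [fixel count, offset into directions/amplitudes]
index[1, 0, 0] = [1, 0]                  # voxel (1, 0, 0) holds one fixel, stored at row 0
index[1, 1, 0] = [1, 1]                  # voxel (1, 1, 0) holds one fixel, stored at row 1

directions = np.array([[0., 0., 1.],     # unit direction of fixel 0
                       [1., 0., 0.]])    # unit direction of fixel 1
amplitudes = np.array([2.0, 0.7])        # peak lengths of fixels 0 and 1

The resulting fixel directory can then be converted to spherical harmonics with MRtrix's fixel2sh, as the docstring notes.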
Example #11
def main():

    args = sys.argv[1:]
    out_dir = args[0]

    exp_utils.make_dir(join(out_dir, "endings_segmentations"))
    exp_utils.make_dir(join(out_dir, "TOM_trackings"))

    affine = np.eye(4)

    data = [
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
        [[0.5, 0.9, 0.5], [0.9, 0.5, 0.9], [0.5, 0.9, 0.5]],
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
    ]
    data = np.array(data)
    data[0, 0, 0] = 0.1
    data[2, 2, 2] = 0.3
    data[0, 2, 2] = 0.4
    img = nib.Nifti1Image(data, affine)
    nib.save(img, join(out_dir, "toy_FA.nii.gz"))

    mask = np.zeros((3, 3, 3))
    mask[0, 0, 0] = 1
    img = nib.Nifti1Image(mask, affine)
    nib.save(img, join(out_dir, "endings_segmentations", "toy_b.nii.gz"))

    sl1 = np.array([[0., 0., 0.], [2., 2., 2.]])
    sl2 = np.array([[0., 2., 2.], [0., 0., 0.]])
    streamlines = [sl1, sl2]

    # Have to subtract 0.5 to move from the convention "0 mm is in the voxel corner" to the convention
    # "0 mm is in the voxel center". We have to do this because nifti uses the convention "0 mm is in the
    # voxel center" (streamlines are in world space, but the edge of the first voxel of the nifti is not at
    # 0,0,0 but at -0.5,-0.5,-0.5). If we do not apply this, results will be displayed incorrectly in image
    # viewers (e.g. MITK) and dipy functions (e.g. near_roi) will give wrong results.
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    fiber_utils.save_streamlines_as_trk_legacy(
        join(out_dir, "TOM_trackings", "toy.trk"), streamlines, affine,
        data.shape)
Example #12
def precompute_batches(custom_type=None):
    '''
    9000 slices per epoch -> roughly 200 batches (batch size 44) per epoch
    => 200-1000 batches needed
    '''
    class Config:
        NORMALIZE_DATA = True
        DATA_AUGMENTATION = False
        CV_FOLD = 0
        INPUT_DIM = (144, 144)
        BATCH_SIZE = 44
        DATASET_FOLDER = "HCP"
        TYPE = "single_direction"
        EXP_PATH = "~"
        LABELS_FILENAME = "bundle_peaks"
        FEATURES_FILENAME = "270g_125mm_peaks"
        DATASET = "HCP"
        RESOLUTION = "1.25mm"
        LABELS_TYPE = np.float32

    Config.TRAIN_SUBJECTS, Config.VALIDATE_SUBJECTS, Config.TEST_SUBJECTS = exp_utils.get_cv_fold(
        Config.CV_FOLD)

    num_batches_base = 5000
    num_batches = {
        "train": num_batches_base,
        "validate": int(num_batches_base / 3.),
        "test": int(num_batches_base / 3.),
    }

    if custom_type is None:
        types = ["train", "validate", "test"]
    else:
        types = [custom_type]

    for type in types:
        data_loader = DataLoaderTraining(Config)
        batch_gen = data_loader.get_batch_generator(
            batch_size=Config.BATCH_SIZE,
            type=type,
            subjects=getattr(Config,
                             type.upper() + "_SUBJECTS"))

        n_batches = num_batches[type]
        for idx in range(n_batches):
            print("Processing: {}".format(idx))
            batch = next(batch_gen)

            DATASET_DIR = "HCP_batches/270g_125mm_bundle_peaks_XYZ"
            exp_utils.make_dir(join(C.HOME, DATASET_DIR, type))

            data = nib.Nifti1Image(
                batch["data"],
                img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION))
            nib.save(
                data,
                join(C.HOME, DATASET_DIR, type,
                     "batch_" + str(idx) + "_data.nii.gz"))

            seg = nib.Nifti1Image(
                batch["seg"],
                img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION))
            nib.save(
                seg,
                join(C.HOME, DATASET_DIR, type,
                     "batch_" + str(idx) + "_seg.nii.gz"))