Example #1
def _orient_to_same_start_region(streamlines, beginnings):
    # (we could also use dipy.tracking.streamline.orient_by_streamline instead)
    streamlines = fiber_utils.add_to_each_streamline(streamlines, 0.5)
    streamlines_new = []
    for idx, sl in enumerate(streamlines):
        startpoint = sl[0]
        # Flip the streamline if its start point does not lie inside the beginnings mask
        if beginnings[int(startpoint[0]),
                      int(startpoint[1]),
                      int(startpoint[2])] == 0:
            sl = sl[::-1, :]
        streamlines_new.append(sl)
    streamlines_new = fiber_utils.add_to_each_streamline(streamlines_new, -0.5)
    return streamlines_new
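# A hedged usage sketch for the helper above: `beginnings` is a binary mask volume on the
# same voxel grid as the streamlines (voxel space); any streamline whose first point does
# not fall inside the mask gets flipped so that all fibers run in the same direction:
#
#   beginnings = nib.load(join(out_dir, "endings_segmentations", "toy_b.nii.gz")).get_fdata()
#   streamlines = _orient_to_same_start_region(streamlines, beginnings)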
def main():

    args = sys.argv[1:]
    out_dir = args[0]

    exp_utils.make_dir(join(out_dir, "endings_segmentations"))
    exp_utils.make_dir(join(out_dir, "TOM_trackings"))

    affine = np.array([[-1., 0., 0., 90], [0., 1., 0., 126], [0., 0., 1., -72],
                       [0., 0., 0., 1.]])
    offset = np.array([90, 126, -72])
    spacing = abs(affine[0, 0])

    data = [
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
        [[0.5, 0.9, 0.5], [0.9, 0.5, 0.9], [0.5, 0.9, 0.5]],
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
    ]
    data = np.array(data)
    data[0, 0, 0] = 0.1
    data[2, 2, 2] = 0.3
    data[0, 2, 2] = 0.4
    img = nib.Nifti1Image(data, affine)
    nib.save(img, join(out_dir, "toy_FA.nii.gz"))

    mask = np.zeros((3, 3, 3))
    mask[0, 0, 0] = 1
    img = nib.Nifti1Image(mask, affine)
    nib.save(img, join(out_dir, "endings_segmentations", "toy_b.nii.gz"))

    # sl1 = np.array([[0., 0., 0.], [2., 2., 2.]])
    sl2 = np.array([[0., 2., 2.], [0., 0., 0.]])
    streamlines = [sl2]

    # We have to subtract 0.5 to move from the convention "0mm is in the voxel corner" to the
    # convention "0mm is in the voxel center", because nifti uses the latter (streamlines are
    # in world space, but the edge of the first voxel of the nifti is not at 0,0,0 but at
    # -0.5,-0.5,-0.5). Without this shift, results are displayed incorrectly in image viewers
    # (e.g. MITK) and dipy functions (e.g. near_roi) give wrong results.
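    # Hedged illustration of the shift: the point [0., 0., 0.] in the corner convention becomes
    # [-0.5, -0.5, -0.5], which is exactly the corner of the first voxel under the center
    # convention.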
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    streamlines = list(transform_streamlines(streamlines, affine))

    streamlines = fiber_utils.invert_streamlines(streamlines,
                                                 data,
                                                 affine,
                                                 axis="y")

    # This is equivalent to applying the -0.5 shift before the transforms
    #  -> the sign has to be flipped for each axis that the affine negates or inverts
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, 0.5 * spacing, axis="x")
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, 0.5 * spacing, axis="y")
    # streamlines = fiber_utils.add_to_each_streamline_axis(streamlines, -0.5 * spacing, axis="z")

    fiber_utils.save_streamlines_as_trk_legacy(
        join(out_dir, "TOM_trackings", "toy.trk"), streamlines, affine,
        data.shape)
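
A minimal sketch of what the `fiber_utils` helpers used above might look like (a hedged
reconstruction for illustration, not the library's actual implementation):

import numpy as np

def add_to_each_streamline(streamlines, value):
    # Add a scalar offset to every coordinate of every streamline.
    return [sl + value for sl in streamlines]

def add_to_each_streamline_axis(streamlines, value, axis="x"):
    # Add a scalar offset along a single axis only.
    axis_idx = {"x": 0, "y": 1, "z": 2}[axis]
    out = []
    for sl in streamlines:
        sl = sl.copy()
        sl[:, axis_idx] += value
        out.append(sl)
    return out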
Example #3
def main():

    args = sys.argv[1:]
    out_dir = args[0]

    exp_utils.make_dir(join(out_dir, "endings_segmentations"))
    exp_utils.make_dir(join(out_dir, "TOM_trackings"))

    affine = np.eye(4)

    data = [
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
        [[0.5, 0.9, 0.5], [0.9, 0.5, 0.9], [0.5, 0.9, 0.5]],
        [[0.9, 0.5, 0.9], [0.5, 0.9, 0.5], [0.9, 0.5, 0.9]],
    ]
    data = np.array(data)
    data[0, 0, 0] = 0.1
    data[2, 2, 2] = 0.3
    data[0, 2, 2] = 0.4
    img = nib.Nifti1Image(data, affine)
    nib.save(img, join(out_dir, "toy_FA.nii.gz"))

    mask = np.zeros((3, 3, 3))
    mask[0, 0, 0] = 1
    img = nib.Nifti1Image(mask, affine)
    nib.save(img, join(out_dir, "endings_segmentations", "toy_b.nii.gz"))

    sl1 = np.array([[0., 0., 0.], [2., 2., 2.]])
    sl2 = np.array([[0., 2., 2.], [0., 0., 0.]])
    streamlines = [sl1, sl2]

    # We have to subtract 0.5 to move from the convention "0mm is in the voxel corner" to the
    # convention "0mm is in the voxel center", because nifti uses the latter (streamlines are
    # in world space, but the edge of the first voxel of the nifti is not at 0,0,0 but at
    # -0.5,-0.5,-0.5). Without this shift, results are displayed incorrectly in image viewers
    # (e.g. MITK) and dipy functions (e.g. near_roi) give wrong results.
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    fiber_utils.save_streamlines_as_trk_legacy(
        join(out_dir, "TOM_trackings", "toy.trk"), streamlines, affine,
        data.shape)
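
A quick, hedged way to sanity-check the toy files written above (nibabel reads TRK
streamlines in world/RAS+ mm space; exact values depend on how
`save_streamlines_as_trk_legacy` fills the TRK header):

import nibabel as nib
from os.path import join

out_dir = "."  # illustrative; use the same out_dir that was passed to main()
tractogram = nib.streamlines.load(join(out_dir, "TOM_trackings", "toy.trk"))
print(tractogram.streamlines[0])                       # first streamline in world coordinates
print(nib.load(join(out_dir, "toy_FA.nii.gz")).shape)  # (3, 3, 3)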
Example #4
def track(peaks,
          seed_image,
          max_nr_fibers=2000,
          smooth=None,
          compress=0.1,
          bundle_mask=None,
          start_mask=None,
          end_mask=None,
          dilation=1,
          nr_cpus=-1,
          verbose=True):
    """
    Great speedup was achieved by:
    - seeding only in bundle_mask instead of the entire image (seeding took very long)
    - calculating fiber length on the fly instead of using an extra function which has to
      iterate over the entire fiber a second time

    Args:
        peaks: 4D array of peak directions (the last dimension holds the peak vector)
        seed_image: nibabel image of the seed region; provides voxel spacing and affine
        max_nr_fibers: stop once this many streamlines have been generated
        smooth: smoothing factor for fiber_utils.smooth_streamlines (None: no smoothing)
        compress: if set, compress the resulting streamlines
        bundle_mask: binary mask of the bundle; seeds are only placed inside of it
        start_mask: binary mask of the bundle start region
        end_mask: binary mask of the bundle end region
        dilation: number of dilation iterations applied to the masks
        nr_cpus: number of worker processes (-1: use all available cores)
        verbose: print progress

    Returns:
        The tracked streamlines in world coordinate space.

    """
    peaks[:, :, :, 0] *= -1  # flip along the x axis, otherwise the tracking does not work properly
    if dilation > 0:
        # Add +1 dilation for start and end mask to be more robust
        start_mask = binary_dilation(start_mask,
                                     iterations=dilation + 1).astype(np.uint8)
        end_mask = binary_dilation(end_mask,
                                   iterations=dilation + 1).astype(np.uint8)
        bundle_mask = binary_dilation(bundle_mask,
                                      iterations=dilation).astype(np.uint8)

    global _PEAKS
    _PEAKS = peaks
    global _BUNDLE_MASK
    _BUNDLE_MASK = bundle_mask
    global _START_MASK
    _START_MASK = start_mask
    global _END_MASK
    _END_MASK = end_mask
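
    # The module-level globals let the forked worker processes read these volumes without
    # pickling them for every seed (this relies on fork-based multiprocessing, e.g. on Linux).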

    # Get list of coordinates of each voxel in mask to seed from those
    mask_coords = np.array(np.where(bundle_mask == 1)).transpose()
    nr_voxels = mask_coords.shape[0]
    spacing = seed_image.header.get_zooms()[0]

    max_nr_seeds = 250 * max_nr_fibers  # after how many seeds to abort (to avoid endless runtime)
    # How many seeds to process in each pool.map iteration
    seeds_per_batch = 5000

    if nr_cpus == -1:
        nr_processes = psutil.cpu_count()
    else:
        nr_processes = nr_cpus

    streamlines = []
    fiber_ctr = 0
    seed_ctr = 0
    # Process seeds in batches so we can stop once we have reached the desired number of
    #   streamlines. Not ideal; could be optimised if more familiar with multiprocessing.
    while fiber_ctr < max_nr_fibers:
        pool = multiprocessing.Pool(processes=nr_processes)
        streamlines_tmp = pool.map(
            partial(process_seedpoint, spacing=spacing),
            seed_generator(mask_coords, seeds_per_batch))
        # streamlines_tmp = [process_seedpoint(seed, spacing=spacing) for seed in
        #                    seed_generator(mask_coords, seeds_per_batch)] # single threaded for debug
        pool.close()
        pool.join()

        streamlines_tmp = [sl for sl in streamlines_tmp
                           if len(sl) > 0]  # filter empty
        streamlines += streamlines_tmp
        fiber_ctr = len(streamlines)
        if verbose:
            print("nr_fibs: {}".format(fiber_ctr))
        seed_ctr += seeds_per_batch
        if seed_ctr > max_nr_seeds:
            if verbose:
                print("Early stopping because max nr of seeds reached.")
            break

    if verbose:
        print("final nr streamlines: {}".format(len(streamlines)))

    streamlines = streamlines[:max_nr_fibers]  # remove surplus of fibers (comes from multiprocessing)
    streamlines = Streamlines(streamlines)  # Generate streamlines object

    # Move the origin from the edge of the voxel to the center of the voxel. Otherwise the
    # tractogram and the mask do not perfectly align when viewed in MITK, but are slightly
    # offset. A few fibers may still lie slightly outside of the mask because of the big step
    # size (no re-segmenting is done).
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    # move streamlines to coordinate space
    streamlines = list(
        move_streamlines(streamlines, output_space=seed_image.affine))

    if smooth:
        streamlines = fiber_utils.smooth_streamlines(streamlines,
                                                     smoothing_factor=smooth)

    if compress:
        streamlines = fiber_utils.compress_streamlines(streamlines,
                                                       error_threshold=0.1,
                                                       nr_cpus=nr_cpus)

    return streamlines
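
`seed_generator` is not part of this snippet; a minimal sketch, assuming it yields one
random seed position in voxel coordinates per seed, jittered inside a randomly chosen
mask voxel:

import numpy as np

def seed_generator(mask_coords, nr_seeds):
    # Pick a random voxel from the bundle mask for each seed and place the seed
    # uniformly at random inside that voxel.
    for _ in range(nr_seeds):
        voxel = mask_coords[np.random.randint(len(mask_coords))]
        yield voxel + np.random.uniform(0., 1., size=3)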
Example #5
def plot_bundles_with_metric(bundle_path,
                             endings_path,
                             brain_mask_path,
                             bundle,
                             metrics,
                             output_path,
                             tracking_format="trk_legacy",
                             show_color_bar=True):
    import seaborn as sns  # imported inside the function to avoid an import error if seaborn is not installed (it is only needed here)
    from dipy.viz import actor, window
    from tractseg.libs import vtk_utils

    def _add_extra_point_to_last_streamline(sl):
        # Coloring breaks as soon as all streamlines have the same number of points (reason
        # unclear). Give the last streamline one extra point so the counts differ.
        sl[-1] = np.append(sl[-1], [sl[-1][-1]], axis=0)
        return sl

    # Settings
    NR_SEGMENTS = 100
    ANTI_INTERPOL_MULT = 1  # increase the number of points so that interpolation does not blur the colors
    algorithm = "distance_map"  # equal_dist | distance_map | cutting_plane
    # colors = np.array(sns.color_palette("coolwarm", NR_SEGMENTS))  # blue-to-red colormap (does not match the colorbar)
    colors = np.array(sns.light_palette(
        "red", NR_SEGMENTS))  # red-only colormap, which matches the colorbar
    img_size = (1000, 1000)

    # Tractometry skips the first and last element, so we only have 98 instead of 100 elements.
    # Here we duplicate the first and the last element to get back to 100 elements.
    metrics = list(metrics)
    metrics = np.array([metrics[0]] + metrics + [metrics[-1]])
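    # e.g. 98 tractometry values [m1, m2, ..., m98] become [m1, m1, m2, ..., m98, m98]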

    metrics_max = metrics.max()
    metrics_min = metrics.min()
    if metrics_max == metrics_min:
        metrics = np.zeros(len(metrics))
    else:
        metrics = img_utils.scale_to_range(
            metrics,
            range=(0, 99))  # range needs to be same as segments in colormap
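        # (presumably a linear rescale; e.g. [2.0, 4.0, 6.0] -> [0.0, 49.5, 99.0])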

    orientation = dataset_specific_utils.get_optimal_orientation_for_bundle(
        bundle)

    # Load mask
    beginnings_img = nib.load(endings_path)
    beginnings = beginnings_img.get_fdata()
    beginnings = binary_dilation(beginnings, iterations=1)

    # Load trackings
    if tracking_format == "trk_legacy":
        streams, hdr = trackvis.read(bundle_path)
        streamlines = [s[0] for s in streams]
    else:
        sl_file = nib.streamlines.load(bundle_path)
        streamlines = sl_file.streamlines

    # Reduce streamline count
    streamlines = streamlines[::2]

    # Reorder to make all streamlines have same start region
    streamlines = fiber_utils.add_to_each_streamline(streamlines, 0.5)
    streamlines_new = []
    for idx, sl in enumerate(streamlines):
        startpoint = sl[0]
        # Flip the streamline if its start point does not lie inside the beginnings mask
        if beginnings[int(startpoint[0]),
                      int(startpoint[1]),
                      int(startpoint[2])] == 0:
            sl = sl[::-1, :]
        streamlines_new.append(sl)
    streamlines = fiber_utils.add_to_each_streamline(streamlines_new, -0.5)

    if algorithm == "distance_map" or algorithm == "equal_dist":
        streamlines = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    elif algorithm == "cutting_plane":
        streamlines = fiber_utils.resample_to_same_distance(
            streamlines,
            max_nr_points=NR_SEGMENTS,
            ANTI_INTERPOL_MULT=ANTI_INTERPOL_MULT)

    # Cut start and end by percentage
    # streamlines = FiberUtils.resample_fibers(streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    # remove = int((NR_SEGMENTS * ANTI_INTERPOL_MULT) * 0.15)  # remove X% in beginning and end
    # streamlines = np.array(streamlines)[:, remove:-remove, :]
    # streamlines = list(streamlines)

    if algorithm == "equal_dist":
        segment_idxs = []
        for i in range(len(streamlines)):
            segment_idxs.append(list(range(NR_SEGMENTS * ANTI_INTERPOL_MULT)))
        segment_idxs = np.array(segment_idxs)

    elif algorithm == "distance_map":
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines)
        centroids = Streamlines(clusters.centroids)
        _, segment_idxs = cKDTree(centroids.data, 1,
                                  copy_data=True).query(streamlines, k=1)
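        # Each streamline point is assigned the index of its nearest centroid point, which
        # maps all points onto the segments of the bundle centroid.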

    elif algorithm == "cutting_plane":
        streamlines_resamp = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines_resamp)
        centroid = Streamlines(clusters.centroids)[0]
        # index of the middle cluster
        middle_idx = int(NR_SEGMENTS / 2) * ANTI_INTERPOL_MULT
        middle_point = centroid[middle_idx]
        segment_idxs = fiber_utils.get_idxs_of_closest_points(
            streamlines, middle_point)
        # Align along the middle and assign indices
        segment_idxs_eqlen = []
        for idx, sl in enumerate(streamlines):
            sl_middle_pos = segment_idxs[idx]
            before_elems = sl_middle_pos
            after_elems = len(sl) - sl_middle_pos
            base_idx = 1000  # use higher index to avoid negative numbers for area below middle
            r = range((base_idx - before_elems), (base_idx + after_elems))
            segment_idxs_eqlen.append(r)
        segment_idxs = segment_idxs_eqlen
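        # Hedged illustration: a streamline whose middle point sits at position 40 of 100
        # gets range(960, 1060), so index 1000 marks the cutting plane on every streamline.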

    # Add an extra point to the last streamline, otherwise the coloring bug described above is triggered
    streamlines = _add_extra_point_to_last_streamline(streamlines)

    renderer = window.Renderer()
    colors_all = []  # final shape will be [nr_streamlines, nr_points, 3]
    for jdx, sl in enumerate(streamlines):
        colors_sl = []
        for idx, p in enumerate(sl):
            if idx >= len(segment_idxs[jdx]):
                seg_idx = segment_idxs[jdx][idx - 1]
            else:
                seg_idx = segment_idxs[jdx][idx]

            m = metrics[int(seg_idx / ANTI_INTERPOL_MULT)]
            color = colors[int(m)]
            colors_sl.append(color)
        colors_all.append(
            colors_sl
        )  # cannot be converted to a numpy array because the last streamline has one extra point

    sl_actor = actor.streamtube(streamlines,
                                colors=colors_all,
                                linewidth=0.2,
                                opacity=1)
    renderer.add(sl_actor)

    # plot brain mask
    mask = nib.load(brain_mask_path).get_fdata()
    cont_actor = vtk_utils.contour_from_roi_smooth(
        mask,
        affine=beginnings_img.affine,
        color=[.9, .9, .9],
        opacity=.2,
        smoothing=50)
    renderer.add(cont_actor)

    if show_color_bar:
        lut_cmap = actor.colormap_lookup_table(scale_range=(metrics_min,
                                                            metrics_max),
                                               hue_range=(0.0, 0.0),
                                               saturation_range=(0.0, 1.0))
        renderer.add(actor.scalar_bar(lut_cmap))

    if orientation == "sagittal":
        renderer.set_camera(position=(-412.95, -34.38, 80.15),
                            focal_point=(102.46, -16.96, -11.71),
                            view_up=(0.1806, 0.0, 0.9835))
    elif orientation == "coronal":
        renderer.set_camera(position=(-48.63, 360.31, 98.37),
                            focal_point=(-20.16, 92.89, 36.02),
                            view_up=(-0.0047, -0.2275, 0.9737))
    elif orientation == "axial":
        pass
    else:
        raise ValueError("Invalid orientation provided")

    # Use this to interactively get a new camera angle
    # window.show(renderer, size=img_size, reset_camera=False)
    # print(renderer.get_camera())

    window.record(renderer, out_path=output_path, size=img_size)
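
A hedged usage sketch (paths, bundle name and metric values are illustrative only; the
98 metric values match the tractometry output that is padded back to 100 above):

import numpy as np

plot_bundles_with_metric("CST_right.trk",
                         "endings_segmentations/CST_right_b.nii.gz",
                         "brain_mask.nii.gz",
                         "CST_right",
                         metrics=np.random.rand(98),
                         output_path="CST_right_FA.png",
                         tracking_format="trk_legacy")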
Example #6
def track(peaks,
          seed_image,
          max_nr_fibers=2000,
          smooth=None,
          compress=0.1,
          bundle_mask=None,
          start_mask=None,
          end_mask=None,
          tracking_uncertainties=None,
          dilation=0,
          next_step_displacement_std=0.15,
          nr_cpus=-1,
          verbose=True):
    """
    Generate streamlines.

    Great speedup was achieved by:
    - seeding only in bundle_mask instead of the entire image (seeding took very long)
    - calculating fiber length on the fly instead of using an extra function which has to
      iterate over the entire fiber a second time
    """

    peaks[:, :, :, 0] *= -1  # flip along the x axis, otherwise the tracking does not work properly
    # Add +1 dilation for start and end mask to be more robust
    start_mask = binary_dilation(start_mask,
                                 iterations=dilation + 1).astype(np.uint8)
    end_mask = binary_dilation(end_mask,
                               iterations=dilation + 1).astype(np.uint8)
    if dilation > 0:
        bundle_mask = binary_dilation(bundle_mask,
                                      iterations=dilation).astype(np.uint8)

    if tracking_uncertainties is not None:
        tracking_uncertainties = img_utils.scale_to_range(
            tracking_uncertainties, range=(0, 1))

    global _PEAKS
    _PEAKS = peaks
    global _BUNDLE_MASK
    _BUNDLE_MASK = bundle_mask
    global _START_MASK
    _START_MASK = start_mask
    global _END_MASK
    _END_MASK = end_mask
    global _TRACKING_UNCERTAINTIES
    _TRACKING_UNCERTAINTIES = tracking_uncertainties

    # Get list of coordinates of each voxel in mask to seed from those
    mask_coords = np.array(np.where(bundle_mask == 1)).transpose()
    spacing = seed_image.header.get_zooms()[0]

    max_nr_seeds = 100 * max_nr_fibers  # after how many seeds to abort (to avoid endless runtime)
    # How many seeds to process in each pool.map iteration
    seeds_per_batch = 5000

    if nr_cpus == -1:
        nr_processes = psutil.cpu_count()
    else:
        nr_processes = nr_cpus

    streamlines = []
    fiber_ctr = 0
    seed_ctr = 0
    # Process seeds in batches so we can stop once we have reached the desired number of
    #   streamlines. Not ideal; could be optimised with more multiprocessing fanciness.
    while fiber_ctr < max_nr_fibers:
        pool = multiprocessing.Pool(processes=nr_processes)
        streamlines_tmp = pool.map(
            partial(process_seedpoint,
                    next_step_displacement_std=next_step_displacement_std,
                    spacing=spacing),
            seed_generator(mask_coords, seeds_per_batch))
        # streamlines_tmp = [process_seedpoint(seed, spacing=spacing) for seed in
        #                    seed_generator(mask_coords, seeds_per_batch)] # single threaded for debugging
        pool.close()
        pool.join()

        streamlines_tmp = [sl for sl in streamlines_tmp
                           if len(sl) > 0]  # filter empty ones
        streamlines += streamlines_tmp
        fiber_ctr = len(streamlines)
        if verbose:
            print("nr_fibs: {}".format(fiber_ctr))
        seed_ctr += seeds_per_batch
        if seed_ctr > max_nr_seeds:
            if verbose:
                print("Early stopping because max nr of seeds reached.")
            break

    if verbose:
        print("final nr streamlines: {}".format(len(streamlines)))

    streamlines = streamlines[:max_nr_fibers]  # remove surplus of fibers (comes from multiprocessing)
    streamlines = Streamlines(streamlines)  # Generate streamlines object

    # Move from convention "0mm is in voxel corner" to convention "0mm is in voxel center". Most toolkits use the
    # convention "0mm is in voxel center".
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    # move streamlines to coordinate space
    #  This is doing: streamlines(coordinate_space) = affine * streamlines(voxel_space)
    streamlines = list(transform_streamlines(streamlines, seed_image.affine))

    # Smoothing barely changes the overall results because only light smoothing is applied; it just removes small unevenness.
    if smooth:
        streamlines = fiber_utils.smooth_streamlines(streamlines,
                                                     smoothing_factor=smooth)

    if compress:
        streamlines = fiber_utils.compress_streamlines(streamlines,
                                                       error_threshold=0.1,
                                                       nr_cpus=nr_cpus)

    return streamlines
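
A hedged usage sketch for `track` (file names are illustrative; `peaks` is expected to be
a 4D array whose last dimension holds the peak vector):

import nibabel as nib

seed_img = nib.load("peaks.nii.gz")
peaks = seed_img.get_fdata()
bundle_mask = nib.load("bundle_segmentations/CA.nii.gz").get_fdata()
start_mask = nib.load("endings_segmentations/CA_b.nii.gz").get_fdata()
end_mask = nib.load("endings_segmentations/CA_e.nii.gz").get_fdata()

streamlines = track(peaks, seed_img, max_nr_fibers=2000,
                    bundle_mask=bundle_mask, start_mask=start_mask,
                    end_mask=end_mask)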