Example #1
def visualize(fibers, outf=None):
    """
    Takes fiber streamlines and visualizes them using DiPy

    Required Arguments:
        - fibers:
            fiber streamlines in a list as returned by DiPy
    Optional Arguments:
        - outf:
            output path to which the rendered image is saved after being
            displayed; if None, nothing is written to disk
    """
    # Initialize renderer
    renderer = window.Renderer()

    # Add streamlines as a DiPy viz object
    stream_actor = actor.line(fibers)

    # Set camera orientation properties
    # TODO: allow this as an argument
    renderer.set_camera()  # args are: position=(), focal_point=(), view_up=()

    # Add streamlines to viz session
    renderer.add(stream_actor)

    # Display fibers
    # TODO: allow size of window as an argument
    window.show(renderer, size=(600, 600), reset_camera=False)

    # Saves file, if you're into that sort of thing...
    if outf is not None:
        window.record(renderer, out_path=outf, size=(600, 600))
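
# A hypothetical usage sketch for visualize(); the .trk path and loader are
# illustrative, and ``from dipy.viz import window, actor`` is assumed at module
# level since the snippet above does not show its imports.
import nibabel as nib

streams, hdr = nib.trackvis.read('fibers.trk')  # hypothetical path
fibers = [s[0] for s in streams]
visualize(fibers, outf='fibers.png')
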
def simple_viewer(streamlines, vol, affine):

    from dipy.viz import actor, window

    renderer = window.Renderer()
    renderer.add(actor.line(streamlines))
    renderer.add(actor.slicer(vol, affine))
    window.show(renderer)
Example #3
def test_streamtube_and_line_actors():
    renderer = window.Renderer()

    line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
    line2 = line1 + np.array([0.5, 0., 0.])

    lines = [line1, line2]
    colors = np.array([[1, 0, 0], [0, 0, 1.]])
    c = actor.line(lines, colors, linewidth=3)
    window.add(renderer, c)

    c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
    window.add(renderer, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = actor.streamtube(lines, colors, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])

    # as before with splines
    c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
    c2.SetPosition(2, 0, 0)
    window.add(renderer, c2)

    arr = window.snapshot(renderer)

    report = window.analyze_snapshot(arr,
                                     colors=[(255, 0, 0), (0, 0, 255)],
                                     find_objects=True)

    npt.assert_equal(report.objects, 4)
    npt.assert_equal(report.colors_found, [True, True])
Example #4
def visualize_fibs(fibs, fibfile, atlasfile, outdir, opacity, num_samples):
    """
    Takes fiber streamlines and visualizes them using DiPy
    Required Arguments:
        - fibs: fiber streamlines as returned by DiPy
        - fibfile: Path to fiber file
        - atlasfile: Path to atlas file
        - outdir: Path to output directory
        - opacity: Opacity of the overlaid brain
        - num_samples: number of fibers to randomly sample from fibfile
    """
    try:
        import vtk
        print("VTK found - beginning fiber QA.")
    except ImportError:
        print("!! VTK not found; skipping fiber QA.")
        return

    # loading the fibers
    fibs = threshold_fibers(fibs)

    # if thresholding left no fibers, return early instead of erroring
    if len(fibs) == 0:
        return
    # randomly sample num_samples fibers from given fibers
    resampled_fibs = random_sample(fibs, num_samples)

    # load atlas file
    atlas_volume = load_atlas(atlasfile, opacity)

    # Initialize renderer
    renderer = window.Renderer()
    renderer.SetBackground(1.0, 1.0, 1.0)

    # Add streamlines as a DiPy viz object
    stream_actor = actor.line(fibs)

    # Set camera orientation properties
    # TODO: allow this as an argument
    renderer.set_camera()  # args are: position=(), focal_point=(), view_up=()

    # Add streamlines to viz session
    renderer.add(stream_actor)
    renderer.add(atlas_volume)

    # Display fibers
    # TODO: allow size of window as an argument
    # window.show(renderer, size=(600, 600), reset_camera=False)

    fname = os.path.split(fibfile)[1].split('.')[0] + '.png'
    window.record(renderer, out_path=outdir + fname, size=(600, 600))
def show_bundles(bundles, colors=None, size=(1080, 600),
                 show=False, fname=None):

    ren = window.Renderer()
    ren.background((1., 1, 1))

    for (i, bundle) in enumerate(bundles):
        # fall back to the default orientation coloring if no colors are given
        color = colors[i] if colors is not None else None
        lines = actor.line(bundle, color, linewidth=1.5)
        ren.add(lines)

    ren.reset_clipping_range()
    ren.reset_camera()

    if show:
        window.show(ren, size=size, reset_camera=True)

    if fname is not None:
        window.record(ren, n_frames=1, out_path=fname, size=size)
Example #6
    def pick_callback(obj, event):
        global centroid_actors
        global picked_actors

        prop = obj.GetProp3D()

        ac = np.array(centroid_actors)
        index = np.where(ac == prop)[0]

        if len(index) > 0:
            try:
                bundle = picked_actors[prop]
                ren.rm(bundle)
                del picked_actors[prop]
            except KeyError:
                bundle = actor.line(clusters[visible_cluster_id[index]],
                                    lod=False)
                picked_actors[prop] = bundle
                ren.add(bundle)

        if prop in picked_actors.values():
            ren.rm(prop)
Example #7
    def build_scene(self):

        scene = window.Renderer()
        for (t, streamlines) in enumerate(self.tractograms):
            if self.random_colors:
                colors = self.prng.random_sample(3)
            else:
                colors = None

            if self.cluster:

                print(' Clustering threshold {} \n'.format(self.cluster_thr))
                clusters = qbx_and_merge(streamlines,
                                         [40, 30, 25, 20, self.cluster_thr])
                self.tractogram_clusters[t] = clusters
                centroids = clusters.centroids
                print(' Number of centroids is {}'.format(len(centroids)))
                sizes = np.array([len(c) for c in clusters])
                linewidths = np.interp(sizes,
                                       [sizes.min(), sizes.max()], [0.1, 2.])
                centroid_lengths = np.array([length(c) for c in centroids])

                print(' Minimum number of streamlines in cluster {}'
                      .format(sizes.min()))

                print(' Maximum number of streamlines in cluster {}'
                      .format(sizes.max()))

                print(' Construct cluster actors')
                for (i, c) in enumerate(centroids):

                    centroid_actor = actor.streamtube([c], colors,
                                                      linewidth=linewidths[i],
                                                      lod=False)
                    scene.add(centroid_actor)

                    cluster_actor = actor.line(clusters[i],
                                               lod=False)
                    cluster_actor.GetProperty().SetRenderLinesAsTubes(1)
                    cluster_actor.GetProperty().SetLineWidth(6)
                    cluster_actor.GetProperty().SetOpacity(1)
                    cluster_actor.VisibilityOff()

                    scene.add(cluster_actor)

                    # Every centroid actor (cea) is paired to a cluster actor
                    # (cla).

                    self.cea[centroid_actor] = {
                        'cluster_actor': cluster_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0, 'expanded': 0}

                    self.cla[cluster_actor] = {
                        'centroid_actor': centroid_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0}
                    apply_shader(self, cluster_actor)
                    apply_shader(self, centroid_actor)

            else:

                streamline_actor = actor.line(streamlines, colors=colors)
                streamline_actor.GetProperty().SetEdgeVisibility(1)
                streamline_actor.GetProperty().SetRenderLinesAsTubes(1)
                streamline_actor.GetProperty().SetLineWidth(6)
                streamline_actor.GetProperty().SetOpacity(1)
                scene.add(streamline_actor)
        return scene
detmax_dg = DeterministicMaximumDirectionGetter.from_shcoeff(
    csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
streamline_generator = LocalTracking(detmax_dg,
                                     stopping_criterion,
                                     seeds,
                                     affine,
                                     step_size=.5)
streamlines = Streamlines(streamline_generator)

sft = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
save_trk(sft, "tractogram_deterministic_dg.trk")

if has_fury:
    r = window.Renderer()
    r.add(actor.line(streamlines, colormap.line_colors(streamlines)))
    window.record(r,
                  out_path='tractogram_deterministic_dg.png',
                  size=(800, 800))
    if interactive:
        window.show(r)
"""
.. figure:: tractogram_deterministic_dg.png
   :align: center

   **Corpus Callosum using deterministic maximum direction getter**
"""
"""
.. include:: ../links_names.inc

"""
from dipy.tracking.streamline import Streamlines
from dipy.data import small_sphere

boot_dg_csd = BootDirectionGetter.from_data(data,
                                            csd_model,
                                            max_angle=30.,
                                            sphere=small_sphere)
boot_streamline_generator = LocalTracking(boot_dg_csd,
                                          classifier,
                                          seeds,
                                          affine,
                                          step_size=.5)
streamlines = Streamlines(boot_streamline_generator)

renderer.clear()
renderer.add(actor.line(streamlines, line_colors(streamlines)))
window.record(renderer, out_path='bootstrap_dg_CSD.png', size=(600, 600))
"""
.. figure:: bootstrap_dg_CSD.png
   :align: center

   **Corpus Callosum Bootstrap Probabilistic Direction Getter**

We have created a bootstrapped probabilistic set of streamlines. If you repeat
the fiber tracking (keeping all inputs the same) you will NOT get exactly the
same set of streamlines. We can save the streamlines as a Trackvis file so it
can be loaded into other software for visualization or further analysis.
"""

save_trk("bootstrap_dg_CSD.trk", streamlines, affine, labels.shape)
"""
    return roi_actor_list


roi, roi_affine = set_viz_roi(roi_img, mask=True)
roi1, roi1_affine = set_viz_roi(roi1_img, mask=True)
roi_actor = create_roi_actor(roi, roi_affine)
roi1_actor = create_roi_actor(roi1, roi1_affine)

# roi_viz, roi_viz_affine = set_viz_roi(roi_vis)
# roi_viz_actor = create_roi_actor(roi_viz, roi_viz_affine)

streamlines = select_by_vol_roi(streamlines, roi[0], roi_img.affine)
print(len(streamlines))
# create a rendering renderer
ren = window.Renderer()
stream_actor = actor.line(streamlines)
stream_init_actor = actor.line(streamlines, (0.0, 1.0, 0.0))
vol = nib.load(vol_file)


def create_image_actor(vol, opacity=0.6):
    data = vol.get_data()
    shape = vol.shape
    affine = vol.affine

    image_actor_z = actor.slicer(data, affine)
    slicer_opacity = opacity
    image_actor_z.opacity(slicer_opacity)
    image_actor_x = image_actor_z.copy()
    image_actor_x.opacity(slicer_opacity)
    x_midpoint = int(np.round(shape[0] / 2))
Example #11
"""

# Make a corpus callosum seed mask for tracking
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

# Make a streamline bundle model of the corpus callosum ROI connectivity
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                            step_size=2)
streamlines = Streamlines(streamlines)

# Visualize the streamlines and the Path Length Map base ROI
# (in this case also the seed ROI)

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))
surface_opacity = 0.5
surface_color = [0, 1, 1]
seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                       surface_color, surface_opacity)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(seedroi_actor)

"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
from dipy.tracking.streamline import Streamlines

# Enables/disables interactive visualization
interactive = False

# Initialization of LocalTracking. The computation happens in the next step.
streamlines_generator = LocalTracking(csa_peaks, classifier, seeds, affine, step_size=.5)

# Generate streamlines object
streamlines = Streamlines(streamlines_generator)

# Prepare the display objects.
color = line_colors(streamlines)

if window.have_vtk:
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save still images for this static example. Or for interactivity use
    window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**
Example #13
def fiber_simple_3d_show_advanced(img, streamlines, colors=None, linewidth=1):

    data = img.get_data()
    shape = img.shape
    affine = img.affine
    """
    With our current design it is easy to decide in which space you want the
    streamlines and slices to appear. The default we have here is to appear in
    world coordinates (RAS 1mm).
    """

    world_coords = True
    """
    If we want to see the objects in native space we need to make sure that all
    objects which are currently in world coordinates are transformed back to
    native space using the inverse of the affine.
    """

    if not world_coords:
        from dipy.tracking.streamline import transform_streamlines
        streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))
    """
    Now we create a ``Renderer`` object and add the streamlines using the ``line``
    function and an image plane using the ``slicer`` function.
    """

    ren = window.Renderer()
    stream_actor = actor.line(streamlines, colors=colors, linewidth=linewidth)

    if not world_coords:
        image_actor_z = actor.slicer(data, affine=np.eye(4))
    else:
        image_actor_z = actor.slicer(data, affine)
    """
    We can also change the opacity of the slicer.
    """

    slicer_opacity = 0.6
    image_actor_z.opacity(slicer_opacity)
    """
    We can add additional slicers by copying the original and adjusting the
    ``display_extent``.
    """

    image_actor_x = image_actor_z.copy()
    image_actor_x.opacity(slicer_opacity)
    x_midpoint = int(np.round(shape[0] / 2))
    image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0,
                                 shape[2] - 1)

    image_actor_y = image_actor_z.copy()
    image_actor_y.opacity(slicer_opacity)
    y_midpoint = int(np.round(shape[1] / 2))
    image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0,
                                 shape[2] - 1)
    """
    Connect the actors with the Renderer.
    """

    ren.add(stream_actor)
    ren.add(image_actor_z)
    ren.add(image_actor_x)
    ren.add(image_actor_y)
    """
    Now we would like to change the position of each ``image_actor`` using a
    slider. The sliders are widgets which require access to different areas of the
    visualization pipeline and therefore we don't recommend using them with
    ``show``. The more appropriate way is to use them with the ``ShowManager``
    object which allows accessing the pipeline in different areas. Here is how:
    """

    show_m = window.ShowManager(ren, size=(1200, 900))
    show_m.initialize()
    """
    After we have initialized the ``ShowManager`` we can go ahead and create
    sliders to move the slices and change their opacity.
    """

    line_slider_z = ui.LineSlider2D(min_value=0,
                                    max_value=shape[2] - 1,
                                    initial_value=shape[2] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_x = ui.LineSlider2D(min_value=0,
                                    max_value=shape[0] - 1,
                                    initial_value=shape[0] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    line_slider_y = ui.LineSlider2D(min_value=0,
                                    max_value=shape[1] - 1,
                                    initial_value=shape[1] / 2,
                                    text_template="{value:.0f}",
                                    length=140)

    opacity_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=slicer_opacity,
                                     length=140)
    """
    Now we will write callbacks for the sliders and register them.
    """
    def change_slice_z(i_ren, obj, slider):
        z = int(np.round(slider.value))
        image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z)

    def change_slice_x(i_ren, obj, slider):
        x = int(np.round(slider.value))
        image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)

    def change_slice_y(i_ren, obj, slider):
        y = int(np.round(slider.value))
        image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)

    def change_opacity(i_ren, obj, slider):
        slicer_opacity = slider.value
        image_actor_z.opacity(slicer_opacity)
        image_actor_x.opacity(slicer_opacity)
        image_actor_y.opacity(slicer_opacity)

    line_slider_z.add_callback(line_slider_z.slider_disk, "MouseMoveEvent",
                               change_slice_z)
    line_slider_x.add_callback(line_slider_x.slider_disk, "MouseMoveEvent",
                               change_slice_x)
    line_slider_y.add_callback(line_slider_y.slider_disk, "MouseMoveEvent",
                               change_slice_y)
    opacity_slider.add_callback(opacity_slider.slider_disk, "MouseMoveEvent",
                                change_opacity)
    """
    We'll also create text labels to identify the sliders.
    """

    def build_label(text):
        label = ui.TextBlock2D()
        label.message = text
        label.font_size = 18
        label.font_family = 'Arial'
        label.justification = 'left'
        label.bold = False
        label.italic = False
        label.shadow = False
        # label.actor.GetTextProperty().SetBackgroundColor(0, 0, 0)
        # label.actor.GetTextProperty().SetBackgroundOpacity(0.0)
        label.color = (1, 1, 1)

        return label

    line_slider_label_z = build_label(text="Z Slice")
    line_slider_label_x = build_label(text="X Slice")
    line_slider_label_y = build_label(text="Y Slice")
    opacity_slider_label = build_label(text="Opacity")
    """
    Now we will create a ``panel`` to contain the sliders and labels.
    """

    panel = ui.Panel2D(center=(1030, 120),
                       size=(300, 200),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")

    panel.add_element(line_slider_label_x, 'relative', (0.1, 0.75))
    panel.add_element(line_slider_x, 'relative', (0.65, 0.8))
    panel.add_element(line_slider_label_y, 'relative', (0.1, 0.55))
    panel.add_element(line_slider_y, 'relative', (0.65, 0.6))
    panel.add_element(line_slider_label_z, 'relative', (0.1, 0.35))
    panel.add_element(line_slider_z, 'relative', (0.65, 0.4))
    panel.add_element(opacity_slider_label, 'relative', (0.1, 0.15))
    panel.add_element(opacity_slider, 'relative', (0.65, 0.2))

    show_m.ren.add(panel)
    """
    Then, we can render all the widgets and everything else on the screen and
    start the interaction using ``show_m.start()``.


    However, if you change the window size, the panel will not update its position
    properly. The solution to this issue is to update the position of the panel
    using its ``re_align`` method every time the window size changes.
    """

    global size
    size = ren.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():
            size_old = size
            size = obj.GetSize()
            size_change = (size[0] - size_old[0], 0)
            panel.re_align(size_change)

    show_m.initialize()
    """
    Finally, please set the following variable to ``True`` to interact with the
    datasets in 3D.
    """

    interactive = True  #False

    ren.zoom(1.5)
    ren.reset_clipping_range()

    if interactive:

        show_m.add_window_callback(win_callback)
        show_m.render()
        show_m.start()

    else:

        window.record(ren,
                      out_path='test_1.png',
                      size=(1200, 900),
                      reset_camera=False)
    """
    .. figure:: test_1.png
       :align: center

       A few bundles with interactive slicing.
    """

    del show_m
    """
         vox_size=np.array([2., 2., 2.]),
         shape=csapeaks.gfa.shape[:3])

"""
Visualize the streamlines with `dipy.viz` module (python vtk is required).
"""

from dipy.viz import window, actor
from dipy.viz.colormap import line_colors

# Enables/disables interactive visualization
interactive = False

ren = window.Renderer()

ren.add(actor.line(csa_streamlines, line_colors(csa_streamlines)))

print('Saving illustration as csa_tracking.png')

window.record(ren, out_path='csa_tracking.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: csa_tracking.png
   :align: center

   Deterministic streamlines with EuDX on ODF peaks field modulated by GFA.

It is also possible to use EuDX with multiple ODF peaks, which is very helpful when
tracking in crossing areas.
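
# A minimal sketch (not part of the original snippet) of multi-peak tracking
# with the older EuDX API; it assumes ``csapeaks`` was computed by
# ``peaks_from_model`` with ``npeaks`` > 1 and that ``sphere`` is the sampling
# sphere. Seed count and thresholds are illustrative.
from dipy.tracking.eudx import EuDX
from dipy.tracking.streamline import Streamlines

eu = EuDX(csapeaks.peak_values,
          csapeaks.peak_indices,
          seeds=10000,
          odf_vertices=sphere.vertices,
          a_low=0.05)
csa_streamlines_mult_peaks = Streamlines(eu)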
Example #15
transform it into native image coordinates so that it is in the same coordinate
space as the ``fa`` image.
"""

bundle_native = transform_streamlines(bundle, np.linalg.inv(affine))

"""
Show every streamline with an orientation color
===============================================

This is the default option when you are using ``line`` or ``streamtube``.
"""

renderer = window.Renderer()

stream_actor = actor.line(bundle_native)

renderer.set_camera(position=(-176.42, 118.52, 128.20),
                    focal_point=(113.30, 128.31, 76.56),
                    view_up=(0.18, 0.00, 0.98))

renderer.add(stream_actor)

# Uncomment the line below to display the window
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path='bundle1.png', size=(600, 600))
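
# The same orientation coloring is also the default for ``streamtube``; a
# minimal sketch of the tube variant (the linewidth value is illustrative):
tube_actor = actor.streamtube(bundle_native, linewidth=0.3)
# renderer.add(tube_actor)  # swap in for the line actor if tubes are preferred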

"""
.. figure:: bundle1.png
   :align: center
Example #17
def main():
    global parser
    global args
    global model
    global bar
    global lut_cmap
    global list_x_file
    global max_weight
    global saturation
    global renderer
    global norm_fib
    global norm1
    global norm2
    global norm3
    global big_stream_actor
    global good_stream_actor
    global weak_stream_actor
    global big_Weight
    global good_Weight
    global weak_Weight
    global smallBundle_safe
    global smallWeight_safe
    global show_m
    global big_bundle
    global good_bundle
    global weak_bundle
    global nF
    global nIC
    global Ra
    global change_colormap_slider
    global remove_small_weights_slider
    global opacity_slider
    global remove_big_weights_slider
    global change_iteration_slider
    global num_computed_streamlines
    global numbers_of_streamlines_in_interval
    
    #defining the model used (Stick or cylinder)
    model = None
    if(os.path.isdir(args.commitOutputPath+"/Results_StickZeppelinBall") and os.path.isdir(args.commitOutputPath+"/Results_CylinderZeppelinBall")):
        model_index = input("Which model do you want to load (1 for 'Cylinder', 2 for 'Stick') : ")
        # input() returns a string in Python 3, so compare against "1"
        if(model_index=="1"): model = "Cylinder"
        else: model = "Stick"
    elif(os.path.isdir(args.commitOutputPath+"/Results_StickZeppelinBall")):
        model = "Stick"
    elif(os.path.isdir(args.commitOutputPath+"/Results_CylinderZeppelinBall")):
        model = "Cylinder"
    else:
        print("No valide model in this path")
        sys.exit(0)


    #formalizing the filenames of the iterations
    list_x_file = [file for file in os.listdir(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/") if (file.endswith('.npy') and (file[:-4]).isdigit() )]
    normalize_file_name(list_x_file)
    list_x_file.sort()
    num_iteration=len(list_x_file)

    #number of streamlines we want to load
    num_computed_streamlines = int(args.streamlinesNumber)
    #computing interval of weights
    max_weight = 0
    if(model == "Cylinder"):
        file = open( args.commitOutputPath+"/Results_"+model+"ZeppelinBall/results.pickle",'rb' )
        object_file = pickle.load( file )

        Ra = np.linspace( 0.75,3.5,12 ) * 1E-6

        nIC = len(Ra)    # IC  atoms
        nEC = 4          # EC  atoms
        nISO = 1         # ISO atoms

        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        nE = object_file[0]['optimization']['regularisation']['sizeEC']
        nV = object_file[0]['optimization']['regularisation']['sizeISO']


        num_ADI = np.zeros( nF )
        den_ADI = np.zeros( nF )

        dim = nib.load(args.commitOutputPath+"/Results_"+model+"ZeppelinBall/compartment_IC.nii.gz").get_data().shape
        norm_fib = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm_fib.npy")
        norm1 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm1.npy")
        norm2 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm2.npy")
        norm3 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm3.npy")
        for itNbr in list_x_file:
            #computing diameter
            x = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/"+ itNbr +'.npy')
            x_norm = x / np.hstack( (norm1*norm_fib,norm2,norm3) )

            for i in range(nIC):
                den_ADI = den_ADI + x_norm[i*nF:(i+1)*nF]
                num_ADI = num_ADI + x_norm[i*nF:(i+1)*nF] * Ra[i]

            Weight = 2 * ( num_ADI / ( den_ADI + np.spacing(1) ) ) * 1E6
            smallWeight_safe = Weight[:num_computed_streamlines]
            itNbr_max = np.amax(smallWeight_safe)
            if(itNbr_max>max_weight):
                max_weight=itNbr_max
    else:#model==Stick
        file = open( args.commitOutputPath+"/Results_"+model+"ZeppelinBall/results.pickle",'rb' )
        object_file = pickle.load( file )
        norm_fib = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm_fib.npy")
        norm1 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm1.npy")
        norm2 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm2.npy")
        norm3 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm3.npy")
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        for itNbr in list_x_file:
            x = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/"+ itNbr +'.npy')
            x_norm = x / np.hstack( (norm1*norm_fib,norm2,norm3) )

            Weight = x_norm[:nF]  #signal fractions
            smallWeight_safe = Weight[:num_computed_streamlines]
            itNbr_max = np.amax(smallWeight_safe)
            if(itNbr_max>max_weight):
                max_weight=itNbr_max
    #we need an interval slightly bigger than the max_weight
    max_weight = max_weight + 0.00001

    #computing initial weights
    if(model == "Cylinder"):#model==Cylinder
        file = open( args.commitOutputPath+"/Results_"+model+"ZeppelinBall/results.pickle",'rb' )
        object_file = pickle.load( file )

        Ra = np.linspace( 0.75,3.5,12 ) * 1E-6

        nIC = len(Ra)    # IC  atoms
        nEC = 4          # EC  atoms
        nISO = 1         # ISO atoms

        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        nE = object_file[0]['optimization']['regularisation']['sizeEC']
        nV = object_file[0]['optimization']['regularisation']['sizeISO']

        dim = nib.load(args.commitOutputPath+"/Results_"+model+"ZeppelinBall/compartment_IC.nii.gz").get_data().shape


        norm_fib = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm_fib.npy")
        #add the normalisation
        x = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/"+list_x_file[0]+'.npy')
        norm1 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm1.npy")
        norm2 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm2.npy")
        norm3 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm3.npy")
        x_norm = x / np.hstack( (norm1*norm_fib,norm2,norm3) )

        num_ADI = np.zeros( nF )
        den_ADI = np.zeros( nF )

        for i in range(nIC):
            den_ADI = den_ADI + x_norm[i*nF:(i+1)*nF]
            num_ADI = num_ADI + x_norm[i*nF:(i+1)*nF] * Ra[i]

        Weight = 2 * ( num_ADI / ( den_ADI + np.spacing(1) ) ) * 1E6
        smallWeight_safe = Weight[:num_computed_streamlines]
        weak_Weight = smallWeight_safe[:1]
        big_Weight = smallWeight_safe[:1]
        good_Weight = copy.copy(smallWeight_safe)
    else:#model==Stick
        file = open( args.commitOutputPath+"/Results_"+model+"ZeppelinBall/results.pickle",'rb' )
        object_file = pickle.load( file )
        nF = object_file[0]['optimization']['regularisation']['sizeIC']
        x = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/"+list_x_file[0]+'.npy')
        norm1 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm1.npy")
        norm2 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm2.npy")
        norm3 = np.load(args.commitOutputPath+"/Coeff_x_"+model+"ZeppelinBall/norm3.npy")
        x_norm = x / np.hstack( (norm1*norm_fib,norm2,norm3) )

        Weight = x_norm[:nF]  #signal fractions
        smallWeight_safe = Weight[:num_computed_streamlines]
        weak_Weight = smallWeight_safe[:1]
        big_Weight = smallWeight_safe[:1]
        good_Weight = copy.copy(smallWeight_safe)

    #load streamlines from the dictionary_TRK_fibers_trk file
    streams, hdr = nib.trackvis.read(args.commitOutputPath+"/dictionary_TRK_fibers.trk")
    streamlines = [s[0] for s in streams]
    smallBundle_safe = streamlines[:num_computed_streamlines]
    weak_bundle = smallBundle_safe[:1]
    big_bundle = smallBundle_safe[:1]
    good_bundle = copy.copy(smallBundle_safe)
    #number of good streamlines
    num_streamlines = len(smallBundle_safe)


    # mapping streamlines and initial weights(with a red bar) in a renderer
    hue = [0, 0]  # red only
    saturation = [0.0, 1.0]  # black to white

    lut_cmap = actor.colormap_lookup_table(
        scale_range=(0, max_weight),
        hue_range=hue,
        saturation_range=saturation)

    weak_stream_actor = actor.line(weak_bundle, weak_Weight,
                                   lookup_colormap=lut_cmap)
    big_stream_actor = actor.line(big_bundle, big_Weight,
                                  lookup_colormap=lut_cmap)
    good_stream_actor = actor.line(good_bundle, good_Weight,
                                   lookup_colormap=lut_cmap)

    bar = actor.scalar_bar(lut_cmap, title = 'weight')
    bar.SetHeight(0.5)
    bar.SetWidth(0.1)
    bar.SetPosition(0.85,0.45)

    renderer = window.Renderer()

    renderer.set_camera(position=(-176.42, 118.52, 128.20),
                        focal_point=(113.30, 100, 76.56),
                        view_up=(0.18, 0.00, 0.98))

    renderer.add(big_stream_actor)
    renderer.add(good_stream_actor)
    renderer.add(weak_stream_actor)
    renderer.add(bar)

    #adding sliders and renderer to a ShowManager
    show_m = window.ShowManager(renderer, size=(1200, 900))
    show_m.initialize()

    save_one_image_bouton = ui.LineSlider2D(min_value=0,
                                    max_value=1,
                                    initial_value=0,
                                    text_template="save",
                                    length=1)

    add_graph_bouton = ui.LineSlider2D(min_value=0,
                                    max_value=1,
                                    initial_value=0,
                                    text_template="graph",
                                    length=1)

    color_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=0,
                                     text_template="{value:.1f}",
                                     length=140)

    change_colormap_slider = ui.LineSlider2D(min_value=0,
                                    max_value=1.0,
                                    initial_value=0,
                                    text_template="",
                                    length=40)

    change_iteration_slider = ui.LineSlider2D(min_value=0,
                    #we can't have max_value=num_iteration because
                    #list_x_file[num_iteration] would lead to an IndexError
                                    max_value=num_iteration-0.01,
                                    initial_value=0,
                                    text_template=list_x_file[0],
                                    length=140)

    remove_big_weights_slider = ui.LineSlider2D(min_value=0,
                                    max_value=max_weight,
                                    initial_value=max_weight,
                                    text_template="{value:.2f}",
                                    length=140)

    remove_small_weights_slider = ui.LineSlider2D(min_value=0,
                                    max_value=max_weight,
                                    initial_value=0,
                                    text_template="{value:.2f}",
                                    length=140)

    opacity_slider = ui.LineSlider2D(min_value=0.0,
                                     max_value=1.0,
                                     initial_value=0.5,
                                     text_template="{ratio:.0%}",
                                     length=140)

    save_one_image_bouton.add_callback(save_one_image_bouton.slider_disk,
                                "LeftButtonPressEvent", save_one_image)

    color_slider.add_callback(color_slider.slider_disk,
                                "MouseMoveEvent", change_streamlines_color)
    color_slider.add_callback(color_slider.slider_line,
                               "LeftButtonPressEvent", change_streamlines_color)
    add_graph_bouton.add_callback(add_graph_bouton.slider_disk,
                                "LeftButtonPressEvent", add_graph)

    change_colormap_slider.add_callback(change_colormap_slider.slider_disk,
                                "MouseMoveEvent", change_colormap)
    change_colormap_slider.add_callback(change_colormap_slider.slider_line,
                                "LeftButtonPressEvent", change_colormap)
    change_iteration_slider.add_callback(change_iteration_slider.slider_disk,
                                "MouseMoveEvent", change_iteration)
    change_iteration_slider.add_callback(change_iteration_slider.slider_line,
                               "LeftButtonPressEvent", change_iteration)

    remove_big_weights_slider.add_callback(remove_big_weights_slider.slider_disk,
                                "MouseMoveEvent", remove_big_weight)
    remove_big_weights_slider.add_callback(remove_big_weights_slider.slider_line,
                               "LeftButtonPressEvent", remove_big_weight)

    remove_small_weights_slider.add_callback(remove_small_weights_slider.slider_disk,
                                "MouseMoveEvent", remove_small_weight)
    remove_small_weights_slider.add_callback(remove_small_weights_slider.slider_line,
                               "LeftButtonPressEvent", remove_small_weight)
    opacity_slider.add_callback(opacity_slider.slider_disk,
                                "MouseMoveEvent", change_opacity)
    opacity_slider.add_callback(opacity_slider.slider_line,
                               "LeftButtonPressEvent", change_opacity)

    color_slider_label = ui.TextBlock2D()
    color_slider_label.message = 'color of streamlines'

    change_colormap_slider_label_weight = ui.TextBlock2D()
    change_colormap_slider_label_weight.message = 'weight color'
    change_colormap_slider_label_direction = ui.TextBlock2D()
    change_colormap_slider_label_direction.message = 'direction color'

    change_iteration_slider_label = ui.TextBlock2D()
    change_iteration_slider_label.message = 'number of the iteration'

    remove_big_weights_slider_label = ui.TextBlock2D()
    remove_big_weights_slider_label.message = 'big weights subdued'

    remove_small_weights_slider_label = ui.TextBlock2D()
    remove_small_weights_slider_label.message = 'small weights subdued'

    opacity_slider_label = ui.TextBlock2D()
    opacity_slider_label.message = 'Unwanted weights opacity'

    numbers_of_streamlines_in_interval = ui.TextBlock2D()
    numbers_of_streamlines_in_interval.message = "Number of streamlines in interval: "+str(num_streamlines)


    panel = ui.Panel2D(center=(300, 160),
                       size=(500, 280),
                       color=(1, 1, 1),
                       opacity=0.1,
                       align="right")

    panel.add_element(save_one_image_bouton, 'relative', (0.9, 0.9))
    panel.add_element(add_graph_bouton, 'relative', (0.9, 0.77))
    panel.add_element(color_slider_label, 'relative', (0.05, 0.85))
    panel.add_element(color_slider, 'relative', (0.7, 0.9))
    panel.add_element(numbers_of_streamlines_in_interval, 'relative', (0.05, 0.72))
    panel.add_element(change_colormap_slider_label_weight, 'relative', (0.05, 0.59))
    panel.add_element(change_colormap_slider_label_direction, 'relative', (0.5, 0.59))
    panel.add_element(change_colormap_slider, 'relative', (0.4, 0.64))
    panel.add_element(change_iteration_slider_label, 'relative', (0.05, 0.46))
    panel.add_element(change_iteration_slider, 'relative', (0.7, 0.51))
    panel.add_element(remove_big_weights_slider_label, 'relative', (0.05, 0.33))
    panel.add_element(remove_big_weights_slider, 'relative', (0.7, 0.37))
    panel.add_element(remove_small_weights_slider_label, 'relative', (0.05, 0.2))
    panel.add_element(remove_small_weights_slider, 'relative', (0.7, 0.24))
    panel.add_element(opacity_slider_label, 'relative', (0.05, 0.07))
    panel.add_element(opacity_slider, 'relative', (0.7, 0.11))

    panel.add_to_renderer(renderer)
    renderer.reset_clipping_range()

    show_m.render()
    show_m.start()
Example #18
# Visualize the streamlines, colored by cci
scene = window.Scene()

hue = [0.5, 1]
saturation = [0.0, 1.0]

lut_cmap = actor.colormap_lookup_table(scale_range=(cci.min(), cci.max() / 4),
                                       hue_range=hue,
                                       saturation_range=saturation)

bar3 = actor.scalar_bar(lut_cmap)
scene.add(bar3)

stream_actor = actor.line(long_streamlines,
                          cci,
                          linewidth=0.1,
                          lookup_colormap=lut_cmap)
scene.add(stream_actor)
"""
If you set interactive to True (below), the scene will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(scene)
window.record(scene,
              n_frames=1,
              out_path='cci_streamlines.png',
              size=(800, 800))
"""
Example #19
def visualize_bundles(trk,
                      affine_or_mapping=None,
                      bundle=None,
                      ren=None,
                      color=None,
                      inline=True,
                      interact=False):
    """
    Visualize bundles in 3D using VTK
    """
    if isinstance(trk, str):
        trk = nib.streamlines.load(trk)
        tg = trk.tractogram
    else:
        # Assume these are streamlines (as list or Streamlines object):
        tg = nib.streamlines.Tractogram(trk)

    if affine_or_mapping is not None:
        tg = tg.apply_affine(np.linalg.inv(affine_or_mapping))

    streamlines = tg.streamlines

    if ren is None:
        ren = window.Renderer()

    # There are no bundles in here:
    if list(tg.data_per_streamline.keys()) == []:
        streamlines = list(streamlines)
        sl_actor = actor.line(streamlines, line_colors(streamlines))
        ren.add(sl_actor)
        sl_actor.GetProperty().SetRenderLinesAsTubes(1)
        sl_actor.GetProperty().SetLineWidth(6)
    # Bundle labels are present: show either all bundles or the requested one
    elif bundle is None:
        for b in np.unique(tg.data_per_streamline['bundle']):
            idx = np.where(tg.data_per_streamline['bundle'] == b)[0]
            this_sl = list(streamlines[idx])
            if color is not None:
                sl_actor = actor.line(this_sl, color)
                sl_actor.GetProperty().SetRenderLinesAsTubes(1)
                sl_actor.GetProperty().SetLineWidth(6)
            else:
                sl_actor = actor.line(this_sl,
                                      Tableau_20.colors[np.mod(int(b), 20)])
                sl_actor.GetProperty().SetRenderLinesAsTubes(1)
                sl_actor.GetProperty().SetLineWidth(6)

            ren.add(sl_actor)
    else:
        idx = np.where(tg.data_per_streamline['bundle'] == bundle)[0]
        this_sl = list(streamlines[idx])
        if color is not None:
            sl_actor = actor.line(this_sl, color)
            sl_actor.GetProperty().SetRenderLinesAsTubes(1)
            sl_actor.GetProperty().SetLineWidth(6)

        else:
            sl_actor = actor.line(this_sl,
                                  Tableau_20.colors[np.mod(int(bundle), 20)])
            sl_actor.GetProperty().SetRenderLinesAsTubes(1)
            sl_actor.GetProperty().SetLineWidth(6)
        ren.add(sl_actor)

    return _inline_interact(ren, inline, interact)
Example #20
"""
The total number of streamlines is shown below.
"""

print(len(streamlines))

"""
To increase the number of streamlines you can change the parameter
``seeds_count`` in ``random_seeds_from_mask``.

We can visualize the streamlines using ``actor.line`` or ``actor.streamtube``.
"""

ren.clear()
ren.add(actor.line(streamlines))

if interactive:
    window.show(ren, size=(900, 900))
else:
    print('Saving illustration as det_streamlines.png')
    window.record(ren, out_path='det_streamlines.png', size=(900, 900))

"""
.. figure:: det_streamlines.png
 :align: center

 **Deterministic streamlines using EuDX (new framework)**

To learn more about this process you could start playing with the number of
seed points or, even better, specify seeds to be in specific regions of interest
                                  'train_list.npy'))
test_list = np.load(os.path.join('..', 'short_data_4_model', 'test_list.npy'))
valid_list = np.load(os.path.join('..', 'short_data_4_model',
                                  'valid_list.npy'))

folder = os.path.join('subsample-data', str(sub_len))
npz2data(train_list, 'train')
print(1)
npz2data(valid_list, 'valid')
print(2)
npz2data(test_list, 'test')
print(3)

#%%
from dipy.viz import colormap
from dipy.viz import actor, window

color = colormap.line_colors(subsamp_sls)

streamlines_actor = actor.line(subsamp_sls, colormap.line_colors(subsamp_sls))

# Create the 3D display.
scene = window.Scene()
scene.add(streamlines_actor)

window.show(scene)

#%%

#%%
Example #22
def show_lines(streamlines, affine=None):
    renderer = window.Renderer()
    # actor.line expects colors as its second argument, not an affine;
    # if an affine is given, move the streamlines into that space first.
    if affine is not None:
        from dipy.tracking.streamline import transform_streamlines
        streamlines = transform_streamlines(streamlines, affine)
    lines = actor.line(streamlines)
    renderer.add(lines)
    window.show(renderer)
Example #23
fbc_sl_thres, clrs_thres, rfbc_thres = \
  fbc.get_points_rfbc_thresholded(0.125, emphasis=0.01)
"""
The results of FBC measures are visualized, showing the original fibers
colored by LFBC, and the fibers after the cleaning procedure via RFBC
thresholding.
"""

# Visualize the results
from dipy.viz import fvtk, actor

# Create renderer
ren = fvtk.ren()

# Original lines colored by LFBC
lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
fvtk.add(ren, lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = fvtk.slicer(t1_data, affine=affine)
vol_actor1.display(None, None, 20)
fvtk.add(ren, vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = fvtk.slicer(t1_data, affine=affine)
vol_actor2.display(35, None, None)
fvtk.add(ren, vol_actor2)

# Show original fibers
fvtk.camera(ren,
            pos=(-264, 285, 155),
Example #24
atlas_header = create_tractogram_header(atlas_file,
                                        *sft_atlas.space_attributes)

sft_target = load_trk(target_file, "same", bbox_valid_check=False)
target = sft_target.streamlines
target_header = create_tractogram_header(target_file,
                                         *sft_target.space_attributes)
"""
Let's visualize the atlas and target tractograms before registration.
"""

interactive = False

scene = window.Scene()
scene.SetBackground(1, 1, 1)
scene.add(actor.line(atlas, colors=(1, 0, 1)))
scene.add(actor.line(target, colors=(1, 1, 0)))
window.record(scene, out_path='tractograms_initial.png', size=(600, 600))
if interactive:
    window.show(scene)
"""
.. figure:: tractograms_initial.png
   :align: center

   Atlas and target before registration.

"""
"""
We will register the target tractogram to the model atlas' space using
streamline-based linear registration (SLR) [Garyfallidis15]_
"""
Example #25
def fiber_simple_3d_show(img, streamlines, world_coords=True, slicer_opacity=0.6):
    if not world_coords:
        from dipy.tracking.streamline import transform_streamlines
        streamlines = transform_streamlines(streamlines, np.linalg.inv(img.affine))

    # Renderer
    ren = window.Renderer()
    stream_actor = actor.line(streamlines)

    if not world_coords:
        image_actor = actor.slicer(img.get_data(), affine=np.eye(4))
    else:
        image_actor = actor.slicer(img.get_data(), img.affine)

    # opacity
    image_actor.opacity(slicer_opacity)

    # add some slice
    image_actor2 = image_actor.copy()
    image_actor2.opacity(slicer_opacity)
    # image_actor2.display()
    image_actor2.display(None, image_actor2.shape[1] // 2, None)
    image_actor3 = image_actor.copy()
    image_actor3.opacity(slicer_opacity)
    # image_actor3.display()
    image_actor3.display(image_actor3.shape[0] // 2, None, None)

    # connect the actors with the Render
    ren.add(stream_actor)
    ren.add(image_actor)
    ren.add(image_actor2)
    ren.add(image_actor3)

    # initial showmanager
    show_m = window.ShowManager(ren, size=(1200, 900))
    show_m.initialize()

    # change the position of the image_actor using a slider
    def change_slice(obj, event):
        z = int(np.round(obj.get_value()))
        image_actor.display_extent(0, img.shape[0] - 1, 0, img.shape[1] - 1, z, z)

    slicer = widget.slider(show_m.iren, show_m.ren, callback=change_slice, min_value=0, max_value=img.shape[2] - 1,
                           value=img.shape[2] / 2, label="Move slice",
                           right_normalized_pos=(.98, 0.6), size=(120, 0), label_format="%0.1f", color=(1., 1., 1.),
                           selected_color=(0.86, 0.33, 1.))

    # change the position of the image_actor using a slider
    def change_slice2(obj, event):
        y = int(np.round(obj.get_value()))
        image_actor2.display_extent(0, img.shape[0] - 1, y, y, 0, img.shape[2] - 1)

    slicer2 = widget.slider(show_m.iren, show_m.ren, callback=change_slice2, min_value=0, max_value=img.shape[1] - 1,
                            value=img.shape[1] / 2, label="Coronal slice",
                            right_normalized_pos=(.98, 0.3), size=(120, 0), label_format="%0.1f", color=(1., 1., 1.),
                            selected_color=(0.86, 0.33, 1.))

    # change the position of the image_actor using a slider
    def change_slice3(obj, event):
        x = int(np.round(obj.get_value()))
        image_actor3.display_extent(x, x, 0, img.shape[1] - 1, 0, img.shape[2] - 1)

    slicer3 = widget.slider(show_m.iren, show_m.ren, callback=change_slice3, min_value=0, max_value=img.shape[0] - 1,
                            value=img.shape[0] / 2, label="Sagittal slice",
                            right_normalized_pos=(.98, 0.9), size=(120, 0), label_format="%0.1f", color=(1., 1., 1.),
                            selected_color=(0.86, 0.33, 1.))

    # change window size, the slider will change
    global size
    size = ren.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():
            slicer.place(ren)
            slicer2.place(ren)
            slicer3.place(ren)
            size = obj.GetSize()

    show_m.initialize()

    # interact with the available 3D and 2D objects
    show_m.add_window_callback(win_callback)
    show_m.render()
    show_m.start()

    ren.zoom(1.5)
    ren.reset_clipping_range()

    window.record(ren, out_path='/home/brain/workingdir/pyfat/pyfat/example/test_results/cc_clusters_test.png', size=(1200, 900), reset_camera=False)
    del show_m
cci = cluster_confidence(long_streamlines)

# Visualize the streamlines, colored by cci
ren = window.Renderer()

hue = [0.5, 1]
saturation = [0.0, 1.0]

lut_cmap = actor.colormap_lookup_table(scale_range=(cci.min(), cci.max()/4),
                                       hue_range=hue,
                                       saturation_range=saturation)

bar3 = actor.scalar_bar(lut_cmap)
ren.add(bar3)

stream_actor = actor.line(long_streamlines, cci, linewidth=0.1,
                          lookup_colormap=lut_cmap)
ren.add(stream_actor)


"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""


interactive = False
if interactive:
    window.show(ren)
window.record(ren, n_frames=1, out_path='cci_streamlines.png',
              size=(800, 800))
Example #27
streamlines = Streamlines(streamline_generator)
"""
The total number of streamlines is shown below.
"""

print(len(streamlines))
"""
To increase the number of streamlines you can change the parameter
``seeds_count`` in ``random_seeds_from_mask``.

We can visualize the streamlines using ``actor.line`` or ``actor.streamtube``.
"""

ren.clear()
ren.add(actor.line(streamlines))

if interactive:
    window.show(ren, size=(900, 900))
else:
    print('Saving illustration as det_streamlines.png')
    window.record(
        ren,
        out_path='/Users/ptm/desktop/Current_working_directory/DIPY/det_streamlines.png',
        size=(900, 900))

##
##
##
import cv2 as cv2
Example #28
                                      affine=affine,
                                      step_size=.5)
# Generate streamlines object
streamlines = Streamlines(streamlines_generator)
"""
We will then display the resulting streamlines using the ``fury``
python package.
"""

from dipy.viz import colormap

if has_fury:
    # Prepare the display objects.
    color = colormap.line_colors(streamlines)

    streamlines_actor = actor.line(streamlines,
                                   colormap.line_colors(streamlines))

    # Create the 3D display.
    scene = window.Scene()
    scene.add(streamlines_actor)

    # Save still images for this static example. Or for interactivity use
    window.record(scene, out_path='tractogram_EuDX.png', size=(800, 800))
    if interactive:
        window.show(scene)
"""
.. figure:: tractogram_EuDX.png
   :align: center

   **Corpus Callosum using EuDx**
Example #29
all_streamlines_threshold_classifier = LocalTracking(dg,
                                                     threshold_classifier,
                                                     seeds,
                                                     affine,
                                                     step_size=.5,
                                                     return_all=True)

save_trk("deterministic_threshold_classifier_all.trk",
         all_streamlines_threshold_classifier, affine, labels.shape)

streamlines = Streamlines(all_streamlines_threshold_classifier)

if have_fury:
    window.clear(ren)
    ren.add(actor.line(streamlines, cmap.line_colors(streamlines)))
    window.record(ren,
                  out_path='all_streamlines_threshold_classifier.png',
                  size=(600, 600))
    if interactive:
        window.show(ren)
"""
.. figure:: all_streamlines_threshold_classifier.png
 :align: center

 **Deterministic tractography using a thresholded fractional anisotropy.**
"""
"""
Binary Tissue Classifier
------------------------
A binary mask can be used to define where the tracking stops. The binary
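
# The snippet is cut off here; a minimal sketch of the binary classifier it
# introduces, using the same older dipy API as above and assuming
# ``white_matter`` is a binary white-matter mask volume:
from dipy.tracking.local import BinaryTissueClassifier

binary_classifier = BinaryTissueClassifier(white_matter == 1)

all_streamlines_binary_classifier = LocalTracking(dg,
                                                  binary_classifier,
                                                  seeds,
                                                  affine,
                                                  step_size=.5,
                                                  return_all=True)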
Example #30
cc_streamlines = utils.target(streamlines, affine, cc_slice)
cc_streamlines = Streamlines(cc_streamlines)

other_streamlines = utils.target(streamlines, affine, cc_slice,
                                 include=False)
other_streamlines = Streamlines(other_streamlines)
assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)

from dipy.viz import window, actor, colormap as cmap

# Enables/disables interactive visualization
interactive = False

# Make display objects
color = cmap.line_colors(cc_streamlines)
cc_streamlines_actor = actor.line(cc_streamlines,
                                  cmap.line_colors(cc_streamlines))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
                                      opacity=0.5)

vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)
Beispiel #31
0
atlas_file, atlas_folder = fetch_bundle_atlas_hcp842()

atlas_file, all_bundles_files = get_bundle_atlas_hcp842()
target_file = get_target_tractogram_hcp()

atlas, atlas_header = load_trk(atlas_file)
target, target_header = load_trk(target_file)
"""
Let's visualize the atlas and target tractograms before registration.
"""

interactive = False

ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.line(atlas, colors=(1, 0, 1)))
ren.add(actor.line(target, colors=(1, 1, 0)))
window.record(ren, out_path='tractograms_initial.png', size=(600, 600))
if interactive:
    window.show(ren)
"""
.. figure:: tractograms_initial.png
   :align: center

   Atlas and target before registration.

"""
"""
We will register the target tractogram to the model atlas' space using
streamline-based linear registration (SLR) [Garyfallidis15]_.
"""
Beispiel #32
0
def dwi_probabilistic_tracing(image,
                              bvecs,
                              bvals,
                              wm,
                              seeds,
                              fibers,
                              rseed=42,
                              prune_length=3,
                              plot=False,
                              verbose=False):
    # Pipeline transcribed from:
    #   https://dipy.org/documentation/1.1.1./examples_built/tracking_probabilistic/
    # Load Images
    dwi_loaded = nib.load(image)
    dwi_data = dwi_loaded.get_fdata()

    wm_loaded = nib.load(wm)
    wm_data = wm_loaded.get_fdata()

    seeds_loaded = nib.load(seeds)
    seeds_data = seeds_loaded.get_fdata()
    seeds = utils.seeds_from_mask(seeds_data, dwi_loaded.affine, density=1)

    # Load B-values & B-vectors
    # NB. Use aligned b-vecs if providing eddy-aligned data
    bvals, bvecs = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)

    # Establish ODF model
    response, ratio = auto_response(gtab, dwi_data, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
    csd_fit = csd_model.fit(dwi_data, mask=wm_data)

    # Set stopping criterion
    csa_model = CsaOdfModel(gtab, sh_order=6)
    gfa = csa_model.fit(dwi_data, mask=wm_data).gfa
    stopping_criterion = ThresholdStoppingCriterion(gfa, .25)

    # Create probabilistic direction getter
    fod = csd_fit.odf(default_sphere)
    pmf = fod.clip(min=0)
    prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf,
                                                    max_angle=30.,
                                                    sphere=default_sphere)

    # Generate streamlines
    streamline_generator = LocalTracking(prob_dg,
                                         stopping_criterion,
                                         seeds,
                                         dwi_loaded.affine,
                                         0.5,
                                         random_seed=rseed)
    streamlines = Streamlines(streamline_generator)

    # Prune streamlines
    streamlines = ArraySequence(
        [strline for strline in streamlines if len(strline) > prune_length])
    sft = StatefulTractogram(streamlines, dwi_loaded, Space.RASMM)

    # Save streamlines
    save_trk(sft, fibers + ".trk")

    # Visualize fibers
    if plot and has_fury:
        from dipy.viz import window, actor, colormap as cmap

        # Create the 3D display.
        r = window.Renderer()
        r.add(actor.line(streamlines, cmap.line_colors(streamlines)))
        window.record(r, out_path=fibers + '.png', size=(800, 800))
colored by LFBC (see :ref:`optic_radiation_before_cleaning`), and the fibers
after the cleaning procedure via RFBC thresholding (see
:ref:`optic_radiation_after_cleaning`).
"""

# Visualize the results
from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False

# Create renderer
ren = window.Renderer()

# Original lines colored by LFBC
lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
ren.add(lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = actor.slicer(t1_data, affine=affine)
vol_actor1.display(z=20)
ren.add(vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = actor.slicer(t1_data, affine=affine)
vol_actor2.display(x=35)
ren.add(vol_actor2)

# Show original fibers
ren.set_camera(position=(-264, 285, 155), focal_point=(0, -14, 9), view_up=(0, 0, 1))
window.record(ren, n_frames=1, out_path='OR_before.png', size=(900, 900))
Beispiel #34
0
    def add_cluster_actors(self,
                           scene,
                           tractograms,
                           threshold,
                           enable_callbacks=True):
        """ Add streamline actors to the scene

        Parameters
        ----------
        scene : Scene
        tractograms : list
            list of tractograms
        threshold : float
            Cluster threshold
        enable_callbacks : bool
            Enable callbacks for selecting clusters
        """
        color_ind = 0
        for (t, sft) in enumerate(tractograms):
            streamlines = sft.streamlines

            if 'tracts' in self.random_colors:
                colors = next(self.color_gen)
            else:
                colors = None

            if not self.world_coords:
                # TODO we need to read the affine of a tractogram
                # from a StatefulTractogram
                msg = 'Currently native coordinates are not supported'
                msg += ' for streamlines'
                raise ValueError(msg)

            if self.cluster:

                print(' Clustering threshold {} \n'.format(threshold))
                clusters = qbx_and_merge(streamlines,
                                         [40, 30, 25, 20, threshold])
                self.tractogram_clusters[t] = clusters
                centroids = clusters.centroids
                print(' Number of centroids is {}'.format(len(centroids)))
                sizes = np.array([len(c) for c in clusters])
                linewidths = np.interp(sizes,
                                       [sizes.min(), sizes.max()], [0.1, 2.])
                centroid_lengths = np.array([length(c) for c in centroids])

                print(' Minimum number of streamlines in cluster {}'.format(
                    sizes.min()))

                print(' Maximum number of streamlines in cluster {}'.format(
                    sizes.max()))

                print(' Construct cluster actors')
                for (i, c) in enumerate(centroids):

                    centroid_actor = actor.streamtube([c],
                                                      colors,
                                                      linewidth=linewidths[i],
                                                      lod=False)
                    scene.add(centroid_actor)
                    self.mem.centroid_actors.append(centroid_actor)

                    cluster_actor = actor.line(clusters[i], lod=False)
                    cluster_actor.GetProperty().SetRenderLinesAsTubes(1)
                    cluster_actor.GetProperty().SetLineWidth(6)
                    cluster_actor.GetProperty().SetOpacity(1)
                    cluster_actor.VisibilityOff()

                    scene.add(cluster_actor)
                    self.mem.cluster_actors.append(cluster_actor)

                    # Every centroid actor (cea) is paired to a cluster actor
                    # (cla).

                    self.cea[centroid_actor] = {
                        'cluster_actor': cluster_actor,
                        'cluster': i,
                        'tractogram': t,
                        'size': sizes[i],
                        'length': centroid_lengths[i],
                        'selected': 0,
                        'expanded': 0
                    }

                    self.cla[cluster_actor] = {
                        'centroid_actor': centroid_actor,
                        'cluster': i,
                        'tractogram': t,
                        'size': sizes[i],
                        'length': centroid_lengths[i],
                        'selected': 0,
                        'highlighted': 0
                    }
                    apply_shader(self, cluster_actor)
                    apply_shader(self, centroid_actor)

            else:

                s_colors = self.buan_colors[color_ind] if self.buan else colors
                streamline_actor = actor.line(streamlines, colors=s_colors)

                streamline_actor.GetProperty().SetEdgeVisibility(1)
                streamline_actor.GetProperty().SetRenderLinesAsTubes(1)
                streamline_actor.GetProperty().SetLineWidth(6)
                streamline_actor.GetProperty().SetOpacity(1)
                scene.add(streamline_actor)
                self.mem.streamline_actors.append(streamline_actor)

            color_ind += 1

        if not enable_callbacks:
            return

        def left_click_centroid_callback(obj, event):
            self.cea[obj]['selected'] = not self.cea[obj]['selected']
            self.cla[self.cea[obj]['cluster_actor']]['selected'] = \
                self.cea[obj]['selected']
            self.show_m.render()

        def left_click_cluster_callback(obj, event):
            if self.cla[obj]['selected']:
                self.cla[obj]['centroid_actor'].VisibilityOn()
                ca = self.cla[obj]['centroid_actor']
                self.cea[ca]['selected'] = 0
                obj.VisibilityOff()
                self.cea[ca]['expanded'] = 0

            self.show_m.render()

        for cl in self.cla:
            cl.AddObserver('LeftButtonPressEvent', left_click_cluster_callback,
                           1.0)
            self.cla[cl]['centroid_actor'].AddObserver(
                'LeftButtonPressEvent', left_click_centroid_callback, 1.0)
Beispiel #35
0
def horizon(tractograms, data, affine, cluster=False, cluster_thr=15.,
            random_colors=False,
            length_lt=0, length_gt=np.inf, clusters_lt=0, clusters_gt=np.inf):

    rng = np.random.RandomState(42)
    slicer_opacity = .8

    ren = window.Renderer()
    global centroid_actors
    centroid_actors = []
    for streamlines in tractograms:

        print(' Number of streamlines loaded {} \n'.format(len(streamlines)))

        if not random_colors:
            ren.add(actor.line(streamlines, opacity=1., lod_points=10 ** 5))
        else:
            colors = rng.rand(3)
            ren.add(actor.line(streamlines, colors, opacity=1., lod_points=10 ** 5))

    class SimpleTrackBallNoBB(window.vtk.vtkInteractorStyleTrackballCamera):
        def HighlightProp(self, p):
            pass

    style = SimpleTrackBallNoBB()
    # very hackish way
    style.SetPickColor(0, 0, 0)
    # style.HighlightProp(None)
    show_m = window.ShowManager(ren, size=(1200, 900), interactor_style=style)
    show_m.initialize()

    if data is not None:
        #from dipy.core.geometry import rodrigues_axis_rotation
        #affine[:3, :3] = np.dot(affine[:3, :3], rodrigues_axis_rotation((0, 0, 1), 45))

        image_actor = actor.slicer(data, affine)
        image_actor.opacity(slicer_opacity)
        image_actor.SetInterpolate(False)
        ren.add(image_actor)

        ren.add(fvtk.axes((10, 10, 10)))

        last_value = [10]
        def change_slice(obj, event):
            new_value = int(np.round(obj.get_value()))
            if new_value == image_actor.shape[1] - 1 or new_value == 0:
                new_value = last_value[0] + np.sign(new_value - last_value[0])

            image_actor.display(None, new_value, None)
            obj.set_value(new_value)
            last_value[0] = new_value

        slider = widget.slider(show_m.iren, show_m.ren,
                               callback=change_slice,
                               min_value=0,
                               max_value=image_actor.shape[1] - 1,
                               value=image_actor.shape[1] / 2,
                               label="Move slice",
                               right_normalized_pos=(.98, 0.6),
                               size=(120, 0), label_format="%0.lf",
                               color=(1., 1., 1.),
                               selected_color=(0.86, 0.33, 1.))

        slider.SetAnimationModeToJump()

    global size
    size = ren.GetSize()
    # ren.background((1, 0.5, 0))
    ren.background((0, 0, 0))
    global picked_actors
    picked_actors = {}

    def pick_callback(obj, event):
        global centroid_actors
        global picked_actors

        prop = obj.GetProp3D()

        ac = np.array(centroid_actors)
        index = np.where(ac == prop)[0]

        if len(index) > 0:
            try:
                bundle = picked_actors[prop]
                ren.rm(bundle)
                del picked_actors[prop]
            except:
                bundle = actor.line(clusters[visible_cluster_id[index]],
                                    lod=False)
                picked_actors[prop] = bundle
                ren.add(bundle)

        if prop in picked_actors.values():
            ren.rm(prop)

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():

            if data is not None:
                slider.place(ren)
            size = obj.GetSize()

    global centroid_visibility
    centroid_visibility = True

    def key_press(obj, event):
        global centroid_visibility
        key = obj.GetKeySym()
        if key == 'h' or key == 'H':
            if cluster:
                if centroid_visibility is True:
                    for ca in centroid_actors:
                        ca.VisibilityOff()
                    centroid_visibility = False
                else:
                    for ca in centroid_actors:
                        ca.VisibilityOn()
                    centroid_visibility = True
                show_m.render()

    show_m.initialize()
    show_m.iren.AddObserver('KeyPressEvent', key_press)
    show_m.add_window_callback(win_callback)
    #show_m.add_picker_callback(pick_callback)
    show_m.render()
    show_m.start()
def simple_viewer(streamlines, vol, affine):

    renderer = window.Renderer()
    renderer.add(actor.line(streamlines))
    renderer.add(actor.slicer(vol, affine))
    window.show(renderer)
Beispiel #37
0
If we want to see the objects in native space we need to make sure that all
objects which are currently in world coordinates are transformed back to
native space using the inverse of the affine.
"""

if not world_coords:
    from dipy.tracking.streamline import transform_streamlines
    streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

"""
Now we create a ``Renderer`` object and add the streamlines using the ``line``
function and an image plane using the ``slicer`` function.
"""

ren = window.Renderer()
stream_actor = actor.line(streamlines)

if not world_coords:
    image_actor = actor.slicer(data, affine=np.eye(4))
else:
    image_actor = actor.slicer(data, affine)

"""
We can also change the opacity of the slicer.
"""

slicer_opacity = .6
image_actor.opacity(slicer_opacity)

"""
Connect the actors with the Renderer.
"""
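"""
The excerpt stops before the actors are actually added; a minimal sketch of
that step, using the actors created above:
"""

# Sketch: attach the streamline actor and the slicer actor to the renderer.
ren.add(stream_actor)
ren.add(image_actor)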
# Apply a threshold on the RFBC to remove spurious fibers
fbc_sl_thres, clrs_thres, rfbc_thres = fbc.get_points_rfbc_thresholded(
    0.125, emphasis=0.01)

print("The process is already running here.")

# Visualize the results
from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = True

# Create renderer
ren = window.Renderer()
lineactor = actor.line(fiber_data, linewidth=0.2)
ren.add(lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = actor.slicer(img_T1w_data, affine=affine)
vol_actor1.display(z=50)
ren.add(vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = actor.slicer(img_T1w_data, affine=affine)
vol_actor2.display(x=70)
ren.add(vol_actor2)

# Show original fibers
if interactive:
    window.show(ren)
Beispiel #39
0
def interactive_viewer(streamlines, outlierness):
    import vtk
    from dipy.viz import fvtk, actor, window, widget
    from dipy.data.fetcher import read_viz_icons

    colormap_name = "jet"
    stream_actor = actor.line(streamlines, colors=fvtk.create_colormap(outlierness, name=colormap_name))
    stream_actor.SetPosition(-np.array(stream_actor.GetCenter()))

    global threshold
    threshold = 0.8

    streamlines_color = np.zeros(len(streamlines), dtype="float32")
    streamlines_color[outlierness < threshold] = 1
    streamlines_color[outlierness >= threshold] = 0

    lut = vtk.vtkLookupTable()
    lut.SetNumberOfTableValues(2)
    lut.Build()
    lut.SetTableValue(0, tuple(fvtk.colors.orange_red) + (1,))
    lut.SetTableValue(1, tuple(fvtk.colors.green) + (1,))
    lut.SetTableRange(0, 1)

    stream_split_actor = actor.line(streamlines, colors=streamlines_color, lookup_colormap=lut)
    stream_split_actor.SetPosition(-np.array(stream_split_actor.GetCenter()))
    hist_actor, hist_fig = create_hist_actor(outlierness, colormap_name=colormap_name)

    # Main renderer
    bg = (0, 0, 0)
    global screen_size
    screen_size = (0, 0)
    ren_main = window.Renderer()
    ren_main.background(bg)
    show_m = window.ShowManager(ren_main, size=(1066, 600), interactor_style="trackball")
    show_m.window.SetNumberOfLayers(2)
    ren_main.SetLayer(1)
    ren_main.InteractiveOff()

    # Outlierness renderer
    ren_outlierness = window.Renderer()
    show_m.window.AddRenderer(ren_outlierness)
    ren_outlierness.background(bg)
    ren_outlierness.SetViewport(0, 0.3, 0.5, 1)
    ren_outlierness.add(stream_actor)
    ren_outlierness.reset_camera_tight()

    ren_split = window.Renderer()
    show_m.window.AddRenderer(ren_split)
    ren_split.background(bg)
    ren_split.SetViewport(0.5, 0.3, 1, 1)
    ren_split.add(stream_split_actor)
    ren_split.SetActiveCamera(ren_outlierness.GetActiveCamera())

    # Histogram renderer
    ren_hist = window.Renderer()
    show_m.window.AddRenderer(ren_hist)
    ren_hist.projection("parallel")
    ren_hist.background(bg)
    ren_hist.SetViewport(0, 0, 1, 0.3)
    ren_hist.add(hist_actor)
    ren_hist.SetInteractive(False)

    def apply_threshold(obj, evt):
        global threshold
        new_threshold = np.round(obj.GetSliderRepresentation().GetValue(), decimals=2)
        obj.GetSliderRepresentation().SetValue(new_threshold)
        if threshold != new_threshold:
            threshold = new_threshold

            streamlines_color = np.zeros(len(streamlines), dtype=np.float32)
            streamlines_color[outlierness < threshold] = 1
            streamlines_color[outlierness >= threshold] = 0

            colors = []
            for color, streamline in zip(streamlines_color, streamlines):
                colors += [color] * len(streamline)

            scalars = stream_split_actor.GetMapper().GetInput().GetPointData().GetScalars()
            for i, c in enumerate(colors):
                scalars.SetValue(i, c)

            scalars.Modified()

    threshold_slider_rep = vtk.vtkSliderRepresentation3D()
    threshold_slider_rep.SetMinimumValue(0.)
    threshold_slider_rep.SetMaximumValue(1.)
    threshold_slider_rep.SetValue(threshold)
    threshold_slider_rep.SetLabelFormat("%0.2lf")
    threshold_slider_rep.SetLabelHeight(0.02)
    threshold_slider_rep.GetPoint1Coordinate().SetCoordinateSystemToWorld()
    x1, x2, y1, y2, z1, z2 = hist_actor.GetBounds()
    threshold_slider_rep.GetPoint1Coordinate().SetValue(x1*1., y1-5, 0)
    threshold_slider_rep.GetPoint2Coordinate().SetCoordinateSystemToWorld()
    threshold_slider_rep.GetPoint2Coordinate().SetValue(x2*1., y1-5, 0)
    threshold_slider_rep.SetEndCapLength(0.)
    threshold_slider_rep.SetEndCapWidth(0.)

    threshold_slider = vtk.vtkSliderWidget()
    threshold_slider.SetInteractor(show_m.iren)
    threshold_slider.SetRepresentation(threshold_slider_rep)
    threshold_slider.SetCurrentRenderer(ren_hist)
    threshold_slider.SetAnimationModeToJump()
    threshold_slider.EnabledOn()

    threshold_slider.AddObserver("InteractionEvent", apply_threshold)

    #ren_main
    def _place_buttons():
        sz = 30.0
        width, _ = ren_main.GetSize()

        # bds = np.zeros(6)
        # bds[0] = width - sz - 5
        # bds[1] = bds[0] + sz
        # bds[2] = 5
        # bds[3] = bds[2] + sz
        # bds[4] = bds[5] = 0.0
        # save_button.GetRepresentation().PlaceWidget(bds)

    def _window_callback(obj, event):
        ren_hist.reset_camera_tight(margin_factor=1.2)
        _place_buttons()

    show_m.add_window_callback(_window_callback)
    show_m.initialize()
    show_m.render()
    show_m.start()

    inliers = [s for s, keep in zip(streamlines, outlierness < threshold) if keep]
    outliers = [s for s, keep in zip(streamlines, outlierness >= threshold) if keep]
    return inliers, outliers
Beispiel #40
0
transform it into native image coordinates so that it is in the same coordinate
space as the ``fa`` image.
"""

bundle_native = transform_streamlines(bundle, np.linalg.inv(affine))

"""
Show every streamline with an orientation color
===============================================

This is the default option when you are using ``line`` or ``streamtube``.
"""

renderer = window.Renderer()

stream_actor = actor.line(bundle_native)

renderer.set_camera(position=(-176.42, 118.52, 128.20), focal_point=(113.30, 128.31, 76.56), view_up=(0.18, 0.00, 0.98))

renderer.add(stream_actor)

# Uncomment the line below to display the window
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path="bundle1.png", size=(600, 600))

"""
.. figure:: bundle1.png
   :align: center

   **One orientation color for every streamline**.
Beispiel #41
0
# Initialization of LocalTracking. The computation happens in the next step.
streamlines_generator = LocalTracking(csa_peaks,
                                      classifier,
                                      seeds,
                                      affine=np.eye(4),
                                      step_size=.5)

# Generate streamlines object
streamlines = Streamlines(streamlines_generator)

# Prepare the display objects.
color = line_colors(streamlines)

if window.have_vtk:
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save still images for this static example. Or for interactivity use
    window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800))
    if interactive:
        window.show(r)
"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**
Beispiel #42
0
"""
We can use some of dipy_'s visualization tools to display the ROI we targeted
above and all the streamlines that pass through that ROI. The ROI is the yellow
region near the center of the axial image.
"""

from dipy.viz import window, actor
from dipy.viz.colormap import line_colors

# Enables/disables interactive visualization
interactive = False

# Make display objects
color = line_colors(cc_streamlines)
cc_streamlines_actor = actor.line(cc_streamlines, line_colors(cc_streamlines))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
                                      opacity=0.5)

vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)
"""
Example #1: Bootstrap direction getter with CSD Model
"""

from dipy.direction import BootDirectionGetter
from dipy.tracking.streamline import Streamlines
from dipy.data import small_sphere

boot_dg_csd = BootDirectionGetter.from_data(data, csd_model, max_angle=30.,
                                            sphere=small_sphere)
boot_streamline_generator = LocalTracking(boot_dg_csd, classifier, seeds,
                                          affine, step_size=.5)
streamlines = Streamlines(boot_streamline_generator)

renderer.clear()
renderer.add(actor.line(streamlines, line_colors(streamlines)))
window.record(renderer, out_path='bootstrap_dg_CSD.png', size=(600, 600))

"""
.. figure:: bootstrap_dg_CSD.png
   :align: center

   **Corpus Callosum Bootstrap Probabilistic Direction Getter**

We have created a bootstrapped probabilistic set of streamlines. If you repeat
the fiber tracking (keeping all inputs the same) you will NOT get exactly the
same set of streamlines. We can save the streamlines as a Trackvis file so it
can be loaded into other software for visualization or further analysis.
"""

save_trk("bootstrap_dg_CSD.trk", streamlines, affine, labels.shape)
Beispiel #44
0
def visualize_bundles(sft, affine=None, n_points=None, bundle_dict=None,
                      bundle=None, colors=None, color_by_volume=None,
                      cbv_lims=[None, None], figure=None, background=(1, 1, 1),
                      interact=False, inline=False):
    """
    Visualize bundles in 3D using VTK

    Parameters
    ----------
    sft : Stateful Tractogram, str
        A Stateful Tractogram containing streamline information
        or a path to a trk file
        In order to visualize individual bundles, the Stateful Tractogram
        must contain a bundle key in its data_per_streamline which is a list
        of bundle `'uid'`.

    affine : ndarray, optional
       An affine transformation to apply to the streamlines before
       visualization. Default: no transform.

    n_points : int or None
        n_points to resample streamlines to before plotting. If None, no
        resampling is done.

    bundle_dict : dict, optional
        Keys are names of bundles and values are dicts that should include
        a key `'uid'` with values as integers for selection from the sft
        metadata. Default: bundles are either not identified, or identified
        only as unique integers in the metadata.

    bundle : str or int, optional
        The name of a bundle to select from among the keys in `bundle_dict`
        or an integer for selection from the sft metadata.

    colors : dict or list
        If this is a dict, keys are bundle names and values are RGB tuples.
        If this is a list, each item is an RGB tuple. Defaults to a list
        with Tableau 20 RGB values if bundle_dict is None, or dict from
        bundles to Tableau 20 RGB values if bundle_dict is not None.

    color_by_volume : ndarray or str, optional
        3d volume use to shade the bundles. If None, no shading
        is performed. Only works when using the plotly backend.
        Default: None

    cbv_lims : ndarray
        Of the form (lower bound, upper bound). Shading based on
        color_by_volume will only differentiate values within these bounds.
        If lower bound is None, will default to 0.
        If upper bound is None, will default to the maximum value in
        color_by_volume.
        Default: [None, None]

    background : tuple, optional
        RGB values for the background. Default: (1, 1, 1), which is white
        background.

    figure : fury Scene object, optional
        If provided, the visualization will be added to this Scene. Default:
        Initialize a new Scene.

    interact : bool
        Whether to provide an interactive VTK window for interaction.
        Default: False

    inline : bool
        Whether to embed the visualization inline in a notebook. Only works
        in the notebook context. Default: False.

    Returns
    -------
    Fury Scene object
    """

    if figure is None:
        figure = window.Scene()

    figure.SetBackground(background[0], background[1], background[2])

    for (sls, color, name, _) in vut.tract_generator(
            sft, affine, bundle, bundle_dict, colors, n_points):
        sls = list(sls)
        if name == "all_bundles":
            color = line_colors(sls)

        sl_actor = actor.line(sls, color)
        figure.add(sl_actor)
        sl_actor.GetProperty().SetRenderLinesAsTubes(1)
        sl_actor.GetProperty().SetLineWidth(6)

    return _inline_interact(figure, inline, interact)
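A hypothetical call might look like the following; the trk path and the bundle
metadata (the ``'uid'`` mapping) are illustrative placeholders, not part of the
original source.

# Hypothetical usage (assumption): the sft argument may also be a
# StatefulTractogram object instead of a path to a trk file.
scene = visualize_bundles("whole_brain_tractogram.trk",
                          bundle_dict={"CST_L": {"uid": 1}},
                          bundle="CST_L",
                          interact=True)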
Beispiel #45
0
def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from dipy.viz.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.get_affine()

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # assert that the seed ROI rendering is not far
    # away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
Beispiel #46
0
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
    """ Visualize peak directions as given from ``peaks_from_model``

    Parameters
    ----------
    peaks_dirs : ndarray
        Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
        (X, Y, M, 3) or (X, Y, Z, M, 3)
    peaks_values : ndarray
        Peak values. The shape of the array can be (M, ) or (X, M) or
        (X, Y, M) or (X, Y, Z, M)

    scale : float
        Distance between spheres

    colors : ndarray or tuple
        Peak colors

    Returns
    -------
    vtkActor

    See Also
    --------
    dipy.viz.fvtk.sphere_funcs

    """
    peaks_dirs = np.asarray(peaks_dirs)
    if peaks_dirs.ndim > 5:
        raise ValueError("Wrong shape")

    peaks_dirs = _makeNd(peaks_dirs, 5)
    if peaks_values is not None:
        peaks_values = _makeNd(peaks_values, 4)

    grid_shape = np.array(peaks_dirs.shape[:3])

    list_dirs = []

    for ijk in np.ndindex(*grid_shape):

        xyz = scale * (ijk - grid_shape / 2.)[:, None]

        xyz = xyz.T

        for i in range(peaks_dirs.shape[-2]):

            if peaks_values is not None:

                pv = peaks_values[ijk][i]

            else:

                pv = 1.

            symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
                              peaks_dirs[ijk][i] * pv + xyz))

            list_dirs.append(symm)

    return line(list_dirs, colors)
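A hypothetical usage of this helper, assuming ``csa_peaks`` was produced by
``peaks_from_model`` (the slab indices are illustrative):

from dipy.viz import fvtk

# Hypothetical usage (assumption): csa_peaks comes from peaks_from_model and
# exposes peak_dirs (X, Y, Z, M, 3) and peak_values (X, Y, Z, M).
ren = fvtk.ren()
peak_actor = peaks(csa_peaks.peak_dirs[:, :, 30:31],
                   csa_peaks.peak_values[:, :, 30:31],
                   scale=2.2)
fvtk.add(ren, peak_actor)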
def dwi_deterministic_tracing(image, bvecs, bvals, wm, seeds, fibers,
                              prune_length=3, plot=False, verbose=False):
    # Pipeline transcribed from:
    #   http://nipy.org/dipy/examples_built/introduction_to_basic_tracking.html
    # Load Images
    dwi_loaded = nib.load(image)
    dwi_data = dwi_loaded.get_data()

    wm_loaded = nib.load(wm)
    wm_data = wm_loaded.get_data()

    seeds_loaded = nib.load(seeds)
    seeds_data = seeds_loaded.get_data()

    # Load B-values & B-vectors
    # NB. Use aligned b-vecs if providing eddy-aligned data
    bvals, bvecs = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)

    # Establish ODF model
    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, dwi_data, default_sphere,
                                 relative_peak_threshold=0.8,
                                 min_separation_angle=45,
                                 mask=wm_data)

    # Classify tissue for high FA and create seeds
    # (Putting this inside a looped try-block to handle fuzzy failures)
    classifier = ThresholdTissueClassifier(csa_peaks.gfa, 0.25)
    seeds = wrap_fuzzy_failures(utils.seeds_from_mask,
                                args=[seeds_data],
                                kwargs={"density": [2, 2, 2],
                                        "affine": np.eye(4)},
                                errortype=ValueError,
                                failure_threshold=5,
                                verbose=verbose)

    # Perform deterministic tracing
    # (Putting this inside a looped try-block to handle fuzzy failures)
    streamlines_generator = wrap_fuzzy_failures(LocalTracking,
                                                args=[csa_peaks,
                                                      classifier,
                                                      seeds],
                                                kwargs={"affine": np.eye(4),
                                                        "step_size": 0.5},
                                                errortype=ValueError,
                                                failure_threshold=5,
                                                verbose=verbose)
    streamlines = wrap_fuzzy_failures(Streamlines,
                                      args=[streamlines_generator],
                                      kwargs={},
                                      errortype=IndexError,
                                      failure_threshold=5,
                                      verbose=verbose)

    # Prune streamlines
    streamlines = ArraySequence([strline
                                 for strline in streamlines
                                 if len(strline) > prune_length])

    # Save streamlines
    save_trk(fibers + ".trk", streamlines, dwi_loaded.affine,
             shape=wm_data.shape, vox_size=wm_loaded.header.get_zooms())

    # Visualize fibers
    if plot and have_fury:
        from dipy.viz import window, actor, colormap as cmap

        color = cmap.line_colors(streamlines)
        streamlines_actor = actor.line(streamlines, color)

        # Create the 3D display.
        r = window.Renderer()
        r.add(streamlines_actor)

        # Save still image.
        window.record(r, n_frames=1, out_path=fibers + ".png",
                      size=(800, 800))
Beispiel #48
0
def plotTrk(trkFile, target, anatFile, roi=None,
        xSlice=None, ySlice=None, zSlice=None,
        xRot=None, yRot=None, zRot=None):

    anatImage = nibabel.load(anatFile)
    trkImage = [s[0] for s in nibabel.trackvis.read(trkFile, points_space='rasmm')[0]]

    ren = window.Renderer()

    trkActor = actor.line(
        trkImage, dipy.viz.colormap.line_colors(trkImage))

    if xSlice is not None:
        anatActorSliceX = actor.slicer(anatImage.get_data(), anatImage.affine)
        anatActorSliceX.display(xSlice, None, None)
        # Apply rotation
        anatActorSliceX.RotateX(xRot)
        anatActorSliceX.RotateY(yRot)
        anatActorSliceX.RotateZ(zRot)

        ren.add(anatActorSliceX)

    if ySlice is not None:
        anatActorSliceY = actor.slicer(anatImage.get_data(), anatImage.affine)
        anatActorSliceY.display(None, ySlice, None)
        # Apply rotation
        anatActorSliceY.RotateX(xRot)
        anatActorSliceY.RotateY(yRot)
        anatActorSliceY.RotateZ(zRot)

        ren.add(anatActorSliceY)

    if zSlice is not None:
        anatActorSliceZ = actor.slicer(anatImage.get_data(), anatImage.affine)
        anatActorSliceZ.display(None, None, zSlice)
        # Apply rotation
        anatActorSliceZ.RotateX(xRot)
        anatActorSliceZ.RotateY(yRot)
        anatActorSliceZ.RotateZ(zRot)

        ren.add(anatActorSliceZ)

    trkActor.RotateX(xRot)
    trkActor.RotateY(yRot)
    trkActor.RotateZ(zRot)

    ren.add(trkActor)

    # Not in dipy 0.11.0 
    # Wait until next version
    # Already fixed here: https://github.com/nipy/dipy/pull/1163
    #if roi is not None:
    #    roiImage= nibabel.load(roi)
    #    roiActor = dipy.viz.fvtk.contour(
    #            roiImage.get_data(), affine=anatomicalImage.affine, levels=[1],
    #            colors=[(1., 1., 0.)], opacities=[1.])

    #    roiActor.RotateX(xRot)
    #    roiActor.RotateY(yRot)
    #    roiActor.RotateZ(zRot)

    #    ren.add(roiActor)
    ren.set_camera(
            position=(0,0,1), focal_point=(0,0,0), view_up=(0,1,0))#, verbose=False)
    #window.record(ren, out_path=target, size=(1200, 1200), n_frames=1)
    window.snapshot(ren, fname=target, size=(1200, 1200), offscreen=True)
colored by LFBC (see :ref:`optic_radiation_before_cleaning`), and the fibers
after the cleaning procedure via RFBC thresholding (see
:ref:`optic_radiation_after_cleaning`).
"""

# Visualize the results
from dipy.viz import window, actor

# Enables/disables interactive visualization
interactive = False

# Create renderer
ren = window.Renderer()

# Original lines colored by LFBC
lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
ren.add(lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = actor.slicer(t1_data, affine=affine)
vol_actor1.display(z=20)
ren.add(vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = actor.slicer(t1_data, affine=affine)
vol_actor2.display(x=35)
ren.add(vol_actor2)

# Show original fibers
ren.set_camera(position=(-264, 285, 155),
               focal_point=(0, -14, 9),
Beispiel #50
0
def test_bundle_maps():
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100],
                    [0, 1, 0, 100],
                    [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
Beispiel #51
0
If we want to see the objects in native space we need to make sure that all
objects which are currently in world coordinates are transformed back to
native space using the inverse of the affine.
"""

if not world_coords:
    from dipy.tracking.streamline import transform_streamlines
    streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

"""
Now we create a ``Renderer`` object and add the streamlines using the ``line``
function and an image plane using the ``slicer`` function.
"""

ren = window.Renderer()
stream_actor = actor.line(streamlines)

if not world_coords:
    image_actor_z = actor.slicer(data, affine=np.eye(4))
else:
    image_actor_z = actor.slicer(data, affine)

"""
We can also change the opacity of the slicer.
"""

slicer_opacity = 0.6
image_actor_z.opacity(slicer_opacity)

"""
We can add additional slicers by copying the original and adjusting the
``display_extent``.
"""
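"""
A sketch of that step, following the pattern of DIPY's interactive
visualization tutorial (the midpoint indices are assumptions):
"""

# Sketch: sagittal and coronal slicers obtained by copying the axial one.
image_actor_x = image_actor_z.copy()
x_midpoint = int(np.round(data.shape[0] / 2))
image_actor_x.display_extent(x_midpoint, x_midpoint,
                             0, data.shape[1] - 1,
                             0, data.shape[2] - 1)

image_actor_y = image_actor_z.copy()
y_midpoint = int(np.round(data.shape[1] / 2))
image_actor_y.display_extent(0, data.shape[0] - 1,
                             y_midpoint, y_midpoint,
                             0, data.shape[2] - 1)

ren.add(stream_actor)
ren.add(image_actor_z)
ren.add(image_actor_x)
ren.add(image_actor_y)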
Beispiel #52
0
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
    """ Visualize peak directions as given from ``peaks_from_model``

    Parameters
    ----------
    peaks_dirs : ndarray
        Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
        (X, Y, M, 3) or (X, Y, Z, M, 3)
    peaks_values : ndarray
        Peak values. The shape of the array can be (M, ) or (X, M) or
        (X, Y, M) or (X, Y, Z, M)

    scale : float
        Distance between spheres

    colors : ndarray or tuple
        Peak colors

    Returns
    -------
    vtkActor

    See Also
    --------
    dipy.viz.fvtk.sphere_funcs

    """
    peaks_dirs = np.asarray(peaks_dirs)
    if peaks_dirs.ndim > 5:
        raise ValueError("Wrong shape")

    peaks_dirs = _makeNd(peaks_dirs, 5)
    if peaks_values is not None:
        peaks_values = _makeNd(peaks_values, 4)

    grid_shape = np.array(peaks_dirs.shape[:3])

    list_dirs = []

    for ijk in np.ndindex(*grid_shape):

        xyz = scale * (ijk - grid_shape / 2.)[:, None]

        xyz = xyz.T

        for i in range(peaks_dirs.shape[-2]):

            if peaks_values is not None:

                pv = peaks_values[ijk][i]

            else:

                pv = 1.

            symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
                              peaks_dirs[ijk][i] * pv + xyz))

            list_dirs.append(symm)

    return line(list_dirs, colors)
Beispiel #53
0
def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data,
                                     affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2,
                                      affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from dipy.viz.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.get_affine()

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model,
                                 data,
                                 default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks,
                                classifier,
                                seeds,
                                affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # assert that the seed ROI rendering is not far
    # away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
  fbc.get_points_rfbc_thresholded(0.125, emphasis=0.01)

"""
The results of FBC measures are visualized, showing the original fibers
colored by LFBC, and the fibers after the cleaning procedure via RFBC
thresholding.
"""

# Visualize the results
from dipy.viz import fvtk, actor

# Create renderer
ren = fvtk.ren()

# Original lines colored by LFBC
lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
fvtk.add(ren, lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = fvtk.slicer(t1_data, affine=affine)
vol_actor1.display(None, None, 20)
fvtk.add(ren, vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = fvtk.slicer(t1_data, affine=affine)
vol_actor2.display(35, None, None)
fvtk.add(ren, vol_actor2)

# Show original fibers
fvtk.camera(ren, pos=(-264, 285, 155), focal=(0, -14, 9), viewup=(0, 0, 1),
            verbose=False)
Beispiel #55
0
def test_bundle_maps():
    renderer = window.renderer()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100], [0, 1, 0, 100], [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    window.add(renderer, line)
    window.add(renderer, actor.scalar_bar(lut, ' '))

    report = window.analyze_renderer(renderer)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(renderer)

    renderer.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    renderer.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    renderer.add(line)
    # window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(renderer)

    arr = window.snapshot(renderer)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    renderer.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
def show_results(streamlines, vol, affine, world_coords=True, opacity=0.6):

    from dipy.viz import actor, window, widget
    import numpy as np
    shape = vol.shape

    if not world_coords:
        from dipy.tracking.streamline import transform_streamlines
        streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

    ren = window.Renderer()
    if streamlines is not None:
        stream_actor = actor.line(streamlines)

    if not world_coords:
        image_actor = actor.slicer(vol, affine=np.eye(4))
    else:
        image_actor = actor.slicer(vol, affine)

    slicer_opacity = opacity #.6
    image_actor.opacity(slicer_opacity)

    if streamlines is not None:
        ren.add(stream_actor)
    ren.add(image_actor)

    show_m = window.ShowManager(ren, size=(1200, 900))
    show_m.initialize()

    def change_slice(obj, event):
        z = int(np.round(obj.get_value()))
        image_actor.display_extent(0, shape[0] - 1,
                                   0, shape[1] - 1, z, z)

    slider = widget.slider(show_m.iren, show_m.ren,
                           callback=change_slice,
                           min_value=0,
                           max_value=shape[2] - 1,
                           value=shape[2] / 2,
                           label="Move slice",
                           right_normalized_pos=(.98, 0.6),
                           size=(120, 0), label_format="%0.lf",
                           color=(1., 1., 1.),
                           selected_color=(0.86, 0.33, 1.))

    global size
    size = ren.GetSize()

    def win_callback(obj, event):
        global size
        if size != obj.GetSize():

            slider.place(ren)
            size = obj.GetSize()

    show_m.initialize()

    show_m.add_window_callback(win_callback)
    show_m.render()
    show_m.start()

    # ren.zoom(1.5)
    # ren.reset_clipping_range()

    # window.record(ren, out_path='bundles_and_a_slice.png', size=(1200, 900),
    #               reset_camera=False)

    del show_m
                                                     threshold_classifier,
                                                     seeds,
                                                     affine,
                                                     step_size=.5,
                                                     return_all=True)

save_trk("deterministic_threshold_classifier_all.trk",
         all_streamlines_threshold_classifier,
         affine,
         labels.shape)

streamlines = Streamlines(all_streamlines_threshold_classifier)

if have_fury:
    window.clear(ren)
    ren.add(actor.line(streamlines, cmap.line_colors(streamlines)))
    window.record(ren, out_path='all_streamlines_threshold_classifier.png',
                  size=(600, 600))
    if interactive:
        window.show(ren)

"""
.. figure:: all_streamlines_threshold_classifier.png
   :align: center

   **Deterministic tractography using a thresholded fractional anisotropy.**
"""


"""
Binary Tissue Classifier