def show_gradients(gtab):
    """Render the gradient directions of *gtab* as red points.

    Both each gradient and its negation are plotted so the full sphere
    of sampling directions is visible.
    """
    ren = window.Renderer()
    # Add the gradients and their mirror images in the same order as before.
    for signed in (gtab.gradients, -gtab.gradients):
        ren.add(fvtk.point(signed, (1, 0, 0), point_radius=100))
    window.show(ren)
Ejemplo n.º 2
0
def visualize(fibers, outf=None):
    """
    Takes fiber streamlines and visualizes them using DiPy

    Required Arguments:
        - fibers:
            fiber streamlines in a list as returned by DiPy
    Optional Arguments:
        - outf:
            output image path; when given, the rendered scene is also
            recorded to this file after being displayed
    """
    # Initialize renderer
    renderer = window.Renderer()

    # Add streamlines as a DiPy viz object
    stream_actor = actor.line(fibers)

    # Set camera orientation properties
    # TODO: allow this as an argument
    renderer.set_camera()  # args are: position=(), focal_point=(), view_up=()

    # Add streamlines to viz session
    renderer.add(stream_actor)

    # Display fibers
    # TODO: allow size of window as an argument
    window.show(renderer, size=(600, 600), reset_camera=False)

    # Saves file, if you're into that sort of thing...
    if outf is not None:
        window.record(renderer, out_path=outf, size=(600, 600))
def simple_viewer(streamlines, vol, affine):
    """Quick-look viewer: streamlines rendered over a slice of *vol*."""
    from dipy.viz import actor, window

    scene = window.Renderer()
    scene.add(actor.line(streamlines))
    scene.add(actor.slicer(vol, affine))
    window.show(scene)
Ejemplo n.º 4
0
def test_peak_slicer(interactive=False):
    """Exercise actor.peak_slicer: slicing by extent, display(), and LOD mode."""
    # Three orthogonal unit peak directions, replicated over an 11^3 grid.
    _peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
    # peak_dirs.shape = (1, 1, 1) + peak_dirs.shape

    peak_dirs = np.zeros((11, 11, 11, 3, 3))

    peak_values = np.random.rand(11, 11, 11, 3)

    peak_dirs[:, :, :] = _peak_dirs

    renderer = window.Renderer()
    peak_actor = actor.peak_slicer(peak_dirs)
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    # Sweep the displayed slab along z, then y, then x to exercise slicing.
    renderer.clear()
    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for k in range(11):
        peak_actor.display_extent(0, 10, 0, 10, k, k)

    for j in range(11):
        peak_actor.display_extent(0, 10, j, j, 0, 10)

    for i in range(11):
        peak_actor.display(i, None, None)

    renderer.rm_all()

    # Rebuild with the full keyword surface (values, affine, LOD rendering).
    peak_actor = actor.peak_slicer(
        peak_dirs,
        peak_values,
        mask=None,
        affine=np.diag([3, 2, 1, 1]),
        colors=None,
        opacity=1,
        linewidth=3,
        lod=True,
        lod_points=10 ** 4,
        lod_points_size=3)

    renderer.add(peak_actor)
    renderer.add(actor.axes((11, 11, 11)))
    if interactive:
        window.show(renderer)

    # lod=True should produce a vtkLODActor; the axes contribute the others.
    report = window.analyze_renderer(renderer)
    ex = ['vtkLODActor', 'vtkOpenGLActor', 'vtkOpenGLActor', 'vtkOpenGLActor']
    npt.assert_equal(report.actors_classnames, ex)
Ejemplo n.º 5
0
def test_labels(interactive=False):
    """Smoke-test actor.label: exactly one text actor ends up in the scene."""
    label_actor = actor.label("Hello")

    ren = window.Renderer()
    ren.add(label_actor)
    ren.reset_camera()
    ren.reset_clipping_range()

    if interactive:
        window.show(ren, reset_camera=False)

    npt.assert_equal(ren.GetActors().GetNumberOfItems(), 1)
Ejemplo n.º 6
0
def show_both_bundles(bundles, colors=None, show=True, fname=None):
    """Render several bundles as streamtubes on a white background.

    Parameters
    ----------
    bundles : sequence of streamline lists
    colors : sequence, optional
        One color per bundle. When None, streamtube's default coloring is
        used (previously colors=None crashed with TypeError at colors[i]).
    show : bool
        Open an interactive window.
    fname : str, optional
        When given, record the scene to this image file.
    """
    ren = window.Renderer()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        # Fall back to the actor's default coloring when no colors are given.
        color = colors[i] if colors is not None else None
        lines_actor = actor.streamtube(bundle, color, linewidth=0.3)
        # Rotate into the conventional anatomical viewing orientation.
        lines_actor.RotateX(-90)
        lines_actor.RotateZ(90)
        ren.add(lines_actor)
    if show:
        window.show(ren)
    if fname is not None:
        # Give the window a moment to render before capturing.
        sleep(1)
        window.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Ejemplo n.º 7
0
def show_two_images(vol1, affine1, vol2, affine2, shift=50):
    """ Show 2 images side by side"""

    def _auto_range(vol):
        # Contrast window from foreground voxels: mean-0.5*std .. mean+1.5*std.
        foreground = vol[vol > 0]
        mu, sigma = foreground.mean(), foreground.std()
        return (mu - 0.5 * sigma, mu + 1.5 * sigma)

    ren = window.Renderer()

    slicer1 = actor.slicer(vol1, affine1, _auto_range(vol1))
    slicer2 = actor.slicer(vol2, affine2, _auto_range(vol2))

    ren.add(slicer1)
    ren.add(slicer2)

    # Offset the second slice along x so the two images sit side by side.
    slicer2.SetPosition(slicer1.shape[0] + shift, 0, 0)

    window.show(ren)
Ejemplo n.º 8
0
def show_bundles(bundles, colors=None, size=(1080, 600),
                 show=False, fname=None):
    """Render a list of bundles as line actors on a white background.

    Parameters
    ----------
    bundles : sequence of streamline lists
    colors : sequence, optional
        One color per bundle. When None, actor.line's default coloring is
        used (previously colors=None crashed with TypeError at colors[i]).
    size : tuple of int
        Window / recording size in pixels.
    show : bool
        When True, open an interactive window. (Previously this flag was
        ignored -- the `if show:` guard was commented out and the window
        always opened.)
    fname : str, optional
        When given, record the scene to this image file.
    """
    ren = window.Renderer()
    ren.background((1., 1, 1))

    for (i, bundle) in enumerate(bundles):
        color = colors[i] if colors is not None else None
        lines = actor.line(bundle, color, linewidth=1.5)
        ren.add(lines)

    ren.reset_clipping_range()
    ren.reset_camera()

    if show:
        window.show(ren, size=size, reset_camera=True)

    if fname is not None:
        window.record(ren, n_frames=1, out_path=fname, size=size)
Ejemplo n.º 9
0
def test_points(interactive=False):
    """Three colored points should be detected as three objects in a snapshot."""
    pts = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    cols = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    pts_actor = actor.point(pts,  cols)

    ren = window.Renderer()
    ren.add(pts_actor)
    ren.reset_camera()
    ren.reset_clipping_range()

    if interactive:
        window.show(ren, reset_camera=False)

    # A single point actor holds all three points.
    npt.assert_equal(ren.GetActors().GetNumberOfItems(), 1)

    snap = window.snapshot(ren)
    report = window.analyze_snapshot(snap,
                                     colors=cols)
    npt.assert_equal(report.objects, 3)
Ejemplo n.º 10
0
def show_mosaic(data, affine, border=70, rows=10, cols=15):
    """ Show a simple mosaic of the axial slices of the given image.

    Parameters
    ----------
    data : ndarray
        3D image volume.
    affine : ndarray
        4x4 voxel-to-world transform.
    border : int
        Gap in display units between tiles. (Previously this parameter was
        dead: a hard-coded ``border = 70`` inside the body shadowed it.)
    rows, cols : int
        Mosaic grid dimensions (new, backward-compatible parameters).
    """
    renderer = window.Renderer()
    # Contrast window from foreground statistics.
    mean, std = data[data > 0].mean(), data[data > 0].std()
    value_range = (mean - 0.5 * std, mean + 1.5 * std)
    slice_actor = actor.slicer(data, affine, value_range)

    renderer.clear()
    renderer.projection('parallel')
    cnt = 0

    X, Y, Z = slice_actor.shape[:3]

    for j in range(rows):
        for i in range(cols):
            # One copied slicer per tile, showing axial slice `cnt`.
            slice_mosaic = slice_actor.copy()
            slice_mosaic.display(None, None, cnt)
            slice_mosaic.SetPosition(
                (X + border) * i,
                0.5 * cols * (Y + border) - (Y + border) * j,
                0)
            renderer.add(slice_mosaic)
            cnt += 1
            # Stop once every slice is placed. The original compared with
            # `cnt > Z`, which displayed one out-of-range slice index Z.
            if cnt >= Z:
                break
        if cnt >= Z:
            break

    renderer.reset_camera()
    renderer.zoom(1.6)

    window.show(renderer, size=(900, 600), reset_camera=False)
Ejemplo n.º 11
0
def test_dots(interactive=False):
    """Exercise actor.dots with multiple points and with a single point."""
    points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])

    dots_actor = actor.dots(points, color=(0, 255, 0))

    renderer = window.Renderer()
    renderer.add(dots_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    # All three dots live in one actor.
    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    # Bounds should span the unit square in x/y with zero z extent.
    extent = renderer.GetActors().GetLastActor().GetBounds()
    npt.assert_equal(extent, (0.0, 1.0, 0.0, 1.0, 0.0, 0.0))

    # Three green blobs should be found in the rendered snapshot.
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=(0, 255, 0))
    npt.assert_equal(report.objects, 3)

    # Test one point
    points = np.array([0, 0, 0])
    dot_actor = actor.dots(points, color=(0, 0, 255))

    renderer.clear()
    renderer.add(dot_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr,
                                     colors=(0, 0, 255))
    npt.assert_equal(report.objects, 1)
def simple_viewer(streamlines, vol, affine):
    # Render streamlines over an image slice in a single window.
    # NOTE(review): everything from `n = len(vert_list)` downward looks like a
    # fragment of an unrelated sphere-mesh-building script pasted inside this
    # function; it references names (vert_list, edge_list, a, b, v, z_new)
    # that are never defined here and would raise NameError if reached.
    # Left byte-identical pending confirmation against the original sources.
    renderer = window.Renderer()
    renderer.add(actor.line(streamlines))
    renderer.add(actor.slicer(vol, affine))
    window.show(renderer)
    n = len(vert_list)
    vert_list.append([a * np.sin(np.deg2rad(v)), 0.0, z_new])
    vert_list.append([0.0, b * np.sin(np.deg2rad(v)), z_new])
    vert_list.append([-1 * a * np.sin(np.deg2rad(v)), 0.0, z_new])
    vert_list.append([0.0, -1 * b * np.sin(np.deg2rad(v)), z_new])
    edge_list.append([n - 4, n, n - 3])
    edge_list.append([n, n + 1, n - 3])
    edge_list.append([n - 3, n + 1, n - 2])
    edge_list.append([n + 1, n + 2, n - 2])
    edge_list.append([n - 2, n + 2, n - 1])
    edge_list.append([n + 2, n + 3, n - 1])
    edge_list.append([n - 1, n + 3, n - 4])
    edge_list.append([n + 3, n, n - 4])

# Convert the vertex/triangle lists built above into numpy arrays.
my_vertices = np.array(vert_list)
my_triangles = np.array(edge_list)

# Attach geometry to the polydata (triangle indices must be integer typed).
ut_vtk.set_polydata_vertices(my_polydata, my_vertices)
ut_vtk.set_polydata_triangles(my_polydata, my_triangles.astype('i8'))

# Per-vertex colors derived from the vertex coordinates scaled by 255.
sphere_vertices = ut_vtk.get_polydata_vertices(my_polydata)
colors = sphere_vertices * 255
ut_vtk.set_polydata_colors(my_polydata, colors)

sphere_actor = ut_vtk.get_actor_from_polydata(my_polydata)

# renderer and scene
renderer = window.Renderer()
renderer.add(sphere_actor)
window.show(renderer, size=(600, 600), reset_camera=False)
Ejemplo n.º 14
0
# Visualize the estimated response function as a single ODF glyph.
ren = window.Renderer()
evals = response[0]
# Eigenvector frame for the single-tensor model (columns are eigenvectors).
evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T

response_odf = single_tensor_odf(default_sphere.vertices, evals, evecs)
# transform our data from 1D to 4D
response_odf = response_odf[None, None, None, :]
response_actor = actor.odf_slicer(response_odf,
                                  sphere=default_sphere,
                                  colormap='plasma')
ren.add(response_actor)
print('Saving illustration as csd_response.png')
window.record(ren, out_path='csd_response.png', size=(200, 200))
if interactive:
    window.show(ren)
"""
.. figure:: csd_response.png
   :align: center

   Estimated response function.

"""

# Remove the actor so the renderer can be reused for the next illustration.
ren.rm(response_actor)
"""
**Strategy 2 - data-driven calibration of response function** Depending
on the dataset, FA threshold may not be the best way to find the best possible
response function. For one, it depends on the diffusion tensor
(FA and first eigenvector), which has lower accuracy at high
b-values. Alternatively, the response function can be calibrated in a
Ejemplo n.º 15
0
# Inspect the multi-shell CSD ODF array, then render the ODFs as glyphs.
print(mcsd_odf.shape)
print(mcsd_odf[40, 40, 0])

fodf_spheres = actor.odf_slicer(mcsd_odf, sphere=sphere, scale=1,
                                norm=False, colormap='plasma')

# Toggle to True to open an interactive window instead of just recording.
interactive = False
scene = window.Scene()
scene.add(fodf_spheres)
scene.reset_camera_tight()

print('Saving illustration as msdodf.png')
window.record(scene, out_path='msdodf.png', size=(600, 600))

if interactive:
    window.show(scene)

"""
.. figure:: msdodf.png
   :align: center

   MSMT-CSD Peaks and ODFs.

References
----------

.. [Jeurissen2014] B. Jeurissen, et al., "Multi-tissue constrained spherical
                    deconvolution for improved analysis of multi-shell
                    diffusion MRI data." NeuroImage 103 (2014): 411-426.

.. [Tournier2007] J-D. Tournier, F. Calamante and A. Connelly, "Robust
Ejemplo n.º 16
0
# Show the z=35 slice of the second volume actor.
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)

# Save figures
window.record(r,
              n_frames=1,
              out_path='corpuscallosum_axial.png',
              size=(800, 800))
if interactive:
    window.show(r)
# Re-aim the camera along -x for a sagittal view before recording again.
r.set_camera(position=[-1, 0, 0], focal_point=[0, 0, 0], view_up=[0, 0, 1])
window.record(r,
              n_frames=1,
              out_path='corpuscallosum_sagittal.png',
              size=(800, 800))
if interactive:
    window.show(r)
"""
.. figure:: corpuscallosum_axial.png
   :align: center

   **Corpus Callosum Axial**

.. include:: ../links_names.inc
Ejemplo n.º 17
0
def test_tensor_slicer(interactive=False):
    """Exercise actor.tensor_slicer: extent slicing, masking and display()."""
    # A single prolate tensor (one large, two small eigenvalues), replicated
    # over a small 3x2x4 grid with identity eigenvectors.
    evals = np.array([1.4, .35, .35]) * 10**(-3)
    evecs = np.eye(3)

    mevals = np.zeros((3, 2, 4, 3))
    mevecs = np.zeros((3, 2, 4, 3, 3))

    mevals[..., :] = evals
    mevecs[..., :, :] = evecs

    from dipy.data import get_sphere

    sphere = get_sphere('symmetric724')

    affine = np.eye(4)
    renderer = window.Renderer()

    tensor_actor = actor.tensor_slicer(mevals,
                                       mevecs,
                                       affine=affine,
                                       sphere=sphere,
                                       scale=.3)
    I, J, K = mevals.shape[:3]
    renderer.add(tensor_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    tensor_actor.display_extent(0, 1, 0, J, 0, K)
    tensor_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    # Test extent
    # Restricting the displayed x-plane must shrink the actor's x bounds.
    big_extent = renderer.GetActors().GetLastActor().GetBounds()
    big_extent_x = abs(big_extent[1] - big_extent[0])
    tensor_actor.display(x=2)

    if interactive:
        window.show(renderer, reset_camera=False)

    small_extent = renderer.GetActors().GetLastActor().GetBounds()
    small_extent_x = abs(small_extent[1] - small_extent[0])
    npt.assert_equal(big_extent_x > small_extent_x, True)

    # Test empty mask
    # An all-zero mask leaves nothing to render, so no mapper is created.
    empty_actor = actor.tensor_slicer(mevals,
                                      mevecs,
                                      affine=affine,
                                      mask=np.zeros(mevals.shape[:3]),
                                      sphere=sphere,
                                      scale=.3)
    npt.assert_equal(empty_actor.GetMapper(), None)

    # Test mask
    # Masking out part of the grid must also shrink the x bounds.
    mask = np.ones(mevals.shape[:3])
    mask[:2, :3, :3] = 0
    cfa = color_fa(fractional_anisotropy(mevals), mevecs)
    tensor_actor = actor.tensor_slicer(mevals,
                                       mevecs,
                                       affine=affine,
                                       mask=mask,
                                       scalar_colors=cfa,
                                       sphere=sphere,
                                       scale=.3)
    renderer.clear()
    renderer.add(tensor_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    mask_extent = renderer.GetActors().GetLastActor().GetBounds()
    mask_extent_x = abs(mask_extent[1] - mask_extent[0])
    npt.assert_equal(big_extent_x > mask_extent_x, True)

    # test display
    # display() with no args, then with y/z planes: each displayed extent
    # must be strictly smaller than the full (unmasked) bounds on that axis.
    tensor_actor.display()
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_x = abs(current_extent[1] - current_extent[0])
    npt.assert_equal(big_extent_x > current_extent_x, True)
    if interactive:
        window.show(renderer, reset_camera=False)

    tensor_actor.display(y=1)
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_y = abs(current_extent[3] - current_extent[2])
    big_extent_y = abs(big_extent[3] - big_extent[2])
    npt.assert_equal(big_extent_y > current_extent_y, True)
    if interactive:
        window.show(renderer, reset_camera=False)

    tensor_actor.display(z=1)
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_z = abs(current_extent[5] - current_extent[4])
    big_extent_z = abs(big_extent[5] - big_extent[4])
    npt.assert_equal(big_extent_z > current_extent_z, True)
    if interactive:
        window.show(renderer, reset_camera=False)
Ejemplo n.º 18
0
def test_odf_slicer(interactive=False):
    """Exercise actor.odf_slicer with memmapped ODFs, masks and colormaps."""
    sphere = get_sphere('symmetric362')

    # One constant ODF per voxel of an 11^3 grid, backed by a temp memmap.
    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)

    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    # Mask out one corner of the grid.
    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs,
                                 affine,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet')
    # FA-like scalar volume: bright frame around the edges plus one voxel.
    fa = 0. * np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    # Show only the z=k slice: one glyph per voxel of an 11x11 plane.
    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    # Single-voxel mask combined with an FA slice, no affine this time.
    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet',
                                 norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    # Full mask again; sweep displayed planes along x and then y.
    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet',
                                 norm=False,
                                 global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)
    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='plasma',
                                 norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    # Release the memmap and its file descriptor, then delete the temp file.
    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)

    os.remove(fname)
Ejemplo n.º 19
0
def test_tensor_slicer(interactive=False):
    """Exercise actor.tensor_slicer: extent slicing, masking and display().

    Duplicate of the earlier test_tensor_slicer with compacted call formatting.
    """
    # A single prolate tensor replicated over a small 3x2x4 grid.
    evals = np.array([1.4, .35, .35]) * 10 ** (-3)
    evecs = np.eye(3)

    mevals = np.zeros((3, 2, 4, 3))
    mevecs = np.zeros((3, 2, 4, 3, 3))

    mevals[..., :] = evals
    mevecs[..., :, :] = evecs

    from dipy.data import get_sphere

    sphere = get_sphere('symmetric724')

    affine = np.eye(4)
    renderer = window.Renderer()

    tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                       sphere=sphere,  scale=.3)
    I, J, K = mevals.shape[:3]
    renderer.add(tensor_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    tensor_actor.display_extent(0, 1, 0, J, 0, K)
    tensor_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    npt.assert_equal(renderer.GetActors().GetNumberOfItems(), 1)

    # Test extent
    # Restricting the displayed x-plane must shrink the actor's x bounds.
    big_extent = renderer.GetActors().GetLastActor().GetBounds()
    big_extent_x = abs(big_extent[1] - big_extent[0])
    tensor_actor.display(x=2)

    if interactive:
        window.show(renderer, reset_camera=False)

    small_extent = renderer.GetActors().GetLastActor().GetBounds()
    small_extent_x = abs(small_extent[1] - small_extent[0])
    npt.assert_equal(big_extent_x > small_extent_x, True)

    # Test empty mask
    # An all-zero mask leaves nothing to render, so no mapper is created.
    empty_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
                                      mask=np.zeros(mevals.shape[:3]),
                                      sphere=sphere,  scale=.3)
    npt.assert_equal(empty_actor.GetMapper(), None)

    # Test mask
    # Masking out part of the grid must also shrink the x bounds.
    mask = np.ones(mevals.shape[:3])
    mask[:2, :3, :3] = 0
    cfa = color_fa(fractional_anisotropy(mevals), mevecs)
    tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine, mask=mask,
                                       scalar_colors=cfa, sphere=sphere,  scale=.3)
    renderer.clear()
    renderer.add(tensor_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    if interactive:
        window.show(renderer, reset_camera=False)

    mask_extent = renderer.GetActors().GetLastActor().GetBounds()
    mask_extent_x = abs(mask_extent[1] - mask_extent[0])
    npt.assert_equal(big_extent_x > mask_extent_x, True)

    # test display
    # display() with no args, then with y/z planes: each displayed extent
    # must be strictly smaller than the full (unmasked) bounds on that axis.
    tensor_actor.display()
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_x = abs(current_extent[1] - current_extent[0])
    npt.assert_equal(big_extent_x > current_extent_x, True)
    if interactive:
        window.show(renderer, reset_camera=False)

    tensor_actor.display(y=1)
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_y = abs(current_extent[3] - current_extent[2])
    big_extent_y = abs(big_extent[3] - big_extent[2])
    npt.assert_equal(big_extent_y > current_extent_y, True)
    if interactive:
        window.show(renderer, reset_camera=False)

    tensor_actor.display(z=1)
    current_extent = renderer.GetActors().GetLastActor().GetBounds()
    current_extent_z = abs(current_extent[5] - current_extent[4])
    big_extent_z = abs(big_extent[5] - big_extent[4])
    npt.assert_equal(big_extent_z > current_extent_z, True)
    if interactive:
        window.show(renderer, reset_camera=False)
Ejemplo n.º 20
0
# Load streamlines from a .fib file, keep those longer than min_length,
# subsample them for speed and render as streamtubes.
polydata_in = load_polydata(fib_file_name)
streamlines_in = get_streamlines(polydata_in)
streamlines = []

for line in streamlines_in:
    # Sum of segment lengths along the polyline.
    dist = line[:-1] - line[1:]
    line_length = np.sum(np.sqrt(np.sum(np.square(dist), axis=1)))

    if line_length > min_length:
        streamlines.append(line)

# Subsample for display speed (every 100th streamline).
streamlines_sub = streamlines[::100]

# Was a Python-2 print statement (`print len(...)`) -- a SyntaxError on
# Python 3; converted to the print() function.
print(len(streamlines_in), len(streamlines_sub))

# NOTE(review): this rebinds the name `actor`, shadowing any imported
# dipy.viz actor module -- kept as-is for compatibility with the original.
actor = streamtube(streamlines_sub, linewidth=linewidth,
                   tube_sides=tube_sides, spline_subdiv=spline_subdiv)

renderer = window.Renderer()
renderer.add(actor)
my_window = window.show(renderer)

# Optional export paths left over from earlier experiments:
#objexporter = vtk.vtkOBJExporter()
#objexporter.SetInput(my_window)
#objexporter.SetFileName(save_file)
#objexporter.Write()

#save_polydata(actor.GetMapper().GetInput(), save_file)
Ejemplo n.º 21
0
"""

# One tracking seed per voxel where FA exceeds 0.3.
seeds = random_seeds_from_mask(fa > 0.3, seeds_count=1)

"""
For quality assurance we can also visualize a slice from the direction field
which we will use as the basis to perform the tracking.
"""

ren = window.Renderer()
ren.add(actor.peak_slicer(csd_peaks.peak_dirs,
                          csd_peaks.peak_values,
                          colors=None))

# Show interactively, or just record the illustration to disk.
if interactive:
    window.show(ren, size=(900, 900))
else:
    window.record(ren, out_path='csd_direction_field.png', size=(900, 900))

"""
.. figure:: csd_direction_field.png
 :align: center

 **Direction Field (peaks)**

``EuDX`` [Garyfallidis12]_ is a fast algorithm that we use here to generate
streamlines. This algorithm is what is used here and the default option
when providing the output of peaks directly in LocalTracking.
"""

streamline_generator = LocalTracking(csd_peaks, tissue_classifier,
Ejemplo n.º 22
0
"""
`actor.line` creates a streamline actor for streamline visualization
and `ren.add` adds this actor to the scene
"""

ren.add(actor.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')

ren.SetBackground(1, 1, 1)
window.record(ren, out_path='tensor_tracks.png', size=(600, 600))
# Enables/disables interactive visualization
interactive = False
if interactive:
    window.show(ren)

"""
.. figure:: tensor_tracks.png
   :align: center

   Deterministic streamlines with EuDX on a Tensor Field.

References
----------

.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography",
   PhD thesis, University of Cambridge, 2012.

.. include:: ../links_names.inc
Ejemplo n.º 23
0
def show_image(data, affine=None):
    """Display *data* (optionally placed by *affine*) in a slicer window."""
    scene = window.Renderer()
    scene.add(actor.slicer(data, affine))
    window.show(scene)
Ejemplo n.º 24
0
be done using the ``fury`` python package
"""

from dipy.viz import window, actor, has_fury

# Only attempt rendering when the optional fury backend is installed.
if has_fury:
    scene = window.Scene()
    scene.add(
        actor.peak_slicer(csa_peaks.peak_dirs,
                          csa_peaks.peak_values,
                          colors=None))

    window.record(scene, out_path='csa_direction_field.png', size=(900, 900))

    if interactive:
        window.show(scene, size=(800, 800))
"""
.. figure:: csa_direction_field.png
 :align: center

 **Direction Field (peaks)**
"""
"""
2. Next we need some way of restricting the fiber tracking to areas with good
directionality information. We've already created the white matter mask,
but we can go a step further and restrict fiber tracking to those areas where
the ODF shows significant restricted diffusion by thresholding on
the generalized fractional anisotropy (GFA).
"""

from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
Ejemplo n.º 25
0
be done using the ``fury`` python package
"""

from dipy.viz import window, actor, has_fury

# Only attempt rendering when the optional fury backend is installed.
# (Older variant of the previous snippet, using Renderer instead of Scene.)
if has_fury:
    ren = window.Renderer()
    ren.add(
        actor.peak_slicer(csa_peaks.peak_dirs,
                          csa_peaks.peak_values,
                          colors=None))

    window.record(ren, out_path='csa_direction_field.png', size=(900, 900))

    if interactive:
        window.show(ren, size=(800, 800))
"""
.. figure:: csa_direction_field.png
 :align: center

 **Direction Field (peaks)**
"""
"""
2. Next we need some way of restricting the fiber tracking to areas with good
directionality information. We've already created the white matter mask,
but we can go a step further and restrict fiber tracking to those areas where
the ODF shows significant restricted diffusion by thresholding on
the generalized fractional anisotropy (GFA).
"""

from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
Ejemplo n.º 26
0
    def visualize(self,
                  out_path='out/',
                  outer_box=True,
                  axes=True,
                  clip_neg=False,
                  azimuth=0,
                  elevation=0,
                  n_frames=1,
                  mag=1,
                  video=False,
                  viz_type='ODF',
                  mask=None,
                  mask_roi=None,
                  skip_n=1,
                  skip_n_roi=1,
                  scale=1,
                  roi_scale=1,
                  zoom_start=1.0,
                  zoom_end=1.0,
                  top_zoom=1,
                  interact=False,
                  save_parallels=False,
                  my_cam=None,
                  compress=True,
                  roi=None,
                  corner_text='',
                  scalemap=None,
                  titles_on=True,
                  scalebar_on=True,
                  invert=False,
                  flat=False,
                  colormap='bwr',
                  global_cm=True,
                  camtilt=False,
                  axes_on=False,
                  colors=None,
                  arrows=None,
                  arrow_color=np.array([0, 0, 0]),
                  linewidth=0.1,
                  mark_slices=None,
                  z_shift=0,
                  profiles=[],
                  markers=[],
                  marker_colors=[],
                  marker_scale=1,
                  normalize_glyphs=True,
                  gamma=1,
                  density_max=1):
        """Render ``self.f`` with VTK and write TIFF frame(s) to ``out_path``.

        Each entry of ``viz_type`` ('ODF', 'ODF Sphere', 'Ellipsoid', 'Peak',
        'Principal', 'Density') becomes one column of the render window; a
        second row showing the ``roi`` sub-volume is added when ``roi`` is
        given.  Frames are written as TIFFs — one per frame for movie mode,
        or 'yz'/'xy'/'xz' parallel projections when ``save_parallels`` is
        True — and optionally assembled into an AVI with ffmpeg when
        ``video`` is True.

        Returns
        -------
        my_cam
            The camera parameters that were passed in, unchanged (or None).

        Notes
        -----
        - The ``zoom_start``/``zoom_end`` parameters are shadowed by local
          lists below and are therefore effectively ignored; zoom is derived
          from ``top_zoom`` — TODO confirm this is intended.
        - ``clip_neg`` is currently unused.
        - The mutable defaults (``profiles=[]``, ``markers=[]``,
          ``marker_colors=[]``, ``arrow_color=np.array(...)``) are never
          mutated here so they are safe, but callers should prefer passing
          their own sequences.
        - ``marker_colors`` is indexed as ``marker_colors[i, :]`` below, so
          callers supplying markers must pass an (N, 3) array — TODO confirm.
        """
        log.info('Preparing to render ' + out_path)

        # Handle scalemap: default to the data range of the 0th coefficient.
        if scalemap is None:
            scalemap = util.ScaleMap(min=np.min(self.f[..., 0]),
                                     max=np.max(self.f[..., 0]))

        # Prepare output directory
        util.mkdir(out_path)

        # Setup vtk render window (off-screen unless interacting)
        renWin = vtk.vtkRenderWindow()

        if not interact:
            renWin.SetOffScreenRendering(1)
        if isinstance(viz_type, str):
            viz_type = [viz_type]

        # Rows and columns: one column per viz_type, extra row for the ROI.
        cols = len(viz_type)
        if roi is None:
            rows = 1
        else:
            rows = 2

        # np.int was removed from NumPy (1.20 deprecation); use builtin int.
        renWin.SetSize(int(500 * mag * cols), int(500 * mag * rows))

        # Select background color
        if save_parallels:
            bg_color = [1, 1, 1]
            line_color = np.array([0, 0, 0])
            line_bcolor = np.array([1, 1, 1])
        else:
            if not invert:
                bg_color = [0, 0, 0]
                line_color = np.array([1, 1, 1])
                line_bcolor = np.array([0, 0, 0])
            else:
                bg_color = [1, 1, 1]
                line_color = np.array([0, 0, 0])
                line_bcolor = np.array([1, 1, 1])

        # For each viz_type build a renderer in its own viewport.
        # NOTE: these lists shadow the zoom_start/zoom_end parameters.
        rens = []
        zoom_start = []
        zoom_end = []
        for row in range(rows):
            for col in range(cols):
                # Render
                ren = window.Scene()
                rens.append(ren)
                # String equality, not identity ('is' on literals is fragile).
                if viz_type[col] == 'Density':
                    ren.background([0, 0, 0])
                    line_color = np.array([1, 1, 1])
                else:
                    ren.background(bg_color)
                ren.SetViewport(col / cols, (rows - row - 1) / rows,
                                (col + 1) / cols, (rows - row) / rows)
                renWin.AddRenderer(ren)
                iren = vtk.vtkRenderWindowInteractor()
                iren.SetRenderWindow(renWin)

                # Mask (np.bool was removed from NumPy; use builtin bool)
                if mask is None:
                    mask = np.ones((self.X, self.Y, self.Z), dtype=bool)
                if mask_roi is None:
                    mask_roi = mask

                # Main vs roi: top row renders the full volume subsampled by
                # skip_n; bottom row renders the roi subsampled by skip_n_roi.
                if row == 0:
                    data = self.f
                    skip_mask = np.zeros(mask.shape, dtype=bool)
                    skip_mask[::skip_n, ::skip_n, ::skip_n] = 1
                    my_mask = np.logical_and(mask, skip_mask)
                    # Guarantee at least one voxel so the actors can build.
                    if np.sum(my_mask) == 0:
                        my_mask[0, 0, 0] = True
                else:
                    data = self.f[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1],
                                  roi[0][2]:roi[1][2], :]
                    roi_mask = mask_roi[roi[0][0]:roi[1][0],
                                        roi[0][1]:roi[1][1],
                                        roi[0][2]:roi[1][2]]
                    skip_mask = np.zeros(roi_mask.shape, dtype=bool)
                    skip_mask[::skip_n_roi, ::skip_n_roi, ::skip_n_roi] = 1
                    my_mask = np.logical_and(roi_mask, skip_mask)
                    scale = roi_scale

                # Add visuals to renderer
                if viz_type[col] == "ODF":
                    renWin.SetMultiSamples(4)
                    log.info('Rendering ' + str(np.sum(my_mask)) + ' ODFs')
                    fodf_spheres = viz.odf_sparse(data,
                                                  self.Binv,
                                                  sphere=self.sphere,
                                                  scale=skip_n * scale * 0.5,
                                                  norm=False,
                                                  colormap=colormap,
                                                  mask=my_mask,
                                                  global_cm=global_cm,
                                                  scalemap=scalemap,
                                                  odf_sphere=False,
                                                  flat=flat,
                                                  normalize=normalize_glyphs)

                    ren.add(fodf_spheres)
                elif viz_type[col] == "ODF Sphere":
                    renWin.SetMultiSamples(4)
                    log.info('Rendering ' + str(np.sum(my_mask)) + ' ODFs')
                    fodf_spheres = viz.odf_sparse(data,
                                                  self.Binv,
                                                  sphere=self.sphere,
                                                  scale=skip_n * scale * 0.5,
                                                  norm=False,
                                                  colormap=colormap,
                                                  mask=my_mask,
                                                  global_cm=global_cm,
                                                  scalemap=scalemap,
                                                  odf_sphere=True,
                                                  flat=flat)
                    ren.add(fodf_spheres)
                elif viz_type[col] == "Ellipsoid":
                    renWin.SetMultiSamples(4)
                    log.info(
                        'Warning: scaling is not implemented for ellipsoids')
                    log.info('Rendering ' + str(np.sum(my_mask)) +
                             ' ellipsoids')
                    fodf_peaks = viz.tensor_slicer_sparse(data,
                                                          sphere=self.sphere,
                                                          scale=skip_n *
                                                          scale * 0.5,
                                                          mask=my_mask)
                    ren.add(fodf_peaks)
                elif viz_type[col] == "Peak":
                    renWin.SetMultiSamples(4)
                    log.info('Rendering ' + str(np.sum(my_mask)) + ' peaks')
                    fodf_peaks = viz.peak_slicer_sparse(
                        data,
                        self.Binv,
                        self.sphere.vertices,
                        linewidth=linewidth,
                        scale=skip_n * scale * 0.5,
                        colors=colors,
                        mask=my_mask,
                        scalemap=scalemap,
                        normalize=normalize_glyphs)
                    fodf_peaks.GetProperty().LightingOn()
                    fodf_peaks.GetProperty().SetDiffuse(
                        0.4)  # Doesn't work (VTK bug I think)
                    fodf_peaks.GetProperty().SetAmbient(0.15)
                    fodf_peaks.GetProperty().SetSpecular(0)
                    fodf_peaks.GetProperty().SetSpecularPower(0)

                    ren.add(fodf_peaks)
                elif viz_type[col] == "Principal":
                    log.info(
                        'Warning: scaling is not implemented for principals')
                    log.info('Rendering ' + str(np.sum(my_mask)) +
                             ' principals')
                    fodf_peaks = viz.principal_slicer_sparse(
                        data,
                        self.Binv,
                        self.sphere.vertices,
                        scale=skip_n * scale * 0.5,
                        mask=my_mask)
                    ren.add(fodf_peaks)
                elif viz_type[col] == "Density":
                    renWin.SetMultiSamples(0)  # Must be zero for smooth
                    # renWin.SetAAFrames(4) # Slow antialiasing for volume renders
                    log.info('Rendering density')
                    # Gamma-correct positive densities only.
                    gamma_corr = np.where(data[..., 0] > 0,
                                          data[..., 0]**gamma, data[..., 0])
                    scalemap.max = density_max * scalemap.max**gamma
                    volume = viz.density_slicer(gamma_corr, scalemap)
                    ren.add(volume)

                # np.float was removed from NumPy; use builtin float.
                X = float(data.shape[0])
                Y = float(data.shape[1])
                Z = float(data.shape[2]) - z_shift

                # Titles
                if row == 0 and titles_on:
                    viz.add_text(ren, viz_type[col], 0.5, 0.96, mag)

                # Scale bar (rightmost column only)
                if col == cols - 1 and not save_parallels and scalebar_on:
                    yscale = 1e-3 * self.vox_dim[1] * data.shape[1]
                    yscale_label = '{:.2g}'.format(yscale) + ' um'
                    viz.add_text(ren, yscale_label, 0.5, 0.03, mag)
                    viz.draw_scale_bar(ren, X, Y, Z, [1, 1, 1])

                # Corner text
                if row == rows - 1 and col == 0 and titles_on:
                    viz.add_text(ren, corner_text, 0.03, 0.03, mag, ha='left')

                # Draw boxes
                Nmax = np.max([X, Y, Z])
                if outer_box:
                    if row == 0:
                        viz.draw_outer_box(
                            ren,
                            np.array([[0, 0, 0], [X, Y, Z]]) - 0.5, line_color)
                    if row == 1:
                        viz.draw_outer_box(
                            ren,
                            np.array([[0, 0, 0], [X, Y, Z]]) - 0.5, [0, 1, 1])

                # Add colored axes
                if axes:
                    viz.draw_axes(ren, np.array([[0, 0, 0], [X, Y, Z]]) - 0.5)

                # Add custom arrows
                if arrows is not None:
                    for i in range(arrows.shape[0]):
                        viz.draw_single_arrow(ren,
                                              arrows[i, 0, :],
                                              arrows[i, 1, :],
                                              color=arrow_color)
                        viz.draw_unlit_line(ren, [
                            np.array([arrows[i, 0, :], [X / 2, Y / 2, Z / 2]])
                        ], [arrow_color],
                                            lw=0.3,
                                            scale=1.0)

                # Draw roi box on the full-volume row
                if row == 0 and roi is not None:
                    maxROI = np.max([
                        roi[1][0] - roi[0][0], roi[1][1] - roi[0][1],
                        roi[1][2] - roi[0][2]
                    ])
                    maxXYZ = np.max([self.X, self.Y, self.Z])
                    viz.draw_outer_box(ren,
                                       roi, [0, 1, 1],
                                       lw=0.3 * maxXYZ / maxROI)
                    viz.draw_axes(ren, roi, lw=0.3 * maxXYZ / maxROI)

                # Draw marked slices: a circle in the XZ plane at each slice
                if mark_slices is not None:
                    for slicen in mark_slices:
                        md = np.max((X, Z))
                        frac = slicen / data.shape[1]
                        rr = 0.83 * md
                        t1 = 0
                        t2 = np.pi / 2
                        t3 = np.pi
                        t4 = 3 * np.pi / 2
                        points = [
                            np.array([[
                                X / 2 + rr * np.cos(t1), frac * Y,
                                Z / 2 + rr * np.sin(t1)
                            ],
                                      [
                                          X / 2 + rr * np.cos(t2), frac * Y,
                                          Z / 2 + rr * np.sin(t2)
                                      ],
                                      [
                                          X / 2 + rr * np.cos(t3), frac * Y,
                                          Z / 2 + rr * np.sin(t3)
                                      ],
                                      [
                                          X / 2 + rr * np.cos(t4), frac * Y,
                                          Z / 2 + rr * np.sin(t4)
                                      ],
                                      [
                                          X / 2 + rr * np.cos(t1), frac * Y,
                                          Z / 2 + rr * np.sin(t1)
                                      ],
                                      [
                                          X / 2 + rr * np.cos(t2), frac * Y,
                                          Z / 2 + rr * np.sin(t2)
                                      ]])
                        ]
                        viz.draw_unlit_line(ren,
                                            points,
                                            6 * [line_color + 0.6],
                                            lw=0.3,
                                            scale=1.0)

                # Draw markers as unlit spheres
                for i, marker in enumerate(markers):
                    # Draw sphere
                    source = vtk.vtkSphereSource()
                    source.SetCenter(marker)
                    source.SetRadius(marker_scale)
                    source.SetThetaResolution(30)
                    source.SetPhiResolution(30)

                    # mapper
                    mapper = vtk.vtkPolyDataMapper()
                    mapper.SetInputConnection(source.GetOutputPort())

                    # actor
                    actor = vtk.vtkActor()
                    actor.SetMapper(mapper)
                    actor.GetProperty().SetColor(marker_colors[i, :])
                    actor.GetProperty().SetLighting(0)
                    ren.AddActor(actor)

                # Draw profile lines (RGB palette per profile)
                colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

                for i, profile in enumerate(profiles):
                    # (removed leftover `import pdb; pdb.set_trace()` here)
                    n_seg = profile.shape[0]
                    viz.draw_unlit_line(ren, [profile],
                                        n_seg * [colors[i, :]],
                                        lw=0.5,
                                        scale=1.0)

                    # Draw sphere at the profile start
                    source = vtk.vtkSphereSource()
                    source.SetCenter(profile[0])
                    source.SetRadius(1)
                    source.SetThetaResolution(30)
                    source.SetPhiResolution(30)

                    # mapper
                    mapper = vtk.vtkPolyDataMapper()
                    mapper.SetInputConnection(source.GetOutputPort())

                    # actor
                    actor = vtk.vtkActor()
                    actor.SetMapper(mapper)
                    # actor.GetProperty().SetColor(colors[i,:])
                    actor.GetProperty().SetLighting(0)

                    # assign actor to the renderer
                    ren.AddActor(actor)

                # Setup cameras: place the camera outside the volume so a
                # 30-degree field of view covers the bounding sphere.
                Rmax = np.linalg.norm([Z / 2, X / 2, Y / 2])
                Rcam_rad = Rmax / np.tan(np.pi / 12)
                Ntmax = np.max([X, Y])
                ZZ = Z
                if ZZ > Ntmax:
                    Rcam_edge = np.max([X / 2, Y / 2])
                else:
                    Rcam_edge = np.min([X / 2, Y / 2])
                Rcam = Rcam_edge + Rcam_rad
                if my_cam is None:
                    cam = ren.GetActiveCamera()
                    if camtilt:
                        cam.SetPosition(
                            ((X - 1) / 2, (Y - 1) / 2, (Z - 1) / 2 + Rcam))
                        cam.SetViewUp((-1, 0, 1))
                        if axes_on:
                            max_dim = np.max((X, Z))
                            viz.draw_unlit_line(ren, [
                                np.array([[(X - max_dim) / 2, Y / 2, Z / 2],
                                          [X / 2, Y / 2, +Z / 2],
                                          [X / 2, Y / 2, (Z + max_dim) / 2]])
                            ],
                                                3 * [line_color],
                                                lw=max_dim / 250,
                                                scale=1.0)
                    else:
                        cam.SetPosition(
                            ((X - 1) / 2 + Rcam, (Y - 1) / 2, (Z - 1) / 2))
                        cam.SetViewUp((0, 0, 1))
                    cam.SetFocalPoint(((X - 1) / 2, (Y - 1) / 2, (Z - 1) / 2))
                    #ren.reset_camera()
                else:
                    ren.set_camera(*my_cam)
                ren.azimuth(azimuth)
                ren.elevation(elevation)

                # Set zooming (top row gets top_zoom applied)
                if save_parallels:
                    zoom_start.append(1.7)
                    zoom_end.append(1.7)
                else:
                    if row == 0:
                        zoom_start.append(1.3 * top_zoom)
                        zoom_end.append(1.3 * top_zoom)
                    else:
                        zoom_start.append(1.3)
                        zoom_end.append(1.3)

        # Setup writer
        writer = vtk.vtkTIFFWriter()
        if not compress:
            writer.SetCompressionToNoCompression()

        # Execute renders
        az = 90
        naz = np.ceil(360 / n_frames)
        log.info('Rendering ' + out_path)
        if save_parallels:
            # Parallel rendering for summaries: three orthogonal views
            filenames = ['yz', 'xy', 'xz']
            zooms = [zoom_start[0], 1.0, 1.0]
            azs = [90, -90, 0]
            els = [0, 0, 90]
            ren.projection(proj_type='parallel')
            ren.reset_camera()
            for i in tqdm(range(3)):
                ren.zoom(zooms[i])
                ren.azimuth(azs[i])
                ren.elevation(els[i])
                ren.reset_clipping_range()
                renderLarge = vtk.vtkRenderLargeImage()
                renderLarge.SetMagnification(1)
                renderLarge.SetInput(ren)
                renderLarge.Update()
                writer.SetInputConnection(renderLarge.GetOutputPort())
                writer.SetFileName(out_path + filenames[i] + '.tif')
                writer.Write()
        else:
            # Rendering for movies: rotate `az` degrees between frames
            for j, ren in enumerate(rens):
                ren.zoom(zoom_start[j])
            for i in tqdm(range(n_frames)):
                for j, ren in enumerate(rens):
                    ren.zoom(1 + ((zoom_end[j] - zoom_start[j]) / n_frames))
                    ren.azimuth(az)
                    ren.reset_clipping_range()

                renderLarge = vtk.vtkRenderLargeImage()
                renderLarge.SetMagnification(1)
                renderLarge.SetInput(ren)
                renderLarge.Update()
                writer.SetInputConnection(renderLarge.GetOutputPort())
                if n_frames != 1:
                    writer.SetFileName(out_path + str(i).zfill(3) + '.tif')
                else:
                    writer.SetFileName(out_path + '.tif')
                writer.Write()
                az = naz

        # Interactive
        if interact:
            window.show(ren)

        # Generate video (requires ffmpeg)
        if video:
            log.info('Generating video from frames')
            fps = np.ceil(n_frames / 12)
            # BUGFIX: frames are written as .tif above, so the ffmpeg input
            # pattern must match ('.png' previously found no frames).
            subprocess.call([
                'ffmpeg', '-nostdin', '-y', '-framerate',
                str(fps), '-loglevel', 'panic', '-i',
                out_path + '%03d' + '.tif', '-pix_fmt', 'yuvj420p', '-vcodec',
                'mjpeg', out_path[:-1] + '.avi'
            ])
            # subprocess.call(['rm', '-r', out_path])

        return my_cam
Ejemplo n.º 27
0
def show_seeds(seeds):
    """Display seed points as randomly-colored dots in an interactive window.

    Parameters
    ----------
    seeds : ndarray, shape (N, 3)
        Seed point coordinates.
    """
    scene = window.Renderer()
    scene.add(actor.point(seeds, colors=np.random.rand(*seeds.shape)))
    window.show(scene)
Ejemplo n.º 28
0
seeds. Using ``random_seeds_from_mask`` we can select a specific number of
seeds (``seeds_count``) in each voxel where the mask ``fa > 0.3`` is true.
"""

# NOTE(review): tutorial fragment — `random_seeds_from_mask`, `fa`,
# `csd_peaks`, and `interactive` are defined earlier in the original script.
seeds = random_seeds_from_mask(fa > 0.3, seeds_count=1)
"""
For quality assurance we can also visualize a slice from the direction field
which we will use as the basis to perform the tracking.
"""

# Render a peak-direction slicer for visual QA of the CSD peaks.
ren = window.Renderer()
ren.add(
    actor.peak_slicer(csd_peaks.peak_dirs, csd_peaks.peak_values, colors=None))

if interactive:
    window.show(ren, size=(900, 900))
else:
    # Off-screen path: save a snapshot instead of opening a window.
    window.record(ren, out_path='csd_direction_field.png', size=(900, 900))
"""
.. figure:: csd_direction_field.png
 :align: center

 **Direction Field (peaks)**

``EuDX`` [Garyfallidis12]_ is a fast algorithm that we use here to generate
streamlines. This algorithm is what is used here and the default option
when providing the output of peaks directly in LocalTracking.
"""

streamline_generator = LocalTracking(csd_peaks,
                                     tissue_classifier,
Ejemplo n.º 29
0
def show_lines(streamlines, affine=None):
    """Display streamlines as line actors in an interactive window.

    Parameters
    ----------
    streamlines : sequence of ndarrays
        Streamline coordinates.
    affine : ndarray, optional
        4x4 affine passed through to ``actor.line``.
    """
    scene = window.Renderer()
    scene.add(actor.line(streamlines, affine))
    window.show(scene)
Ejemplo n.º 30
0
def test_odf_slicer(interactive=False):
    """Exercise ``actor.odf_slicer`` over memory-mapped ODF data.

    Builds a uniform ODF volume backed by a temporary memmap, renders it
    together with an FA slicer through several mask/colormap combinations,
    and checks the number of rendered glyphs and actors.

    Parameters
    ----------
    interactive : bool
        When True, open an interactive window at each checkpoint in
        addition to the off-screen checks.
    """
    sphere = get_sphere('symmetric362')

    shape = (11, 11, 11, sphere.vertices.shape[0])

    # Back the ODF volume with a memmap so large shapes stay cheap.
    fid, fname = mkstemp(suffix='_odf_slicer.mmap')

    odfs = np.memmap(fname, dtype=np.float64, mode='w+',
                     shape=shape)

    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    # Mask out a corner so not every voxel renders a glyph.
    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs, affine,
                                 mask=mask, sphere=sphere, scale=.25,
                                 colormap='jet')
    # FA volume: bright faces plus one bright interior voxel.
    # (was `0. * np.zeros(...)` — the multiply was a no-op)
    fa = np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    # One glyph per voxel of the displayed 11x11 slice.
    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    # Single-voxel mask: only (5, 5, 5) should show a glyph.
    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='jet',
                                 norm=False, global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    # Sweep the display plane along X then Y.
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)
    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, None, mask=mask,
                                 sphere=sphere, scale=.25,
                                 colormap='plasma',
                                 norm=False, global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    # Release the memmap before deleting the backing file (required on
    # Windows, harmless elsewhere).
    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)

    os.remove(fname)
# NOTE(review): tutorial fragment — `streamlines_generator`, `Streamlines`,
# `line_colors`, and `interactive` are defined earlier in the original script.
streamlines = Streamlines(streamlines_generator)

# Prepare the display objects.
color = line_colors(streamlines)

if window.have_vtk:
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))

    # Create the 3D display.
    r = window.Renderer()
    r.add(streamlines_actor)

    # Save still images for this static example. Or for interactivity use
    window.record(r, n_frames=1, out_path='deterministic.png', size=(800, 800))
    if interactive:
        window.show(r)

"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**

We've created a deterministic set of streamlines, so called because if you
repeat the fiber tracking (keeping all the inputs the same) you will get
exactly the same set of streamlines. We can save the streamlines as a Trackvis
file so it can be loaded into other software for visualization or further
analysis.
"""

from dipy.io.streamline import save_trk
Ejemplo n.º 32
0
def show_peaks(pam):
    """Display the peak directions of a peaks-and-metrics object.

    Parameters
    ----------
    pam : object
        Must expose a ``peak_dirs`` attribute accepted by
        ``actor.peak_slicer``.
    """
    scene = window.Renderer()
    scene.add(actor.peak_slicer(pam.peak_dirs, colors=None))
    window.show(scene)