Code Example #1
File: map_cst.py  Project: baothien/tiensy
def show_Pi_mapping(streamlines_A, streamlines_B, Pi_ids_A, mapping_Pi):
    r = fvtk.ren()
    Pi_viz_A = fvtk.streamtube(streamlines_A[Pi_ids_A], fvtk.colors.red)
    fvtk.add(r, Pi_viz_A)
    # Pi_viz_B = fvtk.streamtube(streamlines_B[Pi_ids_B], fvtk.colors.blue)
    # fvtk.add(r, Pi_viz_B)
    Pi_viz_A_1nn_B = fvtk.streamtube(streamlines_B[mapping_Pi], fvtk.colors.white)
    fvtk.add(r, Pi_viz_A_1nn_B)
    fvtk.show(r)
Code Example #2
File: test_fvtk.py  Project: gsangui/dipy
def test_fvtk_functions():

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    fvtk.add(r, fvtk.slicer(vol, plane_i=[50]))

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)
Code Example #3
def show(imgtck, clusters, out_path):
    colormap = fvtk.create_colormap(np.ravel(clusters.centroids), name='jet')
    colormap_full = np.ones((len(imgtck.streamlines), 3))
    for cluster, color in zip(clusters, colormap):
        colormap_full[cluster.indices] = color

    ren = fvtk.ren()
    ren.SetBackground(1, 1, 1)
    fvtk.add(ren, fvtk.streamtube(imgtck.streamlines, colormap_full))
    fvtk.record(ren, n_frames=1, out_path=out_path, size=(600, 600))
    fvtk.show(ren)
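
The helper above expects an already loaded tractogram and a clustering result. A purely illustrative way to call it is sketched below; the file name, the threshold value, and the use of `nibabel.streamlines.load` with `QuickBundles` are assumptions for the sketch, not part of the original project.

import nibabel as nib
from dipy.segment.clustering import QuickBundles

# Hypothetical inputs: any tractogram object exposing a .streamlines attribute works.
imgtck = nib.streamlines.load('bundle.tck')
qb = QuickBundles(threshold=10.)           # MDF distance threshold in mm (assumed value)
clusters = qb.cluster(imgtck.streamlines)
show(imgtck, clusters, out_path='bundle_clusters.png')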
Code Example #4
File: test_fvtk.py  Project: MPDean/dipy
def test_fvtk_functions():
    # This test will fail if any of the given actors have changed inputs or
    # do not exist

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    slicer = fvtk.slicer(vol)
    slicer.display(50, None, None)
    fvtk.add(r, slicer)

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)

    fvtk.clear(r)

    # Peak directions
    p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
    fvtk.add(r, p)

    p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
                    np.random.rand(3, 3, 3, 5),
                    colors=(0, 1, 0))
    fvtk.add(r, p2)
Code Example #5
def test_fvtk_functions():
    # This test will fail if any of the given actors have changed inputs or
    # do not exist

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    slicer = fvtk.slicer(vol)
    slicer.display(50, None, None)
    fvtk.add(r, slicer)

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)

    fvtk.clear(r)

    # Peak directions
    p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
    fvtk.add(r, p)

    p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
                    np.random.rand(3, 3, 3, 5),
                    colors=(0, 1, 0))
    fvtk.add(r, p2)
Code Example #6
def show_both_bundles(bundles, colors=None, show=False, fname=None):

    ren = fvtk.ren()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        color = colors[i]
        lines = fvtk.streamtube(bundle, color, linewidth=0.3)
        lines.RotateX(-90)
        lines.RotateZ(90)
        fvtk.add(ren, lines)
    if show:
        fvtk.show(ren)
    if fname is not None:
        sleep(1)
        fvtk.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Code Example #7
def show_both_bundles(bundles, colors=None, show=False, fname=None):

    ren = fvtk.ren()
    ren.SetBackground(1.0, 1, 1)
    for (i, bundle) in enumerate(bundles):
        color = colors[i]
        lines = fvtk.streamtube(bundle, color, linewidth=0.3)
        lines.RotateX(-90)
        lines.RotateZ(90)
        fvtk.add(ren, lines)
    if show:
        fvtk.show(ren)
    if fname is not None:
        sleep(1)
        fvtk.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Code Example #8
def show_bundles(streamlines, clusters, show_b=True):
    # Color each streamline according to the cluster it belongs to.
    colormap = fvtk.create_colormap(np.arange(len(clusters)))
    colormap_full = np.ones((len(streamlines), 3))
    for cluster, color in zip(clusters, colormap):
        colormap_full[cluster.indices] = color

    ren = fvtk.ren()
    ren.SetBackground(1, 1, 1)
    fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
    fvtk.record(ren,
                n_frames=1,
                out_path='fornix_clusters_cosine.png',
                size=(600, 600))

    if show_b:
        fvtk.show(ren)
Code Example #9
def show_all_bundles(bundles, colors=None, show=True, fname=None):

    ren = fvtk.ren()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        if colors is None:
            color = np.random.rand(3)
        else:
            color = colors[i]
        lines = fvtk.streamtube(bundle, color, linewidth=0.15 * 2)
        lines.RotateX(-90)
        lines.RotateZ(90)
        fvtk.add(ren, lines)
    #fvtk.add(ren, fvtk.axes((20, 20, 20)))
    if show:
        fvtk.show(ren)
    if fname is not None:
        fvtk.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Code Example #10
File: mriutil.py  Project: sbrambati/toad
def createVtkPng(source, anatomical, roi):
    import vtk
    from dipy.viz.colormap import line_colors
    from dipy.viz import fvtk

    target = source.replace(".trk",".png")
    roiImage= nibabel.load(roi)
    anatomicalImage = nibabel.load(anatomical)

    sourceImage = [s[0] for s in nibabel.trackvis.read(source, points_space='voxel')[0]]
    try:
        sourceActor = fvtk.streamtube(sourceImage, line_colors(sourceImage))

        roiActor = fvtk.contour(roiImage.get_data(), levels=[1], colors=[(1., 1., 0.)], opacities=[1.])
        anatomicalActor = fvtk.slicer(anatomicalImage.get_data(),
                                  voxsz=(1.0, 1.0, 1.0),
                                  plane_i=None,
                                  plane_j=None,
                                  plane_k=[65],
                                  outline=False)
    except ValueError:
        return False
        
    sourceActor.RotateX(-70)
    sourceActor.RotateY(2.5)
    sourceActor.RotateZ(185)

    roiActor.RotateX(-70)
    roiActor.RotateY(2.5)
    roiActor.RotateZ(185)

    anatomicalActor.RotateX(-70)
    anatomicalActor.RotateY(2.5)
    anatomicalActor.RotateZ(185)

    ren = fvtk.ren()
    fvtk.add(ren, sourceActor)
    fvtk.add(ren, roiActor)
    fvtk.add(ren, anatomicalActor)
    fvtk.record(ren, out_path=target, size=(1200, 1200), n_frames=1, verbose=True, cam_pos=(90.03, 118.33, 700.59))
    return target
Code Example #11
def show_both_bundles(bundles, colors=None, show_b=False, fname=None):
    """
    Show both bundles
    bundles: return of --pyfat/algorithm/fiber_math/bundle_registration
    example:
    show_both_bundles([cb_subj1, cb_subj2_aligned],
                     colors=[fvtk.colors.orange, fvtk.colors.red],
                     fname='after_registration.png')
    """
    ren = fvtk.ren()
    ren.SetBackground(1., 1, 1)
    for (i, bundle) in enumerate(bundles):
        color = colors[i]
        lines = fvtk.streamtube(bundle, color, linewidth=0.3)
        lines.RotateX(-90)
        lines.RotateZ(90)
        fvtk.add(ren, lines)
    if show_b:
        fvtk.show(ren)
    if fname is not None:
        sleep(1)
        fvtk.record(ren, n_frames=1, out_path=fname, size=(900, 900))
Code Example #12
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):

"""
"""

Let's visualize the initial candidate group of streamlines in 3D, relative to the
anatomical structure of this brain:

"""

from dipy.viz.colormap import line_colors
from dipy.viz import fvtk

candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
                                              line_colors(candidate_sl))
cc_ROI_actor = fvtk.contour(cc_slice,
                            levels=[1],
                            colors=[(1., 1., 0.)],
                            opacities=[1.])

vol_actor = fvtk.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

# Add display objects to canvas
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
Code Example #13
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)

"""
We will now visualize the clustering result.
"""

# Color each streamline according to the cluster it belongs to.
colormap = fvtk.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='fornix_clusters_arclength.png', size=(600, 600))

"""
.. figure:: fornix_clusters_arclength.png
   :align: center

   **Showing the different clusters obtained by using the arc length**.


Extending `Metric`
==================
This section will guide you through the creation of a new metric that can be
used in the context of this clustering framework. For a list of available
metrics in Dipy see :ref:`example_segment_clustering_metrics`.
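
The `Extending Metric` passage above stops before showing any code. As a rough, non-authoritative sketch of what such an extension can look like, the snippet below defines a cosine-distance metric; it assumes the dipy clustering interface in `dipy.segment.metric`, where a `Metric` subclass implements `are_compatible` and `dist` and receives its `Feature` through the constructor (as in dipy's own example on extending the clustering framework).

import numpy as np
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import Metric, VectorOfEndpointsFeature


class CosineMetric(Metric):
    """Cosine distance between the endpoint-to-endpoint vectors of two streamlines."""

    def __init__(self):
        # The feature reduces each streamline to the vector joining its two endpoints.
        super(CosineMetric, self).__init__(feature=VectorOfEndpointsFeature())

    def are_compatible(self, shape1, shape2):
        # Features are comparable when both are single vectors of the same length.
        return shape1 == shape2 and shape1[0] == 1

    def dist(self, v1, v2):
        # v1 and v2 are the (1, 3) vectors produced by the feature above.
        a, b = v1.ravel(), v2.ravel()
        cos_theta = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
        cos_theta = np.clip(cos_theta, -1.0, 1.0)   # guard against numerical drift
        return np.arccos(cos_theta) / np.pi         # normalized to [0, 1]


# Minimal usage on two toy streamlines; the threshold is in normalized-distance units.
streamlines = [np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype=np.float32),
               np.array([[0, 1, 0], [1, 1, 0], [2, 1, 1]], dtype=np.float32)]
qb = QuickBundles(threshold=0.1, metric=CosineMetric())
clusters = qb.cluster(streamlines)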
Code Example #14
t1_data = t1.get_data()
t1_aff = t1.affine
color = line_colors(streamlines)

"""
To speed up visualization, we will select a random sub-set of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, which generates many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""

from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)

streamlines_actor = fvtk.streamtube(
    list(move_streamlines(plot_streamlines, inv(t1_aff))),
    line_colors(streamlines), linewidth=0.1)

vol_actor = fvtk.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)

fvtk.record(ren, n_frames=1, out_path='sfm_streamlines.png',
            size=(800, 800))
Code Example #15
# Extract feature of every streamline.
centers = np.asarray(map(feature.extract, streamlines))

# Color each center of mass according to the cluster it belongs to.
rng = np.random.RandomState(42)
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.point(centers[:, 0, :], colormap_full, point_radius=0.2))
fvtk.record(ren, n_frames=1, out_path='center_of_mass_feature.png', size=(600, 600))

"""
.. figure:: center_of_mass_feature.png
   :align: center

   **Showing the center of mass of each streamline, colored according to
   the QuickBundles results**.

.. _clustering-examples-MidpointFeature:

Midpoint Feature
================
**What:** Instances of `MidpointFeature` extract the middle point of a
Code Example #16
           [  67.67449188,   85.57660675,   79.98880005],
           [  65.69326782,   86.66771698,   77.44818115],
           [  64.02451324,   88.43942261,   75.0697403 ]], dtype=float32)

"""

"""
`clusters` also has attributes like `centroids` (cluster representatives) and
methods like `add`, `remove`, and `clear` to modify the clustering result.

Let's first show the initial dataset.
"""

ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white))
fvtk.record(ren, n_frames=1, out_path='fornix_initial.png', size=(600, 600))

"""
.. figure:: fornix_initial.png
   :align: center

   **Initial Fornix dataset**.

Show the centroids of the fornix after clustering (with random colors):
"""

colormap = np.random.rand(len(clusters), 3)

fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
Code Example #17
less curvy regions. In contrast with ``downsample`` it does not enforce that
segments should be of equal size.
"""

bundle_downsampled2 = [approx_polygon_track(s, 0.25) for s in bundle]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]
"""
Both ``downsample`` and ``approx_polygon_track`` can be thought of as methods
for lossy compression of streamlines.
"""

from dipy.viz import fvtk

ren = fvtk.ren()
ren.SetBackground(*fvtk.colors.white)
bundle_actor = fvtk.streamtube(bundle, fvtk.colors.red, linewidth=0.3)

fvtk.add(ren, bundle_actor)

bundle_actor2 = fvtk.streamtube(bundle_downsampled,
                                fvtk.colors.red,
                                linewidth=0.3)
bundle_actor2.SetPosition(0, 40, 0)

bundle_actor3 = fvtk.streamtube(bundle_downsampled2,
                                fvtk.colors.red,
                                linewidth=0.3)
bundle_actor3.SetPosition(0, 80, 0)

fvtk.add(ren, bundle_actor2)
fvtk.add(ren, bundle_actor3)
Code Example #18
File: streamline_length.py  Project: alexsavio/dipy
segments should be of equal size.
"""

bundle_downsampled2 = [approx_polygon_track(s, 0.25) for s in bundle]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]

"""
Both ``downsample`` and ``approx_polygon_track`` can be thought of as methods
for lossy compression of streamlines.
"""

from dipy.viz import fvtk

ren = fvtk.ren()
ren.SetBackground(*fvtk.colors.white)
bundle_actor = fvtk.streamtube(bundle, fvtk.colors.red, linewidth=0.3)

fvtk.add(ren, bundle_actor)

bundle_actor2 = fvtk.streamtube(bundle_downsampled, fvtk.colors.red, linewidth=0.3)
bundle_actor2.SetPosition(0, 40, 0)

bundle_actor3 = fvtk.streamtube(bundle_downsampled2, fvtk.colors.red, linewidth=0.3)
bundle_actor3.SetPosition(0, 80, 0)

fvtk.add(ren, bundle_actor2)
fvtk.add(ren, bundle_actor3)

fvtk.camera(ren, pos=(0, 0, 0), focal=(30, 0, 0))
fvtk.record(ren, out_path="simulated_cosine_bundle.png", size=(900, 900))
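
To make the difference between the two reduction methods concrete, here is a small illustrative sketch on one synthetic streamline. It assumes the older dipy layout used throughout these examples, where ``downsample`` is importable from ``dipy.tracking.metrics`` and ``approx_polygon_track`` from ``dipy.tracking.distances``; the toy geometry and point counts are made up for the sketch.

import numpy as np
from dipy.tracking.metrics import downsample
from dipy.tracking.distances import approx_polygon_track

# One synthetic streamline: half a cosine wave sampled at 100 points.
theta = np.linspace(0, np.pi, 100)
streamline = np.column_stack([theta, np.cos(theta), np.zeros_like(theta)]).astype('f4')

# downsample always returns the requested number of (roughly) equally spaced points.
sl_ds = downsample(streamline, 12)

# approx_polygon_track keeps more points where the curve bends and fewer on the
# straight parts, so the number of output points depends on the geometry.
sl_ap = approx_polygon_track(streamline, 0.25)

print(len(streamline), len(sl_ds), len(sl_ap))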
Code Example #19
File: sfm_tracking.py  Project: cnguyen/dipy
t1_data = t1.get_data()
t1_aff = t1.get_affine()
color = line_colors(streamlines)

"""
To speed up visualization, we will select a random sub-set of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, which generates many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""

from dipy.tracking.streamline import select_random_set_of_streamlines

plot_streamlines = select_random_set_of_streamlines(streamlines, 900)

streamlines_actor = fvtk.streamtube(list(move_streamlines(plot_streamlines, inv(t1_aff))), line_colors(streamlines))

vol_actor = fvtk.slicer(t1_data, voxsz=(1.0, 1.0, 1.0), plane_i=[40], plane_j=None, plane_k=[35], outline=False)

ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path="sfm_streamlines.png", size=(800, 800))

"""
.. figure:: sfm_streamlines.png
   :align: center

   **Sparse Fascicle Model tracks**

Finally, we can save these streamlines to a 'trk' file, for use in other
Code Example #20
        e2 = dm_small2[np.triu_indices(dm_small2.shape[0],1)]

        spgk[i] = np.multiply.outer(np.exp(-e1), np.exp(-e2)).sum()
        print i, spgk[i]

    
    r = fvtk.ren()
    # lines = tracks
    # c = fvtk.streamtube(lines, fvtk.colors.red)
    # fvtk.add(r,c)
    # lines = tracks[np.argsort(spgk)[-30:]]
    # c = fvtk.streamtube(lines, fvtk.colors.red)
    # fvtk.add(r,c)
    
    lines = tracks[idx1]
    c = fvtk.streamtube(lines, fvtk.colors.red)
    fvtk.add(r,c)
    lines = [tracks[sid]]
    c = fvtk.streamtube(lines, fvtk.colors.cyan)
    fvtk.add(r,c)
    # lines = tracks[idx1]
    # c = fvtk.streamtube(lines, fvtk.colors.cyan)
    # fvtk.add(r,c)
    # lines = tracks[sid]
    # c = fvtk.streamtube(lines, fvtk.colors.cyan)
    # fvtk.add(r,c)

    best = np.argsort(spgk)[-1] # np.argmax(spgk)
    lines = tracks[kdt.query_radius(dp[best], radius)[0]]
    c = fvtk.streamtube(lines, fvtk.colors.carrot)
    fvtk.add(r,c)
Code Example #21
# We'll need to know where the corpus callosum is from these variables.
hardi_img, gtab, labels_img = read_stanford_labels()
labels = labels_img.get_data()
cc_slice = labels == 2
t1 = read_stanford_t1()
t1_data = t1.get_data()
data = hardi_img.get_data()

# Read the candidates from file in voxel space:
candidate_sl = [
    s[0]
    for s in nib.trackvis.read(args.input_trackvis, points_space='voxel')[0]
]
# Visualize the initial candidate group of streamlines
# in 3D, relative to the anatomical structure of this brain.
candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
                                              line_colors(candidate_sl))
cc_ROI_actor = fvtk.contour(cc_slice,
                            levels=[1],
                            colors=[(1., 1., 0.)],
                            opacities=[1.])
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
# Add display objects to canvas.
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
fvtk.record(ren, n_frames=1, out_path="life_candidates.png", size=(800, 800))
Code Example #22
File: tracking_eudx_tensor.py  Project: gsangui/dipy
"""

ren = fvtk.ren()

"""
Every streamline will be coloured according to its orientation
"""

from dipy.viz.colormap import line_colors

"""
fvtk.line adds a streamline actor for streamline visualization
and fvtk.add adds this actor in the scene
"""

fvtk.add(ren, fvtk.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')

ren.SetBackground(1, 1, 1)
fvtk.record(ren, n_frames=1, out_path='tensor_tracks.png', size=(600, 600))

"""
.. figure:: tensor_tracks.png
   :align: center

   **Deterministic streamlines with EuDX on a Tensor Field**.

.. include:: ../links_names.inc

"""
Code Example #23
File: tract_group_reg.py  Project: sinkpoint/sagit
ref_vec = set_number_of_points(data['streamlines'][ref_idx], p_per_strm)

srr = StreamlineLinearRegistration()

for i,strm in enumerate(data['streamlines']):
    print 'registering %d/%d' % (i,len(data['file'])-1)
    print '# streamlines = %d' %len(strm)
    if len(strm) == 0 or i==ref_idx:
        print 'skipping'
        continue
    mov_vec = set_number_of_points(strm, 20)
    srm = srr.optimize(static=ref_vec, moving=mov_vec)
    data['aligned_strms'].append(srm.transform(mov_vec))

from dipy.viz import fvtk
ren = fvtk.ren()
ren.SetBackground(1., 1, 1)

reflines = fvtk.streamtube(ref_vec, fvtk.colors.red, linewidth=0.2)
fvtk.add(ren, reflines)

for (i, bundle) in enumerate(data['aligned_strms']):
    lines = fvtk.streamtube(bundle, np.random.rand(3), linewidth=0.1)
    # lines.RotateX(-90)
    # lines.RotateZ(90)
    fvtk.add(ren, lines)


fvtk.show(ren)
Code Example #24
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):

"""


"""

Let's visualize the initial candidate group of streamlines in 3D, relative to the
anatomical structure of this brain:

"""

from dipy.viz.colormap import line_colors
from dipy.viz import fvtk
candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
                                       line_colors(candidate_sl))
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
                            opacities=[1.])
vol_actor = fvtk.slicer(t1_data, voxsz=(1.0, 1.0, 1.0), plane_i=[40],
                        plane_j=None, plane_k=[35], outline=False)
# Add display objects to canvas
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path='life_candidates.png',
            size=(800, 800))

"""

.. figure:: life_candidates.png
Code Example #25
File: map_cst.py  Project: baothien/tiensy
    subject_A = 210
    subject_B = 205
    side = 'L' # or 'R'
    show = False
    k = 100
    
    filename = 'data_als/%d/tracks_dti_3M_linear.trk'
    filename_A = filename % subject_A
    filename_B = filename % subject_B

    streamlines_A, cst_ids_A, Pi_ids_A, dr_A = load_or_create(subject_A, side, k=k)
    streamlines_B, cst_ids_B, Pi_ids_B, dr_B = load_or_create(subject_B, side, k=k)

    if show:
        r = fvtk.ren()
        cst_viz_A = fvtk.streamtube(cst_streamlines_A, fvtk.colors.red)
        cst_viz_B = fvtk.streamtube(cst_streamlines_B, fvtk.colors.blue)
        fvtk.add(r, cst_viz_A)
        fvtk.add(r, cst_viz_B)
        fvtk.show(r)
        
    if show:
        r = fvtk.ren()
        Pi_viz_A = fvtk.streamtube(streamlines_A[Pi_ids_A], fvtk.colors.red)
        fvtk.add(r, Pi_viz_A)
        Pi_viz_B = fvtk.streamtube(streamlines_B[Pi_ids_B], fvtk.colors.blue)
        fvtk.add(r, Pi_viz_B)
        fvtk.show(r)

    print "Computing the distance matrix between Pi_A streamlines."
    dm_Pi_A = bundles_distances_mam(streamlines_A[Pi_ids_A], streamlines_A[Pi_ids_A])
Code Example #26
metric = SumPointwiseEuclideanMetric(feature=ArcLengthFeature())
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)
"""
We will now visualize the clustering result.
"""

# Color each streamline according to the cluster it belongs to.
colormap = fvtk.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren,
            n_frames=1,
            out_path='fornix_clusters_arclength.png',
            size=(600, 600))
"""
.. figure:: fornix_clusters_arclength.png
   :align: center

   **Showing the different clusters obtained by using the arc length**.


Extending `Metric`
==================
This section will guide you through the creation of a new metric that can be
used in the context of this clustering framework. For a list of available
Code Example #27
           [  78.98937225,   89.57682037,   85.63652039],
           [  74.72344208,   86.60827637,   84.9391861 ],
           [  70.40846252,   85.15874481,   82.4484024 ],
           [  66.74534607,   86.00262451,   78.82582092],
           [  64.02451324,   88.43942261,   75.0697403 ]], dtype=float32)


`clusters` also has attributes like `centroids` (cluster representatives) and
methods like `add`, `remove`, and `clear` to modify the clustering result.

Let's first show the initial dataset.
"""

ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white))
fvtk.record(ren, n_frames=1, out_path='fornix_initial.png', size=(600, 600))
"""
.. figure:: fornix_initial.png
   :align: center

   Initial Fornix dataset.

Show the centroids of the fornix after clustering (with random colors):
"""

colormap = fvtk.create_colormap(np.arange(len(clusters)))

fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
Code Example #28
# Extract feature of every streamline.
centers = np.asarray(map(feature.extract, streamlines))

# Color each center of mass according to the cluster it belongs to.
rng = np.random.RandomState(42)
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.point(centers[:, 0, :], colormap_full, point_radius=0.2))
fvtk.record(ren,
            n_frames=1,
            out_path='center_of_mass_feature.png',
            size=(600, 600))
"""
.. figure:: center_of_mass_feature.png
   :align: center

   **Showing the center of mass of each streamline, colored according to
   the QuickBundles results**.

.. _clustering-examples-MidpointFeature:

Midpoint Feature
Code Example #29
Create a scene.
"""

ren = fvtk.ren()
"""
Every streamline will be coloured according to its orientation
"""

from dipy.viz.colormap import line_colors
"""
fvtk.line adds a streamline actor for streamline visualization
and fvtk.add adds this actor in the scene
"""

fvtk.add(ren,
         fvtk.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')

ren.SetBackground(1, 1, 1)
fvtk.record(ren, n_frames=1, out_path='tensor_tracks.png', size=(600, 600))
"""
.. figure:: tensor_tracks.png
   :align: center

   **Deterministic streamlines with EuDX on a Tensor Field**.

.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography", PhD thesis, University of Cambridge, 2012.

.. include:: ../links_names.inc
Code Example #30
    print "loss =", loss_coregistration_1nn

    if do_simulated_annealing_from_1nn:
        print
        print "Simulated Annealing"
        np.random.seed(1)  # this is the random seed of the optimization process
        initial_state = mapping12_coregistration_1nn
        mapping12_best_from_1nn, energy_best_from_1nn = anneal(
            initial_state=initial_state,
            energy_function=loss_function,
            neighbour=neighbour4,
            transition_probability=transition_probability,
            temperature=temperature_boltzmann,
            max_steps=iterations_anneal,
            energy_max=0.0,
            T0=200.0,
            log_every=1000,
        )

    if show:
        from dipy.viz import fvtk

        sid = 22
        r = fvtk.ren()
        # fvtk.add(r, fvtk.streamtube(tractography1, fvtk.colors.orange))
        # fvtk.add(r, fvtk.streamtube(tractography2, fvtk.colors.blue))
        fvtk.add(r, fvtk.streamtube([tractography1[sid]], fvtk.colors.red))
        fvtk.add(r, fvtk.streamtube([tractography2[mapping12_best[sid]]], fvtk.colors.cyan))
        fvtk.add(r, fvtk.streamtube([tractography2[mapping12_coregistration_1nn[sid]]], fvtk.colors.green))
        fvtk.show(r)