Example #1
File: test_fvtk.py  Project: gsangui/dipy
def test_fvtk_functions():

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    fvtk.add(r, fvtk.slicer(vol, plane_i=[50]))

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)
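To display the scene or save a screenshot, the same fvtk calls used in the later examples apply; a minimal follow-up sketch (the output filename is illustrative):

# Show the scene interactively, or record it to disk
fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='scene.png', size=(600, 600))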
Example #2
def plot_tract(bundle, affine, num_class=1, pred=[], ignore=[]):
    # Visualize the results
    import numpy as np
    from dipy.viz import fvtk, actor
    from dipy.tracking.streamline import transform_streamlines

    # Create renderer
    r = fvtk.ren()

    if len(pred) > 0:
        colors = get_spaced_colors(num_class)
        for i in range(num_class):
            if i in ignore:
                continue
            idx = np.argwhere(pred == i).squeeze()
            bundle_native = transform_streamlines(
                bundle[idx], np.linalg.inv(affine))
            if len(bundle_native) == 0:
                continue
            lineactor = actor.line(
                bundle_native, colors[i], linewidth=0.2)
            fvtk.add(r, lineactor)
    elif num_class == 1:
        bundle_native = transform_streamlines(bundle, np.linalg.inv(affine))
        lineactor = actor.line(bundle_native, linewidth=0.2)
        fvtk.add(r, lineactor)

    # Show original fibers
    fvtk.camera(r, pos=(-264, 285, 155), focal=(0, -14, 9),
                viewup=(0, 0, 1), verbose=False)

    fvtk.show(r)
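get_spaced_colors is not defined in this snippet; a minimal sketch of a compatible helper (an assumption: it returns num_class evenly spaced RGB triples in [0, 1]):

import colorsys
import numpy as np

def get_spaced_colors(n):
    # Hypothetical helper: n evenly spaced hues, converted to RGB triples
    return [np.array(colorsys.hsv_to_rgb(i / float(n), 1.0, 1.0))
            for i in range(n)]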
Example #3
def label_streamlines(streamlines, labels, labels_Value, affine, hdr, f_name,
                      data_path):

    cc_slice = labels == labels_Value
    cc_streamlines = utils.target(streamlines, cc_slice, affine=affine)
    cc_streamlines = list(cc_streamlines)

    other_streamlines = utils.target(streamlines,
                                     cc_slice,
                                     affine=affine,
                                     include=False)
    other_streamlines = list(other_streamlines)
    assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)

    print("num of roi steamlines is %d", len(cc_streamlines))

    # Make display objects
    color = line_colors(cc_streamlines)
    cc_streamlines_actor = fvtk.line(cc_streamlines, color)
    cc_ROI_actor = fvtk.contour(cc_slice,
                                levels=[1],
                                colors=[(1., 1., 0.)],
                                opacities=[1.])

    # Add display objects to canvas
    r = fvtk.ren()
    fvtk.add(r, cc_streamlines_actor)
    fvtk.add(r, cc_ROI_actor)

    # Save figures: axial view first, then sagittal after moving the camera
    fvtk.record(r, n_frames=1, out_path=f_name + '_roi_axial.png',
                size=(800, 800))
    fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
    fvtk.record(r, n_frames=1, out_path=f_name + '_roi_sagittal.png',
                size=(800, 800))
    """"""

    csd_streamlines_trk = ((sl, None, None) for sl in cc_streamlines)
    csd_sl_fname = f_name + '_roi_streamline.trk'
    nib.trackvis.write(csd_sl_fname,
                       csd_streamlines_trk,
                       hdr,
                       points_space='voxel')
    #nib.save(nib.Nifti1Image(FA, img.get_affine()), 'FA_map2.nii.gz')
    print('Saving "_roi_streamline.trk" successful.')

    import tractconverter as tc
    input_format = tc.detect_format(csd_sl_fname)
    trk_file = input_format(csd_sl_fname)
    vtk_file = tc.FORMATS['vtk'].create(csd_sl_fname + ".vtk", trk_file.hdr)
    tc.convert(trk_file, vtk_file)

    return cc_streamlines
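A hedged usage sketch (the file names and label value are hypothetical, and streamlines is assumed to come from an earlier tracking step):

import nibabel as nib

# Hypothetical inputs: a label volume and a matching trackvis header
label_img = nib.load('aparc-reduced.nii.gz')
labels = label_img.get_data()
affine = label_img.get_affine()
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = label_img.get_header().get_zooms()[:3]

cc_streamlines = label_streamlines(streamlines, labels, 2, affine,
                                   hdr, 'subject1', './data')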
Example #4
def test_fvtk_functions():
    # This test will fail if any of the given actors changed inputs or does
    # not exist

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    slicer = fvtk.slicer(vol)
    slicer.display(50, None, None)
    fvtk.add(r, slicer)

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)

    fvtk.clear(r)

    # Peak directions
    p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
    fvtk.add(r, p)

    p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
                    np.random.rand(3, 3, 3, 5),
                    colors=(0, 1, 0))
    fvtk.add(r, p2)
Example #5
File: test_fvtk.py  Project: MPDean/dipy
def test_fvtk_functions():
    # This test will fail if any of the given actors changed inputs or does
    # not exist

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    slicer = fvtk.slicer(vol)
    slicer.display(50, None, None)
    fvtk.add(r, slicer)

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)

    fvtk.clear(r)

    # Peak directions
    p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
    fvtk.add(r, p)

    p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
                    np.random.rand(3, 3, 3, 5),
                    colors=(0, 1, 0))
    fvtk.add(r, p2)
Example #7
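vol_actor is not defined in this fragment; it is presumably a slicer actor over anatomical data, e.g. (mirroring Example #8; the data and slice index are assumptions):

vol_actor = fvtk.slicer(t1_data, affine=affine)
vol_actor.display(None, None, 20)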
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, vol_actor)
fvtk.add(r, vol_actor2)
fvtk.add(r, cc_streamlines_actor)
fvtk.add(r, cc_ROI_actor)

# Save figures
fvtk.record(r,
            n_frames=1,
            out_path='corpuscallosum_axial.png',
            size=(800, 800))
fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(r,
            n_frames=1,
            out_path='corpuscallosum_sagittal.png',
            size=(800, 800))
"""
.. figure:: corpuscallosum_axial.png
   :align: center

   **Corpus Callosum Axial**

.. include:: ../links_names.inc

.. figure:: corpuscallosum_sagittal.png
   :align: center

   **Corpus Callosum Sagittal**
"""
Example #8
# Original lines colored by LFBC
lineactor = actor.line(fbc_sl_orig, clrs_orig, linewidth=0.2)
fvtk.add(ren, lineactor)

# Horizontal (axial) slice of T1 data
vol_actor1 = fvtk.slicer(t1_data, affine=affine)
vol_actor1.display(None, None, 20)
fvtk.add(ren, vol_actor1)

# Vertical (sagittal) slice of T1 data
vol_actor2 = fvtk.slicer(t1_data, affine=affine)
vol_actor2.display(35, None, None)
fvtk.add(ren, vol_actor2)

# Show original fibers
fvtk.camera(ren, pos=(-264, 285, 155), focal=(0, -14, 9), viewup=(0, 0, 1),
            verbose=False)
fvtk.record(ren, n_frames=1, out_path='OR_before.png', size=(900, 900))

# Show thresholded fibers
fvtk.rm(ren, lineactor)
fvtk.add(ren, actor.line(fbc_sl_thres, clrs_thres, linewidth=0.2))
fvtk.record(ren, n_frames=1, out_path='OR_after.png', size=(900, 900))

"""
.. figure:: OR_before.png
   :align: center

   The optic radiation obtained through probabilistic tractography, colored by
   local fiber-to-bundle coherence (LFBC).

.. figure:: OR_after.png
Example #10
    scms.append(scm)

smoments = [0, 6]
lambdas = [0, 0.001, 0.01, 0.1, 1, 2]

for smoment in smoments:

    for (k, lambd) in enumerate(lambdas):

        for (i, radial_order) in enumerate(radial_orders):
            scm = scms[i]
            scm.lambd = lambd
            for (j, angle) in enumerate(angles):
                print(radial_order, angle)
                odfs[i + 1, j] = scm.fit(sim_data[j]).odf(sphere, smoment=smoment)

        odfs = odfs[:, None, :]

        ren = fvtk.ren()
        fvtk.add(ren, fvtk.sphere_funcs(odfs, sphere))

        fvtk.camera(ren, [0, -5, 0], [0, 0, 0], viewup=[-1, 0, 0])

        #fvtk.show(ren)
        fname = ('shore_cart_odfs_snr_' + str(SNR) + '_s_' + str(smoment) +
                 '_' + str(k) + '_l_' + str(lambd) + '.png')

        fvtk.record(ren, n_frames=1, out_path=fname, size=(1000, 1000))

        odfs = np.squeeze(odfs)

Example #11
ren = fvtk.ren()

# convolve kernel with delta spike
spike = np.zeros((7, 7, 7, k.get_orientations().shape[0]), dtype=np.float64)
spike[3, 3, 3, 0] = 1
spike_shm_conv = convolve(sf_to_sh(spike, k.get_sphere(), sh_order=8), k,
                          sh_order=8, test_mode=True)

sphere = get_sphere('symmetric724')
spike_sf_conv = sh_to_sf(spike_shm_conv, sphere, sh_order=8)
model_kernel = fvtk.sphere_funcs((spike_sf_conv * 6)[3, :, :, :],
                                  sphere,
                                  norm=False,
                                  radial_scale=True)
fvtk.add(ren, model_kernel)
fvtk.camera(ren, pos=(30, 0, 0), focal=(0, 0, 0), viewup=(0, 0, 1), verbose=False)
fvtk.record(ren, out_path='kernel.png', size=(900, 900))

"""
.. figure:: kernel.png
   :align: center

   Visualization of the contour enhancement kernel.
"""

"""
Shift-twist convolution is applied on the noisy data
"""

# Perform convolution
csd_shm_enh = convolve(csd_shm_noisy, k, sh_order=8)
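To inspect the result, the enhanced spherical harmonic coefficients can be projected back onto the sphere and rendered with the same calls used for the kernel above; a minimal sketch (the slice index and output filename are illustrative):

# Project the enhanced SH coefficients onto the sphere and render one slice
csd_sf_enh = sh_to_sf(csd_shm_enh, sphere, sh_order=8)
ren = fvtk.ren()
fvtk.add(ren, fvtk.sphere_funcs(csd_sf_enh[:, :, csd_sf_enh.shape[2] // 2],
                                sphere, norm=False))
fvtk.record(ren, out_path='enhanced_odfs.png', size=(900, 900))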
Example #12
ren = fvtk.ren()
ren.SetBackground(*fvtk.colors.white)
bundle_actor = fvtk.streamtube(bundle, fvtk.colors.red, linewidth=0.3)

fvtk.add(ren, bundle_actor)

bundle_actor2 = fvtk.streamtube(bundle_downsampled,
                                fvtk.colors.red,
                                linewidth=0.3)
bundle_actor2.SetPosition(0, 40, 0)

bundle_actor3 = fvtk.streamtube(bundle_downsampled2,
                                fvtk.colors.red,
                                linewidth=0.3)
bundle_actor3.SetPosition(0, 80, 0)

fvtk.add(ren, bundle_actor2)
fvtk.add(ren, bundle_actor3)

fvtk.camera(ren, pos=(0, 0, 0), focal=(30, 0, 0))
fvtk.record(ren, out_path="simulated_cosine_bundle.png", size=(900, 900))

"""
.. figure:: simulated_cosine_bundle.png
   :align: center

   **Initial bundle (down), downsampled at 12 equidistant points (middle), downsampled non-equidistantly (up)**

From the figure above we can see that all 3 bundles look quite similar. However,
when we plot the histogram of the number of points used for each streamline, it
becomes obvious that we have greatly reduced the size of the initial dataset.
"""

import matplotlib.pyplot as plt
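A minimal sketch of that histogram (assuming bundle, bundle_downsampled, and bundle_downsampled2 are the sequences of streamline point arrays created earlier):

# Compare the number of points per streamline before and after downsampling
n_pts_initial = [len(streamline) for streamline in bundle]
n_pts_ds = [len(streamline) for streamline in bundle_downsampled]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]

fig, ax = plt.subplots(1)
ax.hist(n_pts_initial, color='r', histtype='step', label='initial')
ax.hist(n_pts_ds, color='g', histtype='step', label='downsampled (12 points)')
ax.hist(n_pts_ds2, color='b', histtype='step',
        label='downsampled (non-equidistant)')
ax.set_xlabel('Number of points')
ax.set_ylabel('Count')
plt.legend()
plt.savefig('n_pts_histogram.png')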
Example #14
# Make display objects
color = line_colors(cc_streamlines)
cc_streamlines_actor = fvtk.line(cc_streamlines, color)
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
                            opacities=[1.])

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, cc_streamlines_actor)
fvtk.add(r, cc_ROI_actor)

# Save figures
fvtk.record(r, n_frames=1, out_path='corpuscallosum_axial.png',
            size=(800, 800))
fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(r, n_frames=1, out_path='corpuscallosum_sagittal.png',
            size=(800, 800))

"""
.. figure:: corpuscallosum_axial.png
   :align: center

   **Corpus Callosum Axial**

.. include:: ../links_names.inc

.. figure:: corpuscallosum_sagittal.png
   :align: center

   **Corpus Callosum Sagittal**
"""

hdr['voxel_order'] = 'LAS'
hdr['dim'] = FA.shape
tensor_streamlines_trk = ((sl, None, None) for sl in tensor_streamlines)
ten_sl_fname = sys.argv[3] + '_streamlines.trk'

nib.trackvis.write(ten_sl_fname,
                   tensor_streamlines_trk,
                   hdr,
                   points_space='voxel')
from dipy.viz import fvtk
ren = fvtk.ren()
position_1 = (5.35, 59.07, 362.79)
focal_point_1 = (55.35, 59.07, 29.54)
viewup_1 = (0.00, 1.00, 0.00)
camera1 = fvtk.vtk.vtkCamera()
camera1.SetPosition(position_1)
camera1.SetFocalPoint(focal_point_1)

position_2 = (50.35, 59.07, 362.79)
focal_point_2 = (55.35, 59.07, 29.54)
viewup_2 = (0.00, 1.00, 0.00)
camera2 = fvtk.vtk.vtkCamera()
camera2.SetPosition(position_2)
camera2.SetFocalPoint(focal_point_2)
# Note: the second call replaces the first, so camera2 is the active camera
ren.SetActiveCamera(camera1)
ren.SetActiveCamera(camera2)
from dipy.viz.colormap import line_colors
fvtk.add(ren,
         fvtk.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))
ren.SetBackground(1, 1, 1)
camera = fvtk.camera(ren)
fvtk.record(ren,
            n_frames=1,
            out_path=sys.argv[3] + '_tracks.png',
            size=(600, 600))
fvtk.show(ren)
Example #18
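The qb object is not created in this fragment; a minimal sketch using the old dipy QuickBundles API that matches the attributes used below (dist_thr and pts are illustrative):

from dipy.segment.quickbundles import QuickBundles

# Cluster the streamlines; qb.centroids and qb.clusters() are used below
qb = QuickBundles(streamlines, dist_thr=10., pts=12)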
centroids = qb.centroids
clusters = qb.clusters()
colormap = np.random.rand(len(centroids), 3)


ren = fvtk.ren()
cam = fvtk.camera(ren, pos=(0, 0, -1), viewup=(0, 1, 0))

fvtk.clear(ren)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.streamtube(centroids, colormap, linewidth=0.4))

