Example #1
def show_signals(data,bvals,gradients,sticks=None):
    
    s=data[1:]
    s0=data[0]    
    ls=np.log(s)-np.log(s0)
    ind=np.arange(1,data.shape[-1])
    ob=-1/bvals[1:]
    #lg=np.log(s[1:])-np.log(s0)
    d=ob*(np.log(s)-np.log(s0))    
    r=fvtk.ren()
    all=fvtk.crossing(s,ind,gradients,scale=1)
    #fvtk.add(r,fvtk.line(all,fvtk.coral))    
    #d=d-d.min()        
    #all3=fvtk.crossing(d,ind,gradients,scale=10**4)
    #fvtk.add(r,fvtk.line(all3,fvtk.red))    
    #d=d-d.min()        
    all2=fvtk.crossing(d,ind,gradients,scale=10**4)
    fvtk.add(r,fvtk.line(all2,fvtk.green))    
    #"""
    #d2=d*10**4
    #print d2.min(),d2.mean(),d2.max(),d2.std()    
    for a in all2:    
        fvtk.label(r,str(np.round(np.linalg.norm(a[0]),2)),pos=a[0],scale=(.2,.2,.2),color=(1,0,0))        
    if sticks is not None:
        for stick in sticks:
            ln=np.zeros((2,3))
            ln[1]=stick
            fvtk.add(r,fvtk.line(d.max()*10**4*ln,fvtk.blue))
    #data2=data.reshape(1,len(data))
    #pdi=ProjectiveDiffusivity(data2,bvals,gradients,dotpow=6,width=6,sincpow=2)
    #pd=pdi.spherical_diffusivity(data)
    #print pd
    #"""    
    fvtk.show(r)
def visualize(ren, tract1, tract2, mapping):
    
    #c = fvtk.line(lines, fvtk.green)    
    #fvtk.add(r,c)
    colors = [fvtk.red, fvtk.green, fvtk.blue, fvtk.white, fvtk.yellow,
              fvtk.gray, fvtk.hot_pink]
    # other available colors: fvtk.cyan, fvtk.dark_blue, fvtk.dark_green,
    # fvtk.dark_red, fvtk.golden
    for i in np.arange(len(tract1)):
        fvtk.add(ren, fvtk.line(tract1[i], colors[i % len(colors)], opacity=1.0))
        fvtk.add(ren, fvtk.line(tract2[mapping[i]], colors[i % len(colors)], opacity=1.0))
     
    return ren
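# A minimal usage sketch with synthetic tracts (assumed inputs; ``mapping``
# pairs each track index in tract1 with a track index in tract2):
ren = fvtk.ren()
tract1 = [np.random.rand(10, 3) * 10 for _ in range(3)]
tract2 = [np.random.rand(10, 3) * 10 for _ in range(3)]
visualize(ren, tract1, tract2, mapping={0: 2, 1: 0, 2: 1})
fvtk.show(ren)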
Example #3
def visualize(streamlines_A, streamlines_B, mappingAB, line='line',
              shift=np.array([0.0, 0.0, 200.0]), color_A=fvtk.colors.white,
              color_B=fvtk.colors.green, color_line=fvtk.colors.yellow):
    assert(len(mappingAB) == len(streamlines_A))
    assert(mappingAB.max() <= len(streamlines_B))
    if line == 'line':
        line = fvtk.line
        linewidth = 2.0
        linekwargs = {'linewidth':linewidth}
    elif line == 'tube':
        line = fvtk.streamtube
        linewidth = 1.0
        linekwargs = {'linewidth':linewidth, 'lod':False}
    else:
        raise Exception

    if color_A == 'auto':
        color_A = line_colors(streamlines_A)

    if color_B == 'auto':
        color_B = line_colors(streamlines_B)
            
    streamlines_B_shifted = np.array([s + shift for s in streamlines_B])
    midpointA = [s[int(s.shape[0] / 2)] for s in streamlines_A]
    midpointB = [s[int(s.shape[0] / 2)] for s in streamlines_B_shifted]

    ren = fvtk.ren()
    fvtk.add(ren, line(streamlines_A.tolist(), colors=color_A, **linekwargs))
    fvtk.add(ren, line(streamlines_B_shifted.tolist(), colors=color_B, **linekwargs))
    fvtk.add(ren, fvtk.line(list(zip(midpointA, midpointB)), colors=color_line, opacity=0.5, linewidth=0.5))
    fvtk.show(ren)
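# A minimal usage sketch with small synthetic streamline sets (object arrays,
# since the function calls .tolist() on its inputs); the mapping here is the
# identity:
streamlines_A = np.array([np.random.rand(10 + i, 3) * 50 for i in range(5)], dtype=object)
streamlines_B = np.array([np.random.rand(12 + i, 3) * 50 for i in range(5)], dtype=object)
mappingAB = np.arange(len(streamlines_A))
visualize(streamlines_A, streamlines_B, mappingAB, line='line')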
Example #4
def test_fvtk_functions():
    
    # Create a renderer
    r=fvtk.ren()    
    
    # Create 2 lines with 2 different colors
    lines=[np.random.rand(10,3),np.random.rand(20,3)]    
    colors=np.random.rand(2,3)
    c=fvtk.line(lines,colors)    
    fvtk.add(r,c)    

    # Create a volume and return a volumetric actor using volumetric rendering        
    vol=100*np.random.rand(100,100,100)
    vol=vol.astype('uint8')    
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r,v)
    
    # Remove all objects
    fvtk.rm_all(r)
    
    # Put some text    
    l=fvtk.label(r,text='Yes Men')
    fvtk.add(r,l)

    # Show everything
    #fvtk.show(r)

    
Example #5
def test_fvtk_functions():

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    fvtk.add(r, fvtk.slicer(vol, plane_i=[50]))

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)
Example #6
def visualize_bundles(trk, ren=None, inline=True, interact=False):
    """
    Visualize bundles in 3D using fvtk


    """
    if isinstance(trk, str):
        trk = nib.streamlines.load(trk)

    if ren is None:
        ren = fvtk.ren()

    for b in np.unique(trk.tractogram.data_per_streamline['bundle']):
        idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0]
        this_sl = list(trk.streamlines[idx])
        sl_actor = fvtk.line(this_sl, Tableau_20.colors[int(b)])
        fvtk.add(ren, sl_actor)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        fvtk.record(ren, out_path=fname)
        display.display_png(display.Image(fname))

    if interact:
        fvtk.show(ren)

    return ren
Example #7
def show_signal(r,S,gradients,offset=np.zeros(3)):
    PS = np.dot(np.diag(S),gradients)
#for (i,s) in enumerate(S):
    fvtk.add(r,fvtk.point(offset+PS/np.max(PS),fvtk.cyan,point_radius=0.05,theta=8,phi=8))
    fvtk.add(r,fvtk.point([offset],fvtk.green,point_radius=0.1,theta=8,phi=8))    
    fvtk.add(r,fvtk.axes((1,1,1)))
    lines = fvtk.line([1.5*np.row_stack((needles3d[0],-needles3d[0])), \
                   1.5*np.row_stack((needles3d[1],-needles3d[1]))], \
                   colors=np.row_stack((fvtk.golden,fvtk.aquamarine)),linewidth=10)
    fvtk.add(r,lines)
def show_all_bundles_fnames(fnames, colors=None):

    ren = fvtk.ren()
    for (i, fname) in enumerate(fnames):
        streamlines = read_bundles(fname)
        if colors is None:
            color = np.random.rand(3)
        else:
            color = colors[i]
        fvtk.add(ren, fvtk.line(streamlines, color))
    fvtk.show(ren)
Example #9
def see_skeletons(fskel):
    
    C=load_pickle(fskel)
    tracks=[C[c]['most'] for c in C if C[c]['N'] > 10 ]
    
    r=fvtk.ren()    
    colors=np.array([t[0]-t[-1] for t in tracks])
    colors=colormap.orient2rgb(colors)
    fvtk.add(r,fvtk.line(tracks,colors))
    
    fvtk.show(r)
Example #10
def test_fvtk_functions():
    # These tests will fail if any of the given actors changed inputs or do
    # not exist

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # create streamtubes of the same lines and shift them a bit
    c2 = fvtk.streamtube(lines, colors)
    c2.SetPosition(2, 0, 0)
    fvtk.add(r, c2)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)

    # Slice the volume
    slicer = fvtk.slicer(vol)
    slicer.display(50, None, None)
    fvtk.add(r, slicer)

    # Change the position of the active camera
    fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)

    fvtk.clear(r)

    # Peak directions
    p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
    fvtk.add(r, p)

    p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
                    np.random.rand(3, 3, 3, 5),
                    colors=(0, 1, 0))
    fvtk.add(r, p2)
Example #11
def show_zero_level(r,bundle,dist):

    T=[downsample(b,12) for b in bundle]
    C=local_skeleton_clustering(T,dist)
    vs=[]
    colors=np.zeros((len(T),3))
    for c in C:
        vs.append(C[c]['hidden']/C[c]['N'])
        color=np.random.rand(3,)
        #fvtk.add(r,fvtk.line(vs,color,linewidth=4.5))
        for i in C[c]['indices']:
            colors[i]=color
            fvtk.label(r,text=str(i),pos=(bundle[i][-1]),scale=(.5,.5,.5),color=(color[0],color[1],color[2]))    
    fvtk.add(r,fvtk.line(T,colors,linewidth=2.))    
Example #12
def show(T,A,IND,VERTS,scale):
    
    r=fvtk.ren()
    fvtk.clear(r)
    fvtk.add(r,fvtk.line(T,fvtk.red))
    fvtk.show(r)
    
    Td=[downsample(t,20) for t in T]
    C=local_skeleton_clustering(Td,3)
    fvtk.clear(r)
    lent=float(len(T))
    
    for c in C:
        color=np.random.rand(3)
        virtual=C[c]['hidden']/float(C[c]['N'])
        if length(virtual)> virtual_thr: 
            linewidth=100*len(C[c]['indices'])/lent
            if linewidth<1.:
                linewidth=1
            #fvtk.add(r,fvtk.line(virtual,color,linewidth=linewidth))
            #fvtk.add(r,fvtk.label(r,str(len(C[c]['indices'])),pos=virtual[0],scale=3,color=color ))
        #print C[c]['hidden'].shape
    
    print(A.shape)
    print(IND.shape)
    print(VERTS.shape)
    
    all,allo=fvtk.crossing(A,IND,VERTS,scale,True)
    colors=np.zeros((len(all),3))
    for (i,a) in enumerate(all):
        if allo[i][0]==0 and allo[i][1]==0 and allo[i][2]==1:
            pass
        else:            
            colors[i]=cm.boys2rgb(allo[i])
    
    fvtk.add(r,fvtk.line(all,colors))    
    fvtk.show(r)
Example #13
def visualize_tract_transparence(ren, tract, color=None, tran = 1.0, lwidth=1.0):  
    if color is None:
        dipy_ver = dipy_version()
        #print dipy_ver        
        from distutils.version import StrictVersion
        minimize_version = StrictVersion('0.7') 
        
        if dipy_ver > minimize_version:
            color = fvtk.colors.red       
        else:
            color = fvtk.red            

    for i in np.arange(len(tract)):
        fvtk.add(ren, fvtk.line(tract[i], color, opacity=tran, linewidth=lwidth))        
    return ren
Example #14
def check_bigger_clusters():

    avirtuals={}
    
    for (i,out) in enumerate(outs):
        C=load_pickle(dout+out+'.skl')
        cinds=np.zeros(len(C))
        for c in C:
            cinds[c]=len(C[c]['indices'])
        
        descend=np.argsort(cinds)[::-1]
        desc=descend[:400]
        virtuals=[]
        for c in desc:
            v=C[c]['hidden']/float(C[c]['N'])
            virtuals.append(v)        
        avirtuals[i]=virtuals
    
        
    r=fvtk.ren()
    fvtk.add(r,fvtk.line(avirtuals[0],fvtk.red))
    fvtk.add(r,fvtk.line(avirtuals[9],fvtk.yellow))
    fvtk.add(r,fvtk.line(avirtuals[5],fvtk.green))
    fvtk.show(r)
def fosvtk_show_fibres_with_labels(fibres, labels):
	"""
	STILL IN DEVELOPMENT
	"""
	#from dipy.tracking import metrics as tm
	#from dipy.tracking import distances as td
	from dipy.viz import fvtk
	#from nibabel import trackvis as tv
	
	## load trackvis streams
	#streams,hdr=tv.read(trk_name)
	## copy tracks
	## downsample - will avoid that
	r = fvtk.ren()
	## 'colors' is an array of numbers the same size as the number of tracks
	fvtk.add(r,fvtk.line(fibres, labels, opacity=1))
def label_streamlines(streamlines,labels,labels_Value,affine,hdr,f_name,data_path):  
      
    cc_slice=labels==labels_Value
    cc_streamlines = utils.target(streamlines, cc_slice, affine=affine)
    cc_streamlines = list(cc_streamlines)

    other_streamlines = utils.target(streamlines, cc_slice, affine=affine,
                                 include=False)
    other_streamlines = list(other_streamlines)
    assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)
    

    print ("num of roi steamlines is %d",len(cc_streamlines))
    

    # Make display objects
    color = line_colors(cc_streamlines)
    cc_streamlines_actor = fvtk.line(cc_streamlines, line_colors(cc_streamlines))
    cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
                            opacities=[1.])

    # Add display objects to canvas
    r = fvtk.ren()
    fvtk.add(r, cc_streamlines_actor)
    fvtk.add(r, cc_ROI_actor)

    # Save figures
    fvtk.record(r, n_frames=1, out_path=f_name+'_roi.png',
            size=(800, 800))
    fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
    fvtk.record(r, n_frames=1, out_path=f_name+'_roi.png',
            size=(800, 800))
    """"""

    csd_streamlines_trk = ((sl, None, None) for sl in cc_streamlines)
    csd_sl_fname = f_name+'_roi_streamline.trk'
    nib.trackvis.write(csd_sl_fname, csd_streamlines_trk, hdr, points_space='voxel')
    #nib.save(nib.Nifti1Image(FA, img.get_affine()), 'FA_map2.nii.gz')
    print('Saving "_roi_streamline.trk" sucessful.')

    import tractconverter as tc
    input_format=tc.detect_format(csd_sl_fname)
    input=input_format(csd_sl_fname)
    output=tc.FORMATS['vtk'].create(csd_sl_fname+".vtk",input.hdr)
    tc.convert(input,output)
    
    return cc_streamlines
Example #17
def renderCentroids(streamlines, clusters):
    from dipy.viz import fvtk
    import numpy as np
    
    ren = fvtk.ren()
    ren.SetBackground(0, 0, 0)
    colormap = fvtk.create_colormap(np.arange(len(clusters)))

    colormap_full = np.ones((len(streamlines), 3))
    for cluster in clusters:
        colormap_full[cluster.indices] = np.random.rand(3)

    #fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
    fvtk.add(ren, fvtk.line(clusters.centroids, linewidth=0.4, opacity=1))
    #fvtk.record(ren, n_frames=1, out_path='fornix_centroids.png', size=(600, 600))
    fvtk.show(ren)
    fvtk.clear(ren)
Example #18
def renderBundles(streamlines, clusters):
    from dipy.viz import fvtk
    import numpy as np
    
    ren = fvtk.ren()
    ren.SetBackground(0, 0, 0)

    colormap = fvtk.create_colormap(np.arange(len(clusters)))

    colormap_full = np.ones((len(streamlines), 3))
    for cluster in clusters:
        colormap_full[cluster.indices] = np.random.rand(3)

    fvtk.add(ren, fvtk.line(streamlines, colormap_full))
    #fvtk.record(ren, n_frames=1, out_path='fornix_clusters.png', size=(600, 600))
    fvtk.show(ren)
    fvtk.clear(ren)
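# A usage sketch for the two cluster-rendering functions above
# (renderCentroids and renderBundles), assuming dipy's QuickBundles
# clustering API (dipy.segment.clustering), whose cluster map exposes
# ``.centroids`` and per-cluster ``.indices``:
import numpy as np
from dipy.segment.clustering import QuickBundles

streamlines = [np.random.rand(20, 3) * 30 for _ in range(50)]
qb = QuickBundles(threshold=10.)
clusters = qb.cluster(streamlines)
renderBundles(streamlines, clusters)
renderCentroids(streamlines, clusters)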
def show_tract(segmented_tract_positive, color_positive, color_negative,
               segmented_tract_negative):
    """Visualization of the segmented tract.
   """
    ren = fvtk.ren()
    fvtk.add(
        ren,
        fvtk.line(segmented_tract_positive.tolist(),
                  colors=color_positive,
                  linewidth=2,
                  opacity=0.3))
    #   fvtk.add(ren, fvtk.line(segmented_tract_negative.tolist(),
    #                           colors=color_negative,
    #                           linewidth=2,
    #                           opacity=0.3))
    fvtk.show(ren)
    fvtk.clear(ren)
Example #20
def draw_needles(r,sticks0,sc=60,w=5,off=np.array([0,0,0])):
    if len(sticks0)==3:
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[0],sticks0[0]]),fvtk.red,linewidth=w))
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[1],sticks0[1]]),fvtk.green,linewidth=w))
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[2],sticks0[2]]),fvtk.blue,linewidth=w))
    if len(sticks0)==2:
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[0],sticks0[0]]),fvtk.red,linewidth=w))
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[1],sticks0[1]]),fvtk.green,linewidth=w))
    if len(sticks0)==1:
        fvtk.add(r,fvtk.line(off+sc*np.array([-sticks0[0],sticks0[0]]),fvtk.red,linewidth=w))
    if len(sticks0)==0:
        pass
Example #21
def show_streamlines(streamlines, cmap='orient', opacity=1., r=None):

    if r is None:
        ren = fvtk.ren()
    else:
        ren = r
    
    if cmap == 'orient':
        colors = line_colors(streamlines)

    line_actor = fvtk.line(streamlines, colors,
                           opacity=opacity)
    fvtk.add(ren, line_actor)
        
    if r is None:
        fvtk.show(ren)
    else:
        return ren
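# Usage sketch with synthetic streamlines; the default cmap='orient' colours
# each streamline by its orientation via line_colors:
streamlines = [np.random.rand(20, 3) * 30 for _ in range(10)]
show_streamlines(streamlines)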
Example #22
def show_peak_directions(fpng, peaks, scale=0.3, x=10, y=0, z=10):
    r = fvtk.ren()

    for index in ndindex(peaks.shape[:-1]):
        peak = peaks[index]
        directions = peak.reshape(peak.shape[0] // 3, 3)

        # pos = np.array(index)
        for i in range(directions.shape[0]):
            if norm(directions[i]) != 0:
                line_actor = fvtk.line(
                    index + scale * np.vstack((-directions[i], directions[i])), abs(directions[i] / norm(directions[i]))
                )
                line_actor.RotateX(-90)
                fvtk.add(r, line_actor)

    fvtk.show(r)
    fvtk.record(r, out_path=fpng, size=(900, 900))
    fvtk.clear(r)
Example #23
def visualize(streamlines_A,
              streamlines_B,
              mappingAB,
              line='line',
              shift=np.array([0.0, 0.0, 200.0]),
              color_A=fvtk.colors.white,
              color_B=fvtk.colors.green,
              color_line=fvtk.colors.yellow):
    assert (len(mappingAB) == len(streamlines_A))
    assert (mappingAB.max() <= len(streamlines_B))
    if line == 'line':
        line = fvtk.line
        linewidth = 2.0
        linekwargs = {'linewidth': linewidth}
    elif line == 'tube':
        line = fvtk.streamtube
        linewidth = 1.0
        linekwargs = {'linewidth': linewidth, 'lod': False}
    else:
        raise Exception

    if color_A == 'auto':
        color_A = line_colors(streamlines_A)

    if color_B == 'auto':
        color_B = line_colors(streamlines_B)

    streamlines_B_shifted = np.array([s + shift for s in streamlines_B])
    midpointA = [s[int(s.shape[0] / 2)] for s in streamlines_A]
    midpointB = [s[int(s.shape[0] / 2)] for s in streamlines_B_shifted]

    ren = fvtk.ren()
    fvtk.add(ren, line(streamlines_A.tolist(), colors=color_A, **linekwargs))
    fvtk.add(
        ren, line(streamlines_B_shifted.tolist(), colors=color_B,
                  **linekwargs))
    fvtk.add(
        ren,
        fvtk.line(list(zip(midpointA, midpointB)),
                  colors=color_line,
                  opacity=0.5,
                  linewidth=0.5))
    fvtk.show(ren)
Example #24
def see_spherical_intersections(fdpy,fsr):
    
    dpr=Dpy(fdpy,'r')
    T=dpr.read_tracks()
    dpr.close()
    
    SR=load_pickle(fsr)
    
    r=fvtk.ren()
    
    for key in SR:
        ind=SR[key]['indices']
        intersT=[T[i] for i in ind]
        fvtk.add(r,fvtk.line(intersT,np.random.rand(3)))    
        centerw=SR[key]['centerw']
        radius=SR[key]['radiusw']
        fvtk.add(r,fvtk.sphere(position=centerw,radius=radius))
        
    fvtk.show(r)
Example #25
def see_tracks(fdpy,N=2000):
    
    
    dpr=Dpy(fdpy,'r')
    #T=dpr.read_tracksi(range(N))
    T=dpr.read_tracks()
    dpr.close()    
    
    T=[downsample(t,5) for t in T]    

    r=fvtk.ren()
    colors=np.ones((len(T),3)).astype('f4')
    for (i,c) in enumerate(T):        
        orient=c[0]-c[-1]
        orient=np.abs(orient/np.linalg.norm(orient))
        colors[i,:3]=orient    
    fvtk.add(r,fvtk.line(T,colors,opacity=0.5))
    #fos.add(r,fos.sphere((0,0,0),10))
    fvtk.show(r)
Example #26
def show_peak_directions(peaks, scale=0.3, x=10, y=0, z=10):
    """ visualize peak directions

    Parameters
    ----------
    peaks : ndarray,
            (X, Y, Z, 15)
    scale : float
            voxel scaling (0 <= `scale` <= 1)
    x : int,
        x slice (0 <= x <= X-1)
    y : int,
        y slice (0 <= y <= Y-1)
    z : int,
        z slice (0 <= z <= Z-1)

    Notes
    -----
    If x, y, and z are all None, the full volume is shown.

    """
    # if x is None and y is None and z is None:
    #    raise ValueError('A slice should be provided')

    r = fvtk.ren()

    for index in ndindex(peaks.shape[:-1]):
        peak = peaks[index]
        directions = peak.reshape(peak.shape[0] // 3, 3)

        #pos = np.array(index)
        for i in range(directions.shape[0]):
            if norm(directions[i]) != 0:
                line_actor = fvtk.line(index +
                                       scale * vstack((-directions[i], directions[i])),
                                       abs(directions[i] / norm(directions[i])))
                line_actor.RotateX(-90)
                fvtk.add(r, line_actor)

    fvtk.show(r)
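# A minimal usage sketch with synthetic peaks (assumes the module-level
# imports used above: numpy as np, fvtk, ndindex, norm, vstack). The 15
# values per voxel encode up to 5 direction vectors; here every voxel gets a
# single peak along x:
peaks = np.zeros((3, 3, 3, 15))
peaks[..., 0] = 1.0
show_peak_directions(peaks)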
Example #27
def test_fvtk_functions():

    # Create a renderer
    r = fvtk.ren()

    # Create 2 lines with 2 different colors
    lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
    colors = np.random.rand(2, 3)
    c = fvtk.line(lines, colors)
    fvtk.add(r, c)

    # Create a volume and return a volumetric actor using volumetric rendering
    vol = 100 * np.random.rand(100, 100, 100)
    vol = vol.astype('uint8')
    r = fvtk.ren()
    v = fvtk.volume(vol)
    fvtk.add(r, v)

    # Remove all objects
    fvtk.rm_all(r)

    # Put some text
    l = fvtk.label(r, text='Yes Men')
    fvtk.add(r, l)
Example #28
def show_gt_streamlines(streamlines, radii, cmap='orient', r=None):

    if cmap is None:
        np.random.seed(42)
        colors = np.random.rand(len(streamlines), 3)
    if cmap == 'orient':
        colors = line_colors(streamlines)

    if r is None:
        ren = fvtk.ren()
    else:
        ren = r

    for i in range(len(streamlines)):
        line_actor = fvtk.line(streamlines[i],
                               colors[i],
                               linewidth=(radii[i, 1] ** 2) / 2.)
        fvtk.add(ren, line_actor)
        label_actor = fvtk.label(ren, text=str(np.round(radii[i, 1], 2)),
                                 pos=(streamlines[i][0]),
                                 scale=(.8, .8, .8),
                                 color=(colors[i]))

        fvtk.add(ren, label_actor)

        label_actor_id = fvtk.label(ren, text='[' + str(i) + ']',
                                    pos=(streamlines[i][-1]),
                                    scale=(.8, .8, .8),
                                    color=(colors[i]))

        fvtk.add(ren, label_actor_id)

    if r is None:
        fvtk.show(ren)
    else:
        return ren
Example #29
def runStream(csd_peaks, roi_file, roi_label=1, output_file="tracts.dpy", ang_thr=45., a_low=0.2, step_size=0.1, seeds_per_voxel=30):

    img = nib.load(roi_file)
    roidata = img.get_data()
    p = np.asarray(np.where(roidata == roi_label))
    p = p.transpose()

    seed_points = None
    for i in p:
        points = np.random.uniform(size=[seeds_per_voxel,3]) + (i-0.5)
        if seed_points is None:
            seed_points = points
        else:
            seed_points = np.concatenate([seed_points, points], axis=0)

    sphere = get_sphere('symmetric724')
    print "seed eudx tractography"
    eu = EuDX(csd_peaks.peak_values,
              csd_peaks.peak_indices,
              odf_vertices=sphere.vertices,
              step_sz=step_size,
              seeds=seed_points,
              ang_thr=ang_thr,
              a_low=a_low)

    csa_streamlines_mult_peaks = [streamline for streamline in eu]

    ren = fvtk.ren()

    fvtk.add(ren, fvtk.line(csa_streamlines_mult_peaks, line_colors(csa_streamlines_mult_peaks)))
    fvtk.show(ren)

    dpw = Dpy(output_file, 'w')
    dpw.write_tracks(csa_streamlines_mult_peaks)
    dpw.close()

    return csa_streamlines_mult_peaks
Example #30
"""

del streams,hdr

"""
Perform QuickBundles clustering with a 10mm threshold:
"""

qb=QuickBundles(tracks, dist_thr=10., pts=None)

"""
Show the initial *Fornix* dataset:
"""

r=fvtk.ren()
fvtk.add(r,fvtk.line(T, fvtk.white, opacity=1, linewidth=3))
#fvtk.show(r)
fvtk.record(r,n_frames=1,out_path='fornix_initial',size=(600,600))
fvtk.clear(r)
"""
.. figure:: fornix_initial1000000.png
   :align: center

   **Initial Fornix dataset**.
"""

"""
Show the centroids of the *Fornix* after clustering (with random colors):
"""

Example #31
C1=get_skeleton('C1')
C3=get_skeleton('C3')

"""
We create a diagram with the two skeletons offset [100,0,0] apart
"""

from dipy.viz import fvtk
r=fvtk.ren()

T1=[]
for c in C1:
    T1.append(C1[c]['most'])

fvtk.add(r,fvtk.line(T1,fvtk.gray))

T3=[]
for c in C3:
    T3.append(C3[c]['most'])

T3s=[t+ np.array([100,0,0]) for t in T3]

fvtk.add(r,fvtk.line(T3s,fvtk.gray))

# To show now use:
#fvtk.show(r)

"""
For each track in T1, find the minimum average distance to all the
tracks in T3 and put information about it in ``track2track``.
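# A sketch of one way to build ``track2track``, assuming dipy's MAM bundle
# distances (dipy.tracking.distances.bundles_distances_mam); the tracks are
# cast to float32 as the distance routines expect:
from dipy.tracking.distances import bundles_distances_mam

d13 = bundles_distances_mam([t.astype('f4') for t in T1],
                            [t.astype('f4') for t in T3],
                            metric='avg')
track2track = np.array([(i, d13[i].argmin(), d13[i].min())
                        for i in range(len(T1))])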
Example #32
pointD = [0.25058964, 0.83944661, 0.16528851]
pointE = [0.61336066, 0.28185135, 0.94522671]

example_xyz = np.array([pointA, pointB, pointC, pointD, pointE])

ren = fvtk.ren()
point_actor = fvtk.point(example_xyz, fvtk.colors.blue_light)
fvtk.add(ren, point_actor)

lineAB = np.array([pointA, pointB])
lineBC = np.array([pointB, pointC])
lineCD = np.array([pointC, pointD])
lineCE = np.array([pointC, pointE])

line_color = [0.9, 0.97, 1.0]
line_actor_AB = fvtk.line(lineAB, line_color)
line_actor_BC = fvtk.line(lineBC, line_color)
line_actor_CD = fvtk.line(lineCD, line_color)
line_actor_CE = fvtk.line(lineCE, line_color)

fvtk.add(ren, line_actor_AB)
fvtk.add(ren, line_actor_BC)
fvtk.add(ren, line_actor_CD)
fvtk.add(ren, line_actor_CE)

#fvtk.show(ren)

fvtk.camera(ren, [-1, -1, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(ren,
            n_frames=1,
            out_path='simple_network_example.png',
Example #33
"""

gqs_tracks_asobj=np.array(gqs_tracks,dtype=np.object)
np.save('gqs_tracks.npy',gqs_tracks_asobj)
print('QA tracks saved in gqs_tracks.npy')

"""
**This is the end of this very simple example** You can reload the saved tracks using 
``np.load`` from your current directory. You can optionally install ``python-vtk``
and visualize the tracks using ``fvtk``:
"""

from dipy.viz import fvtk
r=fvtk.ren()
fvtk.add(r,fvtk.line(ten_tracks,fvtk.red,opacity=0.05))
gqs_tracks2=[t+np.array([10,0,0]) for t in gqs_tracks]
fvtk.add(r,fvtk.line(gqs_tracks2,fvtk.green,opacity=0.05))

"""
Press 's' to save this screenshot when you have displayed it with ``fvtk.show``.
Or you can even record a video using ``fvtk.record``.

You would show the figure with something like::

    fvtk.show(r,png_magnify=1,size=(600,600))

To record a video of 50 frames of png, something like::

    fvtk.record(r,cam_pos=(0,40,-40),cam_focal=(5,0,0),n_frames=50,magnification=1,out_path='nii_2_tracks',size=(600,600),bgr_color=(0,0,0))
Example #34
"""
Delete unnecessary data:
"""

del streams, hdr
"""
Perform QuickBundles clustering with a 10mm threshold:
"""

qb = QuickBundles(tracks, dist_thr=10., pts=None)
"""
Show the initial *Fornix* dataset:
"""

r = fvtk.ren()
fvtk.add(r, fvtk.line(T, fvtk.white, opacity=1, linewidth=3))
#fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='fornix_initial', size=(600, 600))
fvtk.clear(r)
"""
.. figure:: fornix_initial1000000.png
   :align: center

   **Initial Fornix dataset**.
"""
"""
Show the centroids of the *Fornix* after clustering (with random colors):
"""

centroids = qb.centroids
colormap = np.ones((len(centroids), 3))
Example #35
all_streamlines_threshold_classifier = LocalTracking(dg,
                                                     threshold_classifier,
                                                     seeds,
                                                     affine,
                                                     step_size=.5,
                                                     return_all=True)

save_trk("deterministic_threshold_classifier_all.trk",
         all_streamlines_threshold_classifier,
         affine,
         labels.shape)

streamlines = [sl for sl in all_streamlines_threshold_classifier]

fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_threshold_classifier.png',
            size=(600, 600))

"""
.. figure:: all_streamlines_threshold_classifier.png
   :align: center

   **Deterministic tractography using a thresholded fractional anisotropy.**
"""


"""
Binary Tissue Classifier
------------------------
A binary mask can be used to define where the tracking stops. The binary
Example #36
Perform QuickBundles clustering with a 10mm distance threshold after having
downsampled the streamlines to have only 12 points.
"""

qb = QuickBundles(streamlines, dist_thr=10., pts=18)
"""
qb has attributes like `centroids` (cluster representatives), `total_clusters`
(total number of clusters) and methods like `partitions` (complete description
of all clusters) and `label2tracksids` (provides the indices of the streamlines
which belong in a specific cluster); a short sketch of these appears at the end of this example.

Lets first show the initial dataset.
"""

r = fvtk.ren()
fvtk.add(r, fvtk.line(streamlines, fvtk.white, opacity=1, linewidth=3))
fvtk.record(r, n_frames=1, out_path='fornix_initial.png', size=(600, 600))
"""
.. figure:: fornix_initial.png
   :align: center

   **Initial Fornix dataset**.

Show the centroids of the fornix after clustering (with random colors):
"""

centroids = qb.centroids
colormap = np.random.rand(len(centroids), 3)

fvtk.clear(r)
fvtk.add(r, fvtk.line(centroids, colormap, opacity=1., linewidth=5))
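# A short sketch of the attributes and methods described at the start of this
# example (assuming the ``qb`` object created above):
print(qb.total_clusters)            # number of clusters found
centroids = qb.centroids            # one representative streamline per cluster
parts = qb.partitions()             # full description of every cluster
ids = qb.label2tracksids(0)         # indices of the streamlines in cluster 0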
Example #37
lines_color = [205 / 255.0, 247 / 255.0, 255 / 255.0]
points_color = [2 / 255.0, 128 / 255.0, 232 / 255.0]

lines = []
for columnNumber in range(86):
    for rowNumber in range(86):
        if labelsConnectivity[columnNumber][rowNumber] > 20:
            lines.append([label_coords[columnNumber], label_coords[rowNumber]])

ren = fvtk.ren()
pointActors = fvtk.point(label_coords,
                         points_color,
                         opacity=0.8,
                         point_radius=3)
lineActors = fvtk.line(lines, lines_color, opacity=0.2, linewidth=2)

fvtk.add(ren, pointActors)
fvtk.add(ren, lineActors)

# to explore the data in 3D interactive way
# fvtk.show(ren)

# save figure

fvtk.camera(ren, [-1, -1, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(ren,
            n_frames=1,
            out_path='brain_network_example.png',
            size=(600, 600))
Example #38
display the resulting streamlines using the fvtk module.
"""

from dipy.tracking.local import LocalTracking
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

# Initialization of LocalTracking. The computation happens in the next step.
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine, step_size=.5)

# Compute streamlines and store as a list.
streamlines = list(streamlines)

# Prepare the display objects.
color = line_colors(streamlines)
streamlines_actor = fvtk.line(streamlines, line_colors(streamlines))

# Create the 3d display.
r = fvtk.ren()
fvtk.add(r, streamlines_actor)

# Save still images for this static example. Or for interactivity use fvtk.show
fvtk.record(r, n_frames=1, out_path='deterministic.png',
            size=(800, 800))

"""
.. figure:: deterministic.png
   :align: center

   **Corpus Callosum Deterministic**
Example #39
def get_fvtk_streamlines_actor(streamlines):
    streamlines_actor = fvtk.line(streamlines)
    return streamlines_actor
Example #40
csa_sl_fname = 'csa_streamline.trk'

nib.trackvis.write(csa_sl_fname,
                   csa_streamlines_trk,
                   hdr,
                   points_space='voxel')
"""
Visualize the streamlines with fvtk (python vtk is required).
"""

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

r = fvtk.ren()

fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))

print('Saving illustration as tensor_tracks.png')

fvtk.record(r, n_frames=1, out_path='csa_tracking.png', size=(600, 600))
"""
.. figure:: csa_tracking.png
   :align: center

   **Deterministic streamlines with EuDX on ODF peaks field modulated by GFA**.

It is also possible to use EuDX with multiple ODF peaks, which is very helpful when
tracking in crossing areas.
"""

eu = EuDX(csapeaks.peak_values,
# Keep only the streamlines longer than a particular length, here 50

from dipy.tracking.metrics import length

longer_streamlines = []
for tract in streamlines:
    if length(tract) > 50.0:
        longer_streamlines.append(tract)

# Streamlines visualization

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

# Make display objects

streamlines_actor = fvtk.line(longer_streamlines,
                              line_colors(longer_streamlines))

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, streamlines_actor)

# Save figure
fvtk.camera(r, [-1, 0, 0], [0, 0, 0], viewup=[0, 0, 1])
fvtk.record(r,
            n_frames=1,
            out_path='streamlines_saggital.png',
            size=(800, 800))
Example #42
hdr['dim'] = csapeaks.gfa.shape[:3]
"""
Save the streamlines.
"""

csa_streamlines_trk = ((sl, None, None) for sl in csa_streamlines)

csa_sl_fname = 'csa_prob_streamline.trk'

nib.trackvis.write(csa_sl_fname, csa_streamlines_trk, hdr)
"""
Visualize the streamlines with fvtk (python vtk is required).
"""

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

r = fvtk.ren()

fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))

print('Saving illustration as csa_prob_tracks.png')

fvtk.record(r, n_frames=1, out_path='csa_prob_tracks.png', size=(600, 600))
"""
.. figure:: csa_prob_tracks.png
   :align: center

   **Probabilistic streamlines applied on an ODF field modulated by GFA**.
"""
Example #43
def show_tract(segmented_tract, color):
    ren = fvtk.ren()           
    fvtk.add(ren, fvtk.line(segmented_tract.tolist(),colors=color, linewidth=2,opacity=0.3))
    fvtk.show(ren)
    fvtk.clear(ren)
Example #44
In ``dipy`` it is very easy to count the number of crossings in a voxel, volume or region of interest

"""

gqs_tracks_asobj = np.array(gqs_tracks, dtype=np.object)
np.save('gqs_tracks.npy', gqs_tracks_asobj)
print('QA tracks saved in gqs_tracks.npy')
"""
**This is the end of this very simple example** You can reload the saved tracks using 
``np.load`` from your current directory. You can optionally install ``python-vtk``
and visualize the tracks using ``fvtk``:
"""

from dipy.viz import fvtk
r = fvtk.ren()
fvtk.add(r, fvtk.line(ten_tracks, fvtk.red, opacity=0.05))
gqs_tracks2 = [t + np.array([10, 0, 0]) for t in gqs_tracks]
fvtk.add(r, fvtk.line(gqs_tracks2, fvtk.green, opacity=0.05))
"""
Press 's' to save this screenshot when you have displayed it with ``fvtk.show``.
Or you can even record a video using ``fvtk.record``.

You would show the figure with something like::

    fvtk.show(r,png_magnify=1,size=(600,600))

To record a video of 50 frames of png, something like::

    fvtk.record(r,cam_pos=(0,40,-40),cam_focal=(5,0,0),n_frames=50,magnification=1,out_path='nii_2_tracks',size=(600,600),bgr_color=(0,0,0))

.. figure:: nii_2_tracks1000000.png
Example #45
def life(dwifile, bvecsfile, bvalsfile, tractogramfile, outdir,
         display_tracks=False, verbose=0):
    """ Linear fascicle evaluation (LiFE)
    Evaluating the results of tractography algorithms is one of the biggest
    challenges for diffusion MRI. One proposal for evaluation of tractography
    results is to use a forward model that predicts the signal from each of a
    set of streamlines, and then fit a linear model to these simultaneous
    prediction.

    Parameters
    ----------
    dwifile: str
        the path to the diffusion dataset.
    bvecsfile: str
        the path to the diffusion b-vectors.
    bvalsfile: str
        the path to the diffusion b-values.
    tractogramfile: str
        the path to the tractogram.
    outdir: str
        the destination folder.
    display_tracks: bool, default False
        if True render the tracks.
    verbose: int, default 0
        the verbosity level.

    Returns
    -------
    life_weights_file: str
        a file containing the fiber track weights.
    life_weights_snap: str
        a snap with the distribution of weights.
    spatial_error_file: str
        the model root mean square error.
    tracks_snap: str
        a snap with the tracks.
    """
    # Load diffusion data and tractogram
    bvecs = numpy.loadtxt(bvecsfile)
    bvals = numpy.loadtxt(bvalsfile)
    gtab = gradient_table(bvals, bvecs)
    im = nibabel.load(dwifile)
    data = im.get_data()
    trk = nibabel.streamlines.load(tractogramfile)
    if verbose > 0:
        print("[info] Diffusion shape: {0}".format(data.shape))
        print("[info] Number of tracks: {0}".format(len(trk.streamlines)))

    # Express the tractogram in voxel coordinates
    inv_affine = numpy.linalg.inv(trk.affine)
    trk = [
        numpy.dot(
            numpy.concatenate(
                (
                    streamline,
                    numpy.ones(
                        (len(streamline), 1)
                    )
                ), axis=1
            ),
            inv_affine) for streamline in trk.streamlines]
    trk = [track[..., :3] for track in trk]

    # Create a viewer
    tracks_snap = None
    if display_tracks:
        nb_tracks = len(trk)
        if nb_tracks < 5000:
            downsampling = 1
        else:
            downsampling = nb_tracks // 5000
        tracks_snap = os.path.join(outdir, "tracks.png")
        streamlines_actor = fvtk.line(trk[::downsampling],
                                      line_colors(trk[::downsampling]))
        vol_actor = fvtk.slicer(data[..., 0])

        vol_actor.display(data.shape[0] // 2, None, None)
        ren = fvtk.ren()
        fvtk.add(ren, streamlines_actor)
        fvtk.add(ren, vol_actor)
        fvtk.record(ren, n_frames=1, out_path=tracks_snap, size=(800, 800))
        if verbose > 1:
            fvtk.show(ren)

    # Fit the Life model and save the associated weights
    fiber_model = dpilife.FiberModel(gtab)
    fiber_fit = fiber_model.fit(data, trk, affine=numpy.eye(4))
    life_weights = fiber_fit.beta
    life_weights_file = os.path.join(outdir, "life_weights.txt")
    numpy.savetxt(life_weights_file, life_weights)
    life_weights_snap = os.path.join(outdir, "life_weights.png")
    fig, ax = plt.subplots(1)
    ax.hist(life_weights, bins=100, histtype="step")
    ax.set_xlabel("Fiber weights")
    ax.set_ylabel("# Fibers")
    fig.savefig(life_weights_snap)

    # Invert the model and predict back the data that was used to fit
    # the model: compute the prediction error of the diffusion-weighted data
    # and calculate the root of the mean squared error.
    model_predict = fiber_fit.predict()
    model_error = model_predict - fiber_fit.data
    model_rmse = numpy.sqrt(numpy.mean(model_error[:, 10:] ** 2, -1))
    data_rmse = numpy.ones(data.shape[:3]) * numpy.nan
    data_rmse[fiber_fit.vox_coords[:, 0],
              fiber_fit.vox_coords[:, 1],
              fiber_fit.vox_coords[:, 2]] = model_rmse
    model_error_file = os.path.join(outdir, "model_rmse.nii.gz")
    error_im = nibabel.Nifti1Image(data_rmse, im.affine)
    nibabel.save(error_im, model_error_file)

    # As a baseline against which we can compare, we assume that the weight
    # for each streamline is equal to zero. This produces the naive prediction
    # of the mean of the signal in each voxel.
    life_weights_baseline = numpy.zeros(life_weights.shape[0])
    pred_weighted = numpy.reshape(
        opt.spdot(fiber_fit.life_matrix, life_weights_baseline),
        (fiber_fit.vox_coords.shape[0], numpy.sum(~gtab.b0s_mask)))
    mean_pred = numpy.empty(
        (fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
    S0 = fiber_fit.b0_signal

    # Since the fitting is done in the demeaned S/S0 domain, we need to add
    # back the mean and then multiply by S0 in every voxel.
    mean_pred[..., gtab.b0s_mask] = S0[:, None]
    mean_pred[..., ~gtab.b0s_mask] = (
            (pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None])
    mean_error = mean_pred - fiber_fit.data
    mean_rmse = numpy.sqrt(numpy.mean(mean_error ** 2, -1))
    data_rmse = numpy.ones(data.shape[:3]) * numpy.nan
    data_rmse[fiber_fit.vox_coords[:, 0],
              fiber_fit.vox_coords[:, 1],
              fiber_fit.vox_coords[:, 2]] = mean_rmse
    mean_error_file = os.path.join(outdir, "mean_rmse.nii.gz")
    error_im = nibabel.Nifti1Image(data_rmse, im.affine)
    nibabel.save(error_im, mean_error_file)

    # Compute the improvement array
    data_rmse = numpy.ones(data.shape[:3]) * numpy.nan
    data_rmse[fiber_fit.vox_coords[:, 0],
              fiber_fit.vox_coords[:, 1],
              fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse
    improvment_error_file = os.path.join(outdir, "improvment_rmse.nii.gz")
    error_im = nibabel.Nifti1Image(data_rmse, im.affine)
    nibabel.save(error_im, improvment_error_file)

    return (life_weights_file, life_weights_snap, model_error_file,
            mean_error_file, improvment_error_file, tracks_snap)
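# A minimal usage sketch with hypothetical file paths (illustration only):
#
#   outputs = life("dwi.nii.gz", "dwi.bvec", "dwi.bval", "tractogram.trk",
#                  outdir="/tmp/life_out", display_tracks=False, verbose=1)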
Example #46
C=td.local_skeleton_clustering(tracks,d_thr=5)
print('Done in %.2f s'  % (time.clock()-now,))


"""
Reduce the number of points for faster visualization using the ``approx_polygon_track`` algorithm, which retains points depending on how much they are needed to define the shape of the track:
"""

T=[td.approx_polygon_track(t) for t in T]

"""
Show the initial *Fornix* dataset:
"""

r=fvtk.ren()
fvtk.add(r,fvtk.line(T,fvtk.white,opacity=1))
#fvtk.show(r)
fvtk.record(r,n_frames=1,out_path='fornix_initial',size=(600,600))

"""
.. figure:: fornix_initial1000000.png
   :align: center

   **Initial Fornix dataset**.
"""

"""
Show the *Fornix* after clustering (with random bundle colors):
"""

fvtk.clear(r)
Example #47
    raise ImportError('Python vtk module is not installed')
    sys.exit()
"""
Create a scene.
"""

r = fvtk.ren()
"""
Every streamline will be coloured according to its orientation
"""

from dipy.viz.colormap import line_colors
"""
fvtk.line adds a streamline actor for streamline visualization
and fvtk.add adds this actor in the scene
"""

fvtk.add(r, fvtk.line(tensor_streamlines, line_colors(tensor_streamlines)))

print('Saving illustration as tensor_tracks.png')
fvtk.record(r, n_frames=1, out_path='tensor_tracking.png', size=(600, 600))
"""
.. figure:: tensor_tracking.png
   :align: center

   **Deterministic streamlines with EuDX on a Tensor Field**.

.. include:: ../links_names.inc

"""
Example #48
                                 affine=affine,
                                 include=False)
other_streamlines = list(other_streamlines)
assert len(other_streamlines) + len(cc_streamlines) == len(streamlines)
"""
We can use some of dipy's visualization tools to display the ROI we targeted
above and all the streamlines that pass through that ROI. The ROI is the yellow
region near the center of the axial image.
"""

from dipy.viz import fvtk
from dipy.viz.colormap import line_colors

# Make display objects
color = line_colors(cc_streamlines)
cc_streamlines_actor = fvtk.line(cc_streamlines, line_colors(cc_streamlines))
cc_ROI_actor = fvtk.contour(cc_slice,
                            levels=[1],
                            colors=[(1., 1., 0.)],
                            opacities=[1.])

vol_actor = fvtk.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

# Add display objects to canvas
r = fvtk.ren()
fvtk.add(r, vol_actor)
fvtk.add(r, vol_actor2)
Example #49
- the Boys' colour map (see ``colormap.boys2rgb.py``)
- the orientation colour map (see ``colormap.orient2rgb.py``) with red: left-right; green: anteroposterior; blue: superior-inferior.

"""

#3,8,4 no crossing
no_cross = fvtk.crossing(QA[3, 8, 4], IN[3, 8, 4], verts, 1)

#3,8,5 crossing
cross = fvtk.crossing(QA[3, 8, 5], IN[3, 8, 5], verts, 1)

#3,8,6 double crossing
dcross = fvtk.crossing(QA[3, 8, 6], IN[3, 8, 6], verts, 1)

all, allo = fvtk.crossing(QA, IN, verts, 1, True)
fvtk.add(r, fvtk.line(all, fvtk.azure, linewidth=1.))

no_cross_shift = [c + np.array([3, 8, 4]) for c in no_cross]
cross_shift = [c + np.array([3, 8, 5]) for c in cross]
dcross_shift = [c + np.array([3, 8, 6]) for c in dcross]

fvtk.add(r, fvtk.line(no_cross_shift, fvtk.blue, linewidth=5.))
fvtk.add(r, fvtk.line(cross_shift, fvtk.indigo, linewidth=5.))
fvtk.add(r, fvtk.line(dcross_shift, fvtk.red, linewidth=5.))

from dipy.viz import colormap as cm

all_shift = [c + np.array([10, 0, 0]) for c in all]
all_shift2 = [c + np.array([20, 0, 0]) for c in all]

colors = np.zeros((len(all), 3))
Example #50
"""

now = time.clock()
C = td.local_skeleton_clustering(tracks, d_thr=5)
print('Done in %.2f s' % (time.clock() - now, ))
"""
Reduce the number of points for faster visualization using the ``approx_polygon_track`` algorithm, which retains points depending on how much they are needed to define the shape of the track:
"""

T = [td.approx_polygon_track(t) for t in T]
"""
Show the initial *Fornix* dataset:
"""

r = fvtk.ren()
fvtk.add(r, fvtk.line(T, fvtk.white, opacity=1))
#fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='fornix_initial', size=(600, 600))
"""
.. figure:: fornix_initial1000000.png
   :align: center

   **Initial Fornix dataset**.
"""
"""
Show the *Fornix* after clustering (with random bundle colors):
"""

fvtk.clear(r)
colors = np.zeros((len(T), 3))
for c in C:
Example #51
def get_fvtk_streamlines_actor(streamlines):
    """Get vtk streamline actor."""
    streamlines_actor = fvtk.line(streamlines)
    return streamlines_actor