Example #1
def test_LSCv2(verbose=False):
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    pf.local_skeleton_clustering(T, 0.2)

    pf.local_skeleton_clustering_3pts(T, 0.2)

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    if verbose:
        print(t2 - t1)
        print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    if verbose:
        print(t2 - t1)
        print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden'] - C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    if verbose:
        print(t2 - t1)
        print(len(C5))

    fname = get_fnames('fornix')
    fornix = load_tractogram(fname, 'same', bbox_valid_check=False).streamlines

    T3 = set_number_of_points(fornix, 6)

    if verbose:
        print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    if verbose:
        print('lenC', len(C))
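
A note on the returned structure, since every example below leans on it: local_skeleton_clustering returns a dict keyed by cluster id, and each value carries 'hidden' (the running sum of the cluster's downsampled tracks), 'N' (the number of member tracks) and 'indices' (their positions in the input sequence). A minimal sketch of walking that structure, assuming pf is dipy.tracking.distances as in the test above:

import numpy as np
from dipy.tracking import distances as pf

T = [np.random.rand(3, 3).astype('f4') for _ in range(40)]
C = pf.local_skeleton_clustering(T, 0.5)
for c in C:
    centroid = C[c]['hidden'] / float(C[c]['N'])  # the cluster's virtual track
    members = C[c]['indices']                     # indices into T
    assert len(members) == C[c]['N']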
    """
Example #2
def test_LSCv2():
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    C = pf.local_skeleton_clustering(T, 0.2)

    #print C
    #print len(C)

    C2 = pf.local_skeleton_clustering_3pts(T, 0.2)

    #print C2
    #print len(C2)

    #"""

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden'] - C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C5))

    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest('Fails to import dipy.viz due to %s' %
                                         str(e))
Example #4
    def __init__(self, tracks, dist_thr=4., pts=12):
        """ Highly efficient trajectory clustering

        Parameters
        ----------
        tracks : sequence of (N,3) ... (M,3) arrays
            trajectories (or tractography or streamlines)
        dist_thr : float
            distance threshold in the space of the tracks
        pts : int
            number of points for simplifying the tracks

        Methods
        -------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters

        Citation
        --------
        E. Garyfallidis, "Towards an accurate brain tractography",
        PhD thesis, 2012

        """
        if pts is not None:
            self.tracksd = [downsample(track, pts) for track in tracks]
        else:
            self.tracksd = tracks
        self.clustering = local_skeleton_clustering(self.tracksd, dist_thr)
        self.virts = None
        self.exemps = None
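
Example #17 below identifies this constructor as the old dipy.segment.quickbundles.QuickBundles. A hedged usage sketch, assuming that import path and the methods named in the docstring (note that in the code above `clustering` is actually set as an attribute, not a method):

from dipy.segment.quickbundles import QuickBundles  # deprecated path, see Example #17

# tracks: a list of (N, 3) float32 arrays
qb = QuickBundles(tracks, dist_thr=10., pts=12)
print(len(qb.clustering))  # one entry per cluster, as set in __init__
virtuals = qb.virtuals()   # centroid track per cluster, per the docstring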
Example #5
def skeletonize(fdpy, flsc, points=3):

    dpr = Dpy(fdpy, 'r')
    T = dpr.read_tracks()
    dpr.close()
    print(len(T))
    Td = [downsample(t, points) for t in T]
    C = local_skeleton_clustering(Td, d_thr=10., points=points)

    skeleton = []
    for c in C:
        # color = np.random.rand(3)
        if C[c]['N'] > 0:
            Ttmp = []
            for i in C[c]['indices']:
                Ttmp.append(T[i])
            si, s = most_similar_track_mam(Ttmp, 'avg')
            print(si, C[c]['N'])
            # keep the exemplar (most similar member) of each cluster
            C[c]['most'] = Ttmp[si]
            # fvtk.add(r, fvtk.line(Ttmp[si], color))
    print(len(skeleton))  # skeleton is never filled here; always prints 0

    save_pickle(flsc, C)
Example #6
    def __init__(self, tracks, dist_thr=4., pts=12):
        """ Highly efficient trajectory clustering

        Parameters
        ----------
        tracks : sequence of (N,3) ... (M,3) arrays
            trajectories (or tractography or streamlines)
        dist_thr : float
            distance threshold in the space of the tracks
        pts : int
            number of points for simplifying the tracks

        Methods
        -------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters

        Citation
        --------
        E. Garyfallidis, "Towards an accurate brain tractography",
        PhD thesis, 2012

        """
        self.dist_thr = dist_thr
        self.pts = pts
        if pts is not None:
            self.tracksd = [downsample(track, self.pts) for track in tracks]
        else:
            self.tracksd = tracks
        self.clustering = local_skeleton_clustering(self.tracksd,
                                                    self.dist_thr)
        self.virts = None
        self.exemps = None
Example #7
def converging_lsc(inp):

    C0 = load_pickle(dout + outs[inp] + '.skl')
    print(len(C0))
    v0, i0, l0 = bring_virtuals(C0)
    v = v0
    not_converged = 1
    Cs = []
    while not_converged:
        lv_before = len(v)
        # re-cluster the virtuals until their number stops changing
        C = local_skeleton_clustering(v, 4.)
        v, i, l = bring_virtuals(C)
        if len(v) == lv_before:
            not_converged = 0
        else:
            Cs.append(C)
    return Cs
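
bring_virtuals is imported from the author's LSC_limits module (see Example #22), not from dipy. A minimal sketch reconstructed from how it is used across these examples; the body below is an assumption, not the original implementation:

def bring_virtuals(C):
    # virtual (centroid) track of every cluster
    virtuals = [C[c]['hidden'] / float(C[c]['N']) for c in C]
    total = float(sum(C[c]['N'] for c in C))
    # per-cluster weight, sketched here as the fraction of all tracks that
    # fall in the cluster, matching the very small cthr cutoff in Example #10
    weights = [C[c]['N'] / total for c in C]
    return virtuals, weights, total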
Example #8
    def aggregate(self, ttracks):
        """Assign a cluster label to every track in `ttracks`."""
        # Pull out np object arrays from the TrackDataset
        tracks = ttracks.tracks

        # Run DiPy's local skeleton clustering on the track endpoints
        tep = tracks_to_endpoints(tracks / 2)
        C = td.local_skeleton_clustering(tep, self.dthr)

        # Populate the label array; clusters with fewer than min_tracks
        # members are marked -1
        labels = np.zeros(len(tracks), dtype=int)
        clustnum = 0
        for k in C.keys():
            if C[k]['N'] < self.min_tracks:
                label = -1
            else:
                label = clustnum
                clustnum += 1
            indices = C[k]['indices']
            labels[indices] = label
        return labels
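
A short usage sketch for the labels array returned above; -1 marks tracks whose cluster fell below min_tracks, so they are dropped before tallying cluster sizes (aggregator is a hypothetical instance of the class above):

import numpy as np

labels = aggregator.aggregate(ttracks)
kept = labels[labels >= 0]   # drop tracks from too-small clusters
sizes = np.bincount(kept)    # number of tracks per surviving cluster
print(len(sizes), 'clusters,', np.sum(labels == -1), 'unclustered tracks')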
Example #9
def taleton_old(T, dist=[8.]):

    Cs = []
    id = 0
    C = local_skeleton_clustering(T, dist[id])
    Cs.append(C)
    vs = [C[c]['hidden'] / float(C[c]['N']) for c in C]
    id += 1
    while id < len(dist):
        C = local_skeleton_clustering(vs, dist[id])
        vs = [C[c]['hidden'] / float(C[c]['N']) for c in C]
        Cs.append(C)
        id += 1
    return Cs
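
A usage sketch for the function above, with an illustrative threshold list: each pass re-clusters the previous level's virtuals, so the skeleton typically shrinks as the distance grows.

Cs = taleton_old(T, dist=[4., 8., 12.])
for level, C in enumerate(Cs):
    print(level, len(C))  # clusters per level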
Example #10
def show_2_generations(C, thr=10., cthr=0.000442707065162):
    r = fvtk.ren()
    vs, cinds, ls = bring_virtuals(C)
    lvs = len(vs)
    # keep only virtuals whose cluster weight reaches the threshold
    vs = [vs[i] for (i, c) in enumerate(cinds) if c >= cthr]
    # fvtk.add(r, fvtk.line(vs, fvtk.red))
    C2 = local_skeleton_clustering(vs, thr)
    vs2, inds2, ls2 = bring_virtuals(C2)
    # fvtk.add(r, fvtk.line(vs2, fvtk.green, opacity=0.5))
    print('Initial', lvs, 'Thresholded', len(vs), 'Recalculated', len(vs2),
          'Total_Tracks', int(ls))
    # fvtk.show(r)
    return np.array([lvs, len(vs), len(vs2), int(ls)])
Example #11
def find_matches():

    fsolid = '/home/ian/Data/LSC_stability/solid_1M.npy'
    T = np.load(fsolid)

    samplesize = 10**3
    lscdist = 4.
    labels1 = np.arange(0 * samplesize, 1 * samplesize)
    T1 = T[labels1]
    C1 = local_skeleton_clustering(T1, lscdist)

    labels2 = np.arange(1 * samplesize, 2 * samplesize)
    T2 = T[labels2]
    C2 = local_skeleton_clustering(T2, lscdist)

    v1, l1, tot1 = bring_virtuals(C1)
    v2, l2, tot2 = bring_virtuals(C2)

    d12 = bundles_distances_mdf(v1, v2)

    # for each cluster in C2, the closest (MDF) virtual in C1
    mv21 = np.argmin(d12, axis=0)

    print(mv21[0], C2[0]['indices'], C1[mv21[0]]['indices'])
Example #12
def show_zero_level(r, bundle, dist):

    T = [downsample(b, 12) for b in bundle]
    C = local_skeleton_clustering(T, dist)
    vs = []
    colors = np.zeros((len(T), 3))
    for c in C:
        vs.append(C[c]['hidden'] / C[c]['N'])
        color = np.random.rand(3,)
        for i in C[c]['indices']:
            colors[i] = color
            fvtk.label(r, text=str(i), pos=(bundle[i][-1]),
                       scale=(.5, .5, .5),
                       color=(color[0], color[1], color[2]))
    fvtk.add(r, fvtk.line(T, colors, linewidth=2.))
Example #13
def taleton(T, dist=4 * np.ones(1000)):

    Cs = []
    id = 0
    C = local_skeleton_clustering(T, dist[id])
    Cs.append(C)
    vs = [C[c]['hidden'] / float(C[c]['N']) for c in C]
    ls = [len(C[c]['indices']) for c in C]

    vs_change = True
    id += 1
    while vs_change:
        lv_prev = len(vs)
        C = local_skeleton_clustering(vs, dist[id])
        vs = [C[c]['hidden'] / float(C[c]['N']) for c in C]
        # propagate indices and hidden sums down to the original tracks
        for c in C:
            tmpi = C[c]['indices']
            tmpi2 = []
            tmph = np.zeros(C[c]['hidden'].shape)
            for i in tmpi:
                tmpi2 += Cs[id - 1][i]['indices']
                tmph += Cs[id - 1][i]['hidden']
            C[c]['indices'] = tmpi2
            C[c]['hidden'] = tmph
            C[c]['N'] = len(tmpi2)

        if len(vs) != lv_prev:
            Cs.append(C)
            id += 1
        else:
            vs_change = False
    return Cs
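
Unlike taleton_old, taleton propagates 'indices' and 'hidden' back to the original tracks at every level, so even top-level clusters index directly into T. A brief usage sketch with the default thresholds:

Cs = taleton(T)  # iterates until the number of virtuals stops changing
top = Cs[-1]
for c in top:
    assert max(top[c]['indices']) < len(T)  # indices refer to the original tracks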
Example #14
def multiple_comparisons(T, samplesize=10**4, lscdist=4., replications=2,
                         subj='1'):
    labels1 = np.random.permutation(np.arange(len(T)))[:samplesize]
    # labels1 = np.arange(0 * samplesize, 1 * samplesize)
    T1 = T[labels1]
    print('Number of tracks is %d' % (len(T1)))
    lists = {}
    C = {}
    results = {}
    C_size = {}

    for replication in np.arange(replications):
        print('... preparing LSC(%d)' % (replication))
        rearrangement = np.random.permutation(np.arange(samplesize))
        rearrangement = list(rearrangement)
        C[replication] = local_skeleton_clustering(T1[rearrangement], lscdist)
        print('... skeleton size %d' % (len(C[replication])))
        lists[replication] = rearrangement
        C_size[replication] = len(C[replication])

    save_pickle('labels' + str(samplesize) + '_' + subj + '.pkl', labels1)
    save_pickle('C' + str(samplesize) + '_' + subj + '.pkl', C)
    save_pickle('lists' + str(samplesize) + '_' + subj + '.pkl', lists)
    save_pickle('C_size' + str(samplesize) + '_' + subj + '.pkl', C_size)

    for rep1 in np.arange(replications - 1):
        for rep2 in np.arange(rep1 + 1, replications):
            print('comparing %d and %d' % (rep1, rep2))
            results[(rep1, rep2)] = return_comparisons(C[rep1], lists[rep1],
                                                       C[rep2], lists[rep2])

    '''
    labels2 = np.arange(1 * samplesize, 2 * samplesize)
    T2 = T[labels2]
    list21 = np.random.permutation(np.arange(samplesize))
    C21 = local_skeleton_clustering(T2[list21], lscdist)
    list22 = np.random.permutation(np.arange(samplesize))
    C22 = local_skeleton_clustering(T2[list22], lscdist)

    print('C21 vs C22')
    report_comparisons(C21, list21, C22, list22)
    '''
    return results
Example #15
def plot_timings():

    # dir = '/home/eg309/Data/LSC_limits/full_1M'
    dir = '/tmp/full_1M'
    fs = ['.npy', '_2.npy', '_3.npy', '_4.npy', '_5.npy']
    # also available: '_6.npy' through '_10.npy'

    T = []
    for f in fs:
        fs1 = dir + f
        T += change_dtype(list(np.load(fs1)))
    T = T[:100000]

    print(len(T))
    dists = [8.]   # also tried: [4., 6., 8., 10.]
    pts = [12]     # also tried: [3, 6, 12, 18]
    sub = 10**3    # also tried: 10**5
    res = {}
    for p in pts:
        print(p)
        res[p] = {}
        for d in dists:
            print(d)
            res[p][d] = {}
            res[p][d]['time'] = []
            res[p][d]['len'] = []
            step = 0
            while step <= len(T):
                print(step)
                Td = [downsample(t, p) for t in T[0:step + sub]]
                t1 = time()
                C = local_skeleton_clustering(Td, d)
                t2 = time()
                res[p][d]['time'].append(t2 - t1)
                res[p][d]['len'].append(len(C))
                step = step + sub

    save_pickle('/tmp/res.pkl', res)
    print('Result saved in /tmp/res.pkl')
Example #16
def generate_skeletons():

    img = nib.load(fbet)
    data = img.get_data()
    affine = img.get_affine()
    bvals = np.loadtxt(fbvals)
    bvecs = np.loadtxt(fbvecs).T
    t = time()
    gqs = GeneralizedQSampling(data, bvals, bvecs)
    print('gqs time', time() - t, 's')
    for (i, sds) in enumerate(seeds):
        print(i, sds)
        t = time()
        eu = EuDX(gqs.qa(), gqs.ind(), seeds=sds, a_low=0.0239)
        T = [downsample(e, 12) for e in eu]
        C = local_skeleton_clustering(T, 4.)
        save_pickle(dout + outs[i] + '.skl', C)
        print(time() - t)
        del T
    print(outs)
Example #17
    def __init__(self, tracks, dist_thr=4., pts=12):
        """ Highly efficient trajectory clustering

        Parameters
        ----------
        tracks : sequence of (N,3) ... (M,3) arrays
            trajectories (or tractography or streamlines)
        dist_thr : float
            distance threshold in the space of the tracks
        pts : int
            number of points for simplifying the tracks

        Methods
        -------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters

        Citation
        --------
        E. Garyfallidis, "Towards an accurate brain tractography",
        PhD thesis, 2012

        """
        warn(DeprecationWarning("Class 'dipy.segment.quickbundles.QuickBundles'"
                                " is deprecated, use module "
                                "'dipy.segment.clustering.QuickBundles'"
                                " instead"))

        self.dist_thr = dist_thr
        self.pts = pts
        if pts is not None:
            self.tracksd = [downsample(track, self.pts) for track in tracks]
        else:
            self.tracksd = tracks
        self.clustering = local_skeleton_clustering(self.tracksd,
                                                    self.dist_thr)
        self.virts = None
        self.exemps = None
Example #18
def show(T, A, IND, VERTS, scale):

    r = fvtk.ren()
    fvtk.clear(r)
    fvtk.add(r, fvtk.line(T, fvtk.red))
    fvtk.show(r)

    Td = [downsample(t, 20) for t in T]
    C = local_skeleton_clustering(Td, 3)
    fvtk.clear(r)
    lent = float(len(T))

    for c in C:
        color = np.random.rand(3)
        virtual = C[c]['hidden'] / float(C[c]['N'])
        if length(virtual) > virtual_thr:  # virtual_thr is defined elsewhere
            linewidth = 100 * len(C[c]['indices']) / lent
            if linewidth < 1.:
                linewidth = 1
            # fvtk.add(r, fvtk.line(virtual, color, linewidth=linewidth))

    print(A.shape)
    print(IND.shape)
    print(VERTS.shape)

    all, allo = fvtk.crossing(A, IND, VERTS, scale, True)
    colors = np.zeros((len(all), 3))
    for (i, a) in enumerate(all):
        if allo[i][0] == 0 and allo[i][1] == 0 and allo[i][2] == 1:
            pass
        else:
            colors[i] = cm.boys2rgb(allo[i])

    fvtk.add(r, fvtk.line(all, colors))
    fvtk.show(r)
Example #19
    def __init__(self, tracks, dist_thr=4., pts=12):
        """ Highly efficient trajectory clustering [Garyfallidis12]_.

        Parameters
        ----------
        tracks : sequence of (N,3) ... (M,3) arrays
            trajectories (or tractography or streamlines)
        dist_thr : float
            distance threshold in the space of the tracks
        pts : int
            number of points for simplifying the tracks

        Methods
        -------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters

        References
        ----------
        .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                            tractography simplification,
                            Frontiers in Neuroscience, vol 6, no 175, 2012.
        """
        warn(DeprecationWarning(deprecation_msg))

        self.dist_thr = dist_thr
        self.pts = pts
        if pts is not None:
            self.tracksd = [downsample(track, self.pts) for track in tracks]
        else:
            self.tracksd = tracks
        self.clustering = local_skeleton_clustering(self.tracksd,
                                                    self.dist_thr)
        self.virts = None
        self.exemps = None
T = []
for s in streams:
    T.append(s[0])

r = fvtk.ren()
linea = fvtk.line(T, fvtk.red)
fvtk.add(r, linea)
fvtk.show(r)

# for more complicated visualizations use mayavi
# or the new fos when released

dT = [tm.downsample(t, 10) for t in T]
C = td.local_skeleton_clustering(dT, d_thr=5)

ldT = [tm.length(t) for t in dT]
# average track length
avg_ldT = sum(ldT) / len(dT)
print(avg_ldT)

"""
r=fvtk.ren()
#fvtk.clear(r)
colors=np.zeros((len(T),3))
for c in C:
    color=np.random.rand(1,3)
    for i in C[c]['indices']:
        colors[i]=color
fvtk.add(r,fvtk.line(T,colors,opacity=1))
"""
Downsample tracks to just 3 points:
"""

tracks = [tm.downsample(t, 3) for t in T]
"""
Delete unnecessary data:
"""

del streams, hdr
"""
Perform Local Skeleton Clustering (LSC) with a 5mm threshold:
"""

now = time.time()
C = td.local_skeleton_clustering(tracks, d_thr=5)
print('Done in %.2f s' % (time.time() - now, ))
"""
Reduce the number of points for faster visualization using the ``approx_polygon_track`` algorithm, which retains points depending on how much they are needed to define the shape of the track:
"""

T = [td.approx_polygon_track(t) for t in T]
"""
Show the initial *Fornix* dataset:
"""

r = fvtk.ren()
fvtk.add(r, fvtk.line(T, fvtk.white, opacity=1))
#fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='fornix_initial', size=(600, 600))
"""
Example #22
r = fvtk.ren()

r.SetBackground(1, 1, 1.)

fvtk.add(r, fvtk.line(bundle, fvtk.red, linewidth=3))
fvtk.add(r, fvtk.line(bundle3, fvtk.green, linewidth=3))
fvtk.add(r, fvtk.line(bundle4, fvtk.blue, linewidth=3))
fvtk.show(r, size=(800, 800))


from LSC_limits import bring_virtuals

Td = [downsample(t, 80) for t in bun3]

C8 = local_skeleton_clustering(Td, 8)
vs, ls, tot = bring_virtuals(C8)
vs2 = shift(vs, np.array([0, 0, 0], 'f4'))

"""
wi2 = Window(bgcolor=(1., 1., 1., 1.), width=1000, height=1000)
wi3 = Window(bgcolor=(1., 1., 1., 1.), width=1000, height=1000)
w2 = World()
w3 = World()
wi2.attach(w2)
wi3.attach(w3)
w2.add(line(vs2, np.array([[1, 0, 1, 1], [0, 1, 0, 1]], 'f4')))
"""

fvtk.clear(r)
fvtk.add(r, fvtk.line(vs2, np.array([[1, 0, 1], [0, 1, 0]], 'f4'), linewidth=3))
Example #23
def test_LSCv2():
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    pf.local_skeleton_clustering(T, 0.2)

    # print C
    # print len(C)

    pf.local_skeleton_clustering_3pts(T, 0.2)

    # print C2
    # print len(C2)

    # """

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    print(t2-t1)
    print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    print(t2-t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden']-C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print(t2-t1)
    print(len(C5))

    from dipy.data import get_fnames
    from nibabel import trackvis as tv

    streams, hdr = tv.read(get_fnames('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))

    """
Example #24
def test_LSCv2():
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    C = pf.local_skeleton_clustering(T, 0.2)

    # print C
    # print len(C)

    C2 = pf.local_skeleton_clustering_3pts(T, 0.2)

    # print C2
    # print len(C2)

    # """

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden'] - C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C5))

    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import window, actor
    except ImportError as e:
        raise nose.plugins.skip.SkipTest('Fails to import dipy.viz due to %s' %
                                         str(e))

    streams, hdr = tv.read(get_data('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))
    """
Example #25
    
    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest(
            'Fails to import dipy.viz due to %s' % str(e))
    
    streams, hdr = tv.read(get_data('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))
    
    """
    
    r = fvtk.ren()
    colors = np.zeros((len(C), 3))
    for c in C:
        color = np.random.rand(3)
        for i in C[c]['indices']:
            fvtk.add(r, fvtk.line(T3[i], color))
        colors[c] = color
    fvtk.show(r)
    fvtk.clear(r)
    skeleton = []
"""

Example #27
    print(len(C5))

    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest('Fails to import dipy.viz due to %s' %
                                         str(e))

    streams, hdr = tv.read(get_data('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))
    """
    
    r = fvtk.ren()
    colors = np.zeros((len(C), 3))
    for c in C:
        color = np.random.rand(3)
        for i in C[c]['indices']:
            fvtk.add(r, fvtk.line(T3[i], color))
        colors[c] = color
    fvtk.show(r)
    fvtk.clear(r)
    skeleton = []