def measure_overlap(static_center, moving_center, show=True, vol_size=(256, 256, 256)):
    static_center = [downsample(s, 100) for s in static_center]
    moving_center = [downsample(s, 100) for s in moving_center]
    vol = np.zeros(vol_size)

    ci, cj, ck = vol_size[0] // 2, vol_size[1] // 2, vol_size[2] // 2

    spts = np.concatenate(static_center, axis=0)
    spts = np.round(spts).astype(int) + np.array([ci, cj, ck])

    mpts = np.concatenate(moving_center, axis=0)
    mpts = np.round(mpts).astype(int) + np.array([ci, cj, ck])

    for index in spts:
        i, j, k = index
        vol[i, j, k] = 1

    vol2 = np.zeros(vol_size)
    for index in mpts:
        i, j, k = index
        vol2[i, j, k] = 1

    vol_and = np.logical_and(vol, vol2)
    overlap = np.sum(vol_and) / float(np.sum(vol2))
    if show:
        viz_vol(vol_and)
    return 100 * overlap
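A minimal usage sketch, assuming `bundle_a` and `bundle_b` are hypothetical lists of (N, 3) streamline arrays in voxel-like coordinates roughly centred on the origin, and that `np`, `downsample` and `viz_vol` are available in the snippet's namespace:

# Percentage of moving-bundle voxels that overlap the static bundle.
overlap_pct = measure_overlap(bundle_a, bundle_b, show=False)
print('overlap: %.1f%%' % overlap_pct)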
Example No. 2
def skeletonize(fdpy,flsc,points=3):

    dpr=Dpy(fdpy,'r')    
    T=dpr.read_tracks()
    dpr.close()    
    print(len(T))
    Td=[downsample(t,points) for t in T]
    C=local_skeleton_clustering(Td,d_thr=10.,points=points)    
    #Tobject=np.array(T,dtype=np.object)
    

    #'''
    #r=fvtk.ren()    
    skeleton=[]    
    for c in C:
        #color=np.random.rand(3)
        if C[c]['N']>0:
            Ttmp=[]
            for i in C[c]['indices']:
                Ttmp.append(T[i])
            si,s=most_similar_track_mam(Ttmp,'avg')
            print(si, C[c]['N'])
            C[c]['most']=Ttmp[si]            
            #fvtk.add(r,fvtk.line(Ttmp[si],color))            
    print(len(skeleton))
    #r=fos.ren()
    #fos.add(r,fos.line(skeleton,color))    
    #fos.add(r,fos.line(T,fos.red))    
    #fvtk.show(r)
    #'''
    
    save_pickle(flsc,C)
Example No. 3
    def aggregate(self, track_dataset):
        """
        An example implementation of the k-means algorithm implemented in 
        DSI Studio.  This function is automatically applied to all 
        TrackDatasets returned from a query.
  
        Parameters:
        -----------
        track_dataset:dsi2.streamlines.track_dataset.TrackDataset
        """
        # extract the streamline data
        tracks = track_dataset.tracks

        # Make a matrix of downsampled streamlines
        points = np.array([ downsample(trk, 3).flatten() \
                                    for trk in tracks])

        # Calculate the length of each streamline
        lengths = np.array([len(trk) for trk in tracks]).reshape(-1, 1)

        # Concatenate the points and the track lengths
        features = np.hstack((points, lengths))

        # Initialize the k-means algorithm
        kmeans = MiniBatchKMeans(n_clusters=self.k, compute_labels=True)
        kmeans.fit(features)

        # Return the labels
        return kmeans.labels_
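A hedged sketch of calling this aggregator directly, where `agg` is assumed to be an instance of the class that defines `aggregate` (with `agg.k` already set) and `track_dataset` is a `TrackDataset`-like object exposing a `tracks` list; both names are illustrative:

# One k-means label per streamline in the dataset.
labels = agg.aggregate(track_dataset)
print(len(labels), 'cluster labels, one per streamline')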
Example No. 4
 def __init__(self,tracks,dist_thr=4.,pts=12):
     """ Highly efficient trajectory clustering 
     
     Parameters
     -----------
     tracks : sequence of (N,3) ... (M,3) arrays,
                 trajectories (or tractography or streamlines)
                 
     dist_thr : float, 
                 distance threshold in the space of the tracks
     pts : int, 
             number of points for simplifying the tracks 
                    
     Methods
     --------
     clustering() returns a dict holding the clustering result
     virtuals() gives the virtuals (track centroids) of the clusters
     exemplars() gives the exemplars (track medoids) of the clusters        
     
     Citation
     ---------
     
     E.Garyfallidis, "Towards an accurate brain tractography", PhD thesis, 2011 
     
     """        
            
     if pts is not None:
         self.tracksd=[downsample(track,pts) for track in tracks]
     else:
         self.tracksd=tracks                    
     self.clustering=local_skeleton_clustering(self.tracksd,dist_thr)
     self.virts=None
     self.exemps=None                
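A short usage sketch for this class, assuming it is the (now deprecated) `dipy.segment.quickbundles.QuickBundles` shown in later examples and that `tracks` is a list of (N, 3) arrays:

qb = QuickBundles(tracks, dist_thr=10., pts=12)
centroids = qb.virtuals()          # one centroid (virtual) track per cluster
exemplars, ids = qb.exemplars()    # medoid track of each cluster
print(len(centroids), 'clusters found')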
Example No. 6
 def __init__(self, tracks, dist_thr=4., pts=12):
     """ Highly efficient trajectory clustering 
     
     Parameters
     -----------
     tracks : sequence of (N,3) ... (M,3) arrays,
                 trajectories (or tractography or streamlines)
                 
     dist_thr : float, 
                 distance threshold in the space of the tracks
     pts : int, 
             number of points for simplifying the tracks 
                    
     Methods
     --------
     clustering() returns a dict holding the clustering result
     virtuals() gives the virtuals (track centroids) of the clusters
     exemplars() gives the exemplars (track medoids) of the clusters        
     
     Citation
     ---------
     
     E.Garyfallidis, "Towards an accurate brain tractography", PhD thesis, 2011 
     
     """
     self.dist_thr = dist_thr
     self.pts = pts
     if pts is not None:
         self.tracksd = [downsample(track, self.pts) for track in tracks]
     else:
         self.tracksd = tracks
     self.clustering = local_skeleton_clustering(self.tracksd,
                                                 self.dist_thr)
     self.virts = None
     self.exemps = None
Example No. 7
 def __init__(self,tracks,num_clusters=10,pts = 12):
     """ Highly efficient trajectory clustering 
     
     Parameters
     -----------
     tracks : sequence of (N,3) ... (M,3) arrays,
                 trajectories (or tractography or streamlines)
                 
     num_clusters : int, 
                      number of clusters of the tracks        
                    
     Methods
     --------
     clustering() returns a dict holding the clustering result
     virtuals() gives the virtuals (track centroids) of the clusters
     exemplars() gives the exemplars (track medoids) of the clusters        
     
     Citation
     ---------
     
     E.Garyfallidis, "Towards an accurate brain tractography", PhD thesis, 2012 
     
     """
     self.num_clusters = num_clusters
     self.pts = pts
     if pts is not None:
         self.tracksd=[downsample(track,self.pts) for track in tracks]
     else:
         self.tracksd=tracks                    
     self.clustering=kmeans(self.tracksd, self.num_clusters)
     self.virts=None
     self.exemps=None                
Example No. 8
def resample_streamlines(streamlines, num_points=0, arc_length=False):
    """
    Resample streamlines using number of points per streamline

    Parameters
    ----------
    streamlines: list
        List of list of 3D points.
    num_points: int
        Number of points per streamline in the output.
    arc_length: bool
        Whether to downsample using arc length parametrization.

    Return
    ------
    resampled_streamlines: list
        List of resampled streamlines.
    """
    resampled_streamlines = []
    for streamline in streamlines:
        if arc_length:
            line = set_number_of_points(streamline, num_points)
        else:
            line = downsample(streamline, num_points)
        resampled_streamlines.append(line)

    return resampled_streamlines
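A minimal usage sketch, assuming `streamlines` is a list of (N, 3) point arrays and that the functions used inside (`downsample`, `set_number_of_points`) are importable in the targeted dipy version:

# Simple downsampling to 20 points per streamline.
resampled = resample_streamlines(streamlines, num_points=20)
# Or resample using arc-length parametrization instead.
resampled_arc = resample_streamlines(streamlines, num_points=20, arc_length=True)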
Example No. 9
    def freeze(self):
        print(
            "Freezing current expanded real tracks, then doing QB on them, then restarting."
        )
        print("Selected virtuals: %s" % self.selected)
        tracks_frozen = []
        tracks_frozen_ids = []
        for tid in self.selected:
            print(tid)
            part_tracks = self.qb.label2tracks(self.tracks, tid)
            part_tracks_ids = self.qb.label2tracksids(tid)
            print("virtual %s represents %s tracks." % (tid, len(part_tracks)))
            tracks_frozen += part_tracks
            tracks_frozen_ids += part_tracks_ids
        print("frozen tracks size:", len(tracks_frozen))
        print("Computing quick bundles...", end=" ")
        self.unselect_track('all')
        self.tracks = tracks_frozen
        self.tracks_ids = self.tracks_ids[
            tracks_frozen_ids]  # range(len(self.tracks))

        root = Tkinter.Tk()
        root.wm_title('QuickBundles threshold')
        ts = ThresholdSelector(root, default_value=self.qb.dist_thr / 2.0)
        root.wait_window()

        #print "Threshold value ",ts.value
        #self.qb = QuickBundles(self.tracks, dist_thr=qb.dist_thr/2., pts=self.qb.pts)
        self.qb = QuickBundles(self.tracks, dist_thr=ts.value, pts=self.qb.pts)
        #self.qb.dist_thr = qb.dist_thr/2.
        self.qb.dist_thr = ts.value
        if self.reps == 'virtuals':
            self.virtuals = self.qb.virtuals()
        if self.reps == 'exemplars':
            self.virtuals, self.ex_ids = self.qb.exemplars()
        print(len(self.virtuals), 'virtuals')
        self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(
            self.virtuals, self.virtuals_alpha)
        #compute buffers
        self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(
            self.tracks, self.tracks_alpha)
        # self.unselect_track('all')
        self.selected = []
        self.old_color = {}
        self.expand = False
        self.history.append([
            self.qb, self.tracks, self.tracks_ids, self.virtuals_buffer,
            self.virtuals_colors, self.virtuals_first, self.virtuals_count,
            self.tracks_buffer, self.tracks_colors, self.tracks_first,
            self.tracks_count
        ])
        if self.vol_shape is not None:
            print("Shifting!")
            self.virtuals_shifted = [
                downsample(t + np.array(self.vol_shape) / 2., 30)
                for t in self.virtuals
            ]
        else:
            self.virtuals_shifted = None
Example No. 10
def test_downsample_deprecated():
    with warnings.catch_warnings(record=True) as cw:
        warnings.simplefilter("always", DeprecationWarning)
        streamline = [np.array([[0, 0, 0], [1, 1, 1]])]

        streamline_12 = tm.downsample(streamline, 12)
        npt.assert_equal(len(streamline_12[0]), 12)
        npt.assert_(issubclass(cw[0].category, DeprecationWarning))
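Since `downsample` is deprecated (which is exactly what the test above checks), a minimal sketch of the replacement call, assuming a dipy version that provides `dipy.tracking.streamline.set_number_of_points`:

from dipy.tracking.streamline import set_number_of_points

streamline = [np.array([[0, 0, 0], [1, 1, 1]], dtype=float)]
streamline_12 = set_number_of_points(streamline, 12)
assert len(streamline_12[0]) == 12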
Example No. 11
    def __init__(self, name,qb, tracks, reps='exemplars',colors=None, vol_shape=None, virtuals_line_width=5.0, tracks_line_width=2.0, virtuals_alpha=1.0, tracks_alpha=0.6, affine=None, verbose=False):
        """TrackLabeler is meant to explore and select subsets of the
        tracks. The exploration occurs through QuickBundles (qb) in
        order to simplify the scene.
        """
        super(StreamlineLabeler, self).__init__(name)

        if affine is None: self.affine = np.eye(4, dtype = np.float32)
        else: self.affine = affine      
         
        self.mouse_x=None
        self.mouse_y=None
        self.cache = {}
        self.qb = qb
        self.reps = reps
        #virtual tracks
        if self.reps=='virtuals':
            self.virtuals=qb.virtuals()
        if self.reps=='exemplars':
            self.virtuals,self.ex_ids = qb.exemplars()
        self.virtuals_alpha = virtuals_alpha
        self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(self.virtuals, self.virtuals_alpha)
        #full tractography (downsampled at 12 pts per track)
        self.tracks = tracks
        self.tracks_alpha = tracks_alpha
        self.tracks_ids = np.arange(len(self.tracks), dtype=int)
        self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(self.tracks, self.tracks_alpha)
        #calculate boundary box for entire tractography
        self.min = np.min(self.tracks_buffer,axis=0)
        self.max = np.max(self.tracks_buffer,axis=0)      
        self.vertices=self.tracks_buffer
        #coord1 = np.array([self.tracks_buffer[:,0].min(),self.tracks_buffer[:,1].min(),self.tracks_buffer[:,2].min()], dtype = 'f4')        
        #coord2 = np.array([self.tracks_buffer[:,0].max(),self.tracks_buffer[:,1].max(),self.tracks_buffer[:,2].max()], dtype = 'f4')
        #self.make_aabb((coord1,coord2),0)
        #show size of tractography buffer
        print('MBytes %f' % (self.tracks_buffer.nbytes/2.**20,))
        self.position = (0,0,0)
        #buffer for selected virtual tracks
        self.selected = []
        self.virtuals_line_width = virtuals_line_width
        self.tracks_line_width = tracks_line_width
        self.old_color = {}
        self.hide_virtuals = False
        self.expand = False
        self.verbose = verbose
        self.tracks_visualized_first = np.array([], dtype='i4')
        self.tracks_visualized_count = np.array([], dtype='i4')
        self.history = [[self.qb, self.tracks, self.tracks_ids, self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count, self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count]]
        #shifting of track is necessary for dipy.tracking.vox2track.track_counts
        #we also upsample using 30 points in order to increase the accuracy of track counts
        self.vol_shape = vol_shape
        if self.vol_shape is not None:
            #self.tracks_shifted =[t+np.array(vol_shape)/2. for t in self.tracks]
            self.virtuals_shifted =[downsample(t+np.array(self.vol_shape)/2.,30) for t in self.virtuals]

        else:
            #self.tracks_shifted=None
            self.virtuals_shifted=None
Example No. 12
 def freeze(self):
     print("Freezing current expanded real tracks, then doing QB on them, then restarting.")
     print("Selected virtuals: %s" % self.selected)
     tracks_frozen = []
     tracks_frozen_ids = []
     for tid in self.selected:
          print(tid)
         part_tracks = self.qb.label2tracks(self.tracks, tid)
         part_tracks_ids = self.qb.label2tracksids(tid)
         print("virtual %s represents %s tracks." % (tid, len(part_tracks)))
         tracks_frozen += part_tracks
         tracks_frozen_ids += part_tracks_ids
      print("frozen tracks size:", len(tracks_frozen))
      print("Computing quick bundles...", end=" ")
     self.unselect_track('all')
     self.tracks = tracks_frozen
     self.tracks_ids = self.tracks_ids[tracks_frozen_ids] # range(len(self.tracks))
     
     root = Tkinter.Tk()
     root.wm_title('QuickBundles threshold')
     ts = ThresholdSelector(root, default_value=self.qb.dist_thr/2.0)
     root.wait_window()
     
     #print "Threshold value ",ts.value
     #self.qb = QuickBundles(self.tracks, dist_thr=qb.dist_thr/2., pts=self.qb.pts)
     self.qb = QuickBundles(self.tracks, dist_thr=ts.value, pts=self.qb.pts)
     #self.qb.dist_thr = qb.dist_thr/2.
     self.qb.dist_thr = ts.value
     if self.reps=='virtuals':
          self.virtuals = self.qb.virtuals()
     if self.reps=='exemplars':
         self.virtuals,self.ex_ids = self.qb.exemplars()
      print(len(self.virtuals), 'virtuals')
     self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(self.virtuals, self.virtuals_alpha)
     #compute buffers
     self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(self.tracks, self.tracks_alpha)
     # self.unselect_track('all')
     self.selected = []
     self.old_color = {}
     self.expand = False
     self.history.append([self.qb, 
                         self.tracks, 
                         self.tracks_ids, 
                         self.virtuals_buffer, 
                         self.virtuals_colors, 
                         self.virtuals_first, 
                         self.virtuals_count, 
                         self.tracks_buffer, 
                         self.tracks_colors, 
                         self.tracks_first, 
                         self.tracks_count])
     if self.vol_shape is not None:
         print("Shifting!")
         self.virtuals_shifted = [downsample(t + np.array(self.vol_shape) / 2., 30) for t in self.virtuals]
     else:
         self.virtuals_shifted = None
Example No. 13
def streamline_based_registration(source_tractography_streamlines,
                                  target_tractography_streamlines,
                                  subject_pair):

    intersection_voxel_list = []
    target_voxel_list = []
    n_points = 20
    srr = StreamlineLinearRegistration()
    SAr = [downsample(i, n_points) for i in source_tractography]
    SBr = [downsample(i, n_points) for i in target_tractography]
    srm = srr.optimize(static=SBr, moving=SAr)
    transformed_tractography = srm.transform(source_tractography)

    print(len(transformed_tractography))
    temp_index = 0
    for i in range(len(source_tractography_streamlines)):

        voxel_and, voxel_target = voxel_measure(
            transformed_tractography[
                temp_index:temp_index + source_tractography_streamlines[
                    source + '_' + subject_tracts[str(subject_pair)][i]][1]],
            target_tractography_streamlines[
                target + '_' + subject_tracts[str(subject_pair)][i]][0])

        temp_index = temp_index + source_tractography_streamlines[
            source + '_' + subject_tracts[str(subject_pair)][i]][1]

        intersection_voxel_list.append(voxel_and)
        target_voxel_list.append(voxel_target)

    total_intersection_voxel_list = np.sum(np.array(intersection_voxel_list))
    total_target_voxel_list = np.sum(np.array(target_voxel_list))

    print("Number of voxels per tract")
    print(intersection_voxel_list, target_voxel_list)

    print("Number of voxels")
    print(total_intersection_voxel_list, total_target_voxel_list)
    TDA_all_voxel_registration = np.divide(total_intersection_voxel_list,
                                           total_target_voxel_list)

    print("Modified-TDR-for-all")
    print(TDA_all_voxel_registration)
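A minimal sketch of the registration step above in isolation, assuming `source_tractography` and `target_tractography` are lists of (N, 3) streamline arrays and that `StreamlineLinearRegistration` and `downsample` are imported as used in the function:

n_points = 20
srr = StreamlineLinearRegistration()
moving = [downsample(s, n_points) for s in source_tractography]
static = [downsample(s, n_points) for s in target_tractography]
srm = srr.optimize(static=static, moving=moving)
source_aligned = srm.transform(source_tractography)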
Example No. 14
def show_zero_level(r,bundle,dist):

    T=[downsample(b,12) for b in bundle]
    C=local_skeleton_clustering(T,dist)
    vs=[]
    colors=np.zeros((len(T),3))
    for c in C:
        vs.append(C[c]['hidden']/C[c]['N'])
        color=np.random.rand(3,)
        #fvtk.add(r,fvtk.line(vs,color,linewidth=4.5))
        for i in C[c]['indices']:
            colors[i]=color
            fvtk.label(r,text=str(i),pos=(bundle[i][-1]),scale=(.5,.5,.5),color=(color[0],color[1],color[2]))    
    fvtk.add(r,fvtk.line(T,colors,linewidth=2.))    
Example No. 15
def half_split_comparisons():

    res = {}

    for id in range(len(tractography_sizes)):
        res[id] = {}
        first, second = split_halves(id)
        res[id]["lengths"] = [len(first), len(second)]
        print(len(first), len(second))
        first_qb = QuickBundles(first, qb_threshold, downsampling)
        n_clus = first_qb.total_clusters
        res[id]["nclusters"] = n_clus
        print("QB for first half has", n_clus, "clusters")
        second_down = [downsample(s, downsampling) for s in second]
        matched_random = get_random_streamlines(first_qb.downsampled_tracks(), n_clus)
        neighbours_first = count_close_tracks(first_qb.virtuals(), first_qb.downsampled_tracks(), adjacency_threshold)
        neighbours_second = count_close_tracks(first_qb.virtuals(), second_down, adjacency_threshold)
        neighbours_random = count_close_tracks(matched_random, second_down, adjacency_threshold)

        maxclose = int(np.max(np.hstack((neighbours_first, neighbours_second, neighbours_random))))

        # The numbers of tracks 0, 1, 2, ... 'close' subset tracks
        counts = np.array(
            [
                (
                    int(n),
                    len(find(neighbours_first == n)),
                    len(find(neighbours_second == n)),
                    len(find(neighbours_random == n)),
                )
                for n in range(maxclose + 1)
            ],
            dtype="f",
        )
        totals = np.sum(counts[:, 1:], axis=0)
        res[id]["totals"] = totals
        res[id]["counts"] = counts
        # print totals
        # print counts
        missed_fractions = counts[0, 1:] / totals
        res[id]["missed_fractions"] = missed_fractions
        means = np.sum(counts[:, 1:] * counts[:, [0, 0, 0]], axis=0) / totals
        # print means
        res[id]["means"] = means
        # print res
    return res
Example No. 16
def plot_timings():
    
    #dir='/home/eg309/Data/LSC_limits/full_1M'
    dir='/tmp/full_1M'
    fs=['.npy','_2.npy','_3.npy','_4.npy','_5.npy']#,'_6.npy','_7.npy','_8.npy','_9.npy','_10.npy']
    
    T=[]
    for f in fs:
        fs1=dir+f
        T+=change_dtype(list(np.load(fs1)))
    #return T
    T=T[:100000]
    
    print(len(T))
    #dists=[4.,6.,8.,10.]
    dists=[8.]
    #pts=[3,6,12,18]
    pts=[12]
    #sub=10**5
    sub=10**3
    res={}
    for p in pts:
        print(p)
        res[p]={}
        for d in dists:
            print(d)
            res[p][d]={}
            res[p][d]['time']=[]
            res[p][d]['len']=[]
            step=0
            while step <= len(T):
                print(step)
                Td=[downsample(t,p) for t in T[0:step+sub]]
                t1=time()
                C=local_skeleton_clustering(Td,d)
                t2=time()
                res[p][d]['time'].append(t2-t1)
                res[p][d]['len'].append(len(C))       
                step=step+sub
    
    
    save_pickle('/tmp/res.pkl',res)
    print('Result saved in /tmp/res.pkl')
    #return res
Example No. 17
def see_tracks(fdpy,N=2000):
    
    
    dpr=Dpy(fdpy,'r')
    #T=dpr.read_tracksi(range(N))
    T=dpr.read_tracks()
    dpr.close()    
    
    T=[downsample(t,5) for t in T]    

    r=fvtk.ren()
    colors=np.ones((len(T),3)).astype('f4')
    for (i,c) in enumerate(T):        
        orient=c[0]-c[-1]
        orient=np.abs(orient/np.linalg.norm(orient))
        colors[i,:3]=orient    
    fvtk.add(r,fvtk.line(T,colors,opacity=0.5))
    #fos.add(r,fos.sphere((0,0,0),10))
    fvtk.show(r)
Example No. 18
def generate_random_tracks(rand_no):

    img=nib.load(fbet)
    data=img.get_data()
    affine=img.get_affine()
    bvals=np.loadtxt(fbvals)
    bvecs=np.loadtxt(fbvecs).T
    t=time()
    gqs=GeneralizedQSampling(data,bvals,bvecs)
    print('gqs time', time()-t, 's')
    for (i,sds) in enumerate(seeds):
        print(i, sds)
        t=time()
        eu=EuDX(gqs.qa(),gqs.ind(),seeds=sds,a_low=0.0239)
        T=[downsample(e,12) for e in eu]        
        np.save('/tmp/random_T.npy', np.array(T, dtype=object))
        ###################
        
        print(time()-t)
        del T
    print(outs)
Example No. 19
def generate_skeletons():

    img=nib.load(fbet)
    data=img.get_data()
    affine=img.get_affine()
    bvals=np.loadtxt(fbvals)
    bvecs=np.loadtxt(fbvecs).T
    t=time()
    gqs=GeneralizedQSampling(data,bvals,bvecs)
    print('gqs time', time()-t, 's')
    for (i,sds) in enumerate(seeds):
        print(i, sds)
        t=time()
        eu=EuDX(gqs.qa(),gqs.ind(),seeds=sds,a_low=0.0239)
        T=[downsample(e,12) for e in eu]
        #np.save(dout+outs[i]+'.npy',np.array(T,dtype=np.object))
        C=local_skeleton_clustering(T,4.)
        save_pickle(dout+outs[i]+'.skl',C)
        print(time()-t)
        del T
    print(outs)
Example No. 20
    def __init__(self,tracks,dist_thr=4.,pts=12):
        """ Highly efficient trajectory clustering 
        
        Parameters
        -----------
        tracks : sequence of (N,3) ... (M,3) arrays,
                    trajectories (or tractography or streamlines)
                    
        dist_thr : float, 
                    distance threshold in the space of the tracks
        pts : int, 
                number of points for simplifying the tracks 
                       
        Methods
        --------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters        
        
        Citation
        ---------
        
        E.Garyfallidis, "Towards an accurate brain tractography", PhD thesis, 2012 
        
        """
        warn(DeprecationWarning("Class 'dipy.segment.quickbundles.QuickBundles'"
                                " is deprecated, use module "
                                "'dipy.segment.clustering.QuickBundles'"
                                " instead"))

        self.dist_thr = dist_thr
        self.pts = pts
        if pts is not None:
            self.tracksd=[downsample(track,self.pts) for track in tracks]
        else:
            self.tracksd=tracks                    
        self.clustering=local_skeleton_clustering(self.tracksd, self.dist_thr)
        self.virts=None
        self.exemps=None                
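The deprecation warning above points to `dipy.segment.clustering.QuickBundles`; a minimal sketch of that interface, as it is also used in the last example on this page, assuming `streamlines` is a list of (N, 3) arrays:

from dipy.segment.clustering import QuickBundles

qb = QuickBundles(threshold=10.)
clusters = qb.cluster(streamlines)
print(len(clusters), 'clusters; centroid tracks are in clusters.centroids')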
Example No. 21
def show(T,A,IND,VERTS,scale):
    
    r=fvtk.ren()
    fvtk.clear(r)
    fvtk.add(r,fvtk.line(T,fvtk.red))
    fvtk.show(r)
    
    Td=[downsample(t,20) for t in T]
    C=local_skeleton_clustering(Td,3)
    fvtk.clear(r)
    lent=float(len(T))
    
    for c in C:
        color=np.random.rand(3)
        virtual=C[c]['hidden']/float(C[c]['N'])
        if length(virtual)> virtual_thr: 
            linewidth=100*len(C[c]['indices'])/lent
            if linewidth<1.:
                linewidth=1
            #fvtk.add(r,fvtk.line(virtual,color,linewidth=linewidth))
            #fvtk.add(r,fvtk.label(r,str(len(C[c]['indices'])),pos=virtual[0],scale=3,color=color ))
        #print C[c]['hidden'].shape
    
    print(A.shape)
    print(IND.shape)
    print(VERTS.shape)
    
    all,allo=fvtk.crossing(A,IND,VERTS,scale,True)
    colors=np.zeros((len(all),3))
    for (i,a) in enumerate(all):
        if allo[i][0]==0 and allo[i][1]==0 and allo[i][2]==1:
            pass
        else:            
            colors[i]=cm.boys2rgb(allo[i])
    
    fvtk.add(r,fvtk.line(all,colors))    
    fvtk.show(r)
Example No. 22
    def __init__(self, tracks, dist_thr=4., pts=12):
        """ Highly efficient trajectory clustering [Garyfallidis12]_.

        Parameters
        ----------
        tracks : sequence of (N,3) ... (M,3) arrays
            trajectories (or tractography or streamlines)
        dist_thr : float
            distance threshold in the space of the tracks
        pts : int
            number of points for simplifying the tracks

        Methods
        -------
        clustering() returns a dict holding the clustering result
        virtuals() gives the virtuals (track centroids) of the clusters
        exemplars() gives the exemplars (track medoids) of the clusters

        References
        ----------
        .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                            tractography simplification,
                            Frontiers in Neuroscience, vol 6, no 175, 2012.
        """
        warn(DeprecationWarning(deprecation_msg))

        self.dist_thr = dist_thr
        self.pts = pts
        if pts is not None:
            self.tracksd = [downsample(track, self.pts) for track in tracks]
        else:
            self.tracksd = tracks
        self.clustering = local_skeleton_clustering(self.tracksd,
                                                    self.dist_thr)
        self.virts = None
        self.exemps = None
Example No. 23
S = np.dot(Rot, S.T).T + np.array([1, 0, 0])
print("Dummy translated")
print(invariant_angles(S))

from dipy.viz import fvtk

r = fvtk.ren()
fvtk.add(r, fvtk.line(Sinit, fvtk.red))
fvtk.add(r, fvtk.line(S, fvtk.green))
fvtk.add(r, fvtk.axes())
fvtk.show(r)

# helix
theta = 2 * np.pi * np.linspace(0, 2, 100)
x = np.cos(theta)
y = np.sin(theta)
z = theta / (2 * np.pi)
Shel = np.vstack((x, y, z)).T

from dipy.tracking.metrics import downsample

Shel = downsample(Shel, 12)

print("Helix standard")
print(invariant_angles(Shel))
Shel2 = np.dot(Rot, Shel.T).T

print("Helix rotated")
print(invariant_angles(Shel2))
Example No. 24
def subsample_streamlines(streamlines,
                          min_length=0.,
                          max_length=0.,
                          max_streamlines=0,
                          num_points=0,
                          arc_length=False,
                          rng=None):
    """
    Parameters
    ----------
    streamlines: list
        List of list of 3D points.
    min_length: float
        Minimum length of streamlines.
    max_length: float
        Maximum length of streamlines.
    max_streamlines: int
        Maximum number of streamlines to output.
    num_points: int
        Number of points per streamline in the output.
    arc_length: bool
        Whether to downsample using arc length parametrization.
    rng: RandomState object
        Random number generator to use for shuffling the data.
        By default, a constant seed is used.

    Return
    ------
    results: list
        List of subsampled streamlines.
    """

    if rng is None:
        rng = np.random.RandomState(1234)

    num_streamlines = len(streamlines)
    if max_streamlines <= 0:
        max_streamlines = num_streamlines

    lengths = np.zeros(num_streamlines)
    for i in np.arange(num_streamlines):
        lengths[i] = dipy.tracking.metrics.length(streamlines[i])

    ind = list(range(num_streamlines))
    rng.shuffle(ind)
    results = []

    while len(ind) > 0 and len(results) < max_streamlines:
        i = ind.pop()
        if (lengths[i] >= min_length
                and (max_length <= 0. or lengths[i] <= max_length)):
            if num_points:
                if arc_length:
                    line = set_number_of_points(streamlines[i], num_points)
                else:
                    line = downsample(streamlines[i], num_points)
                results.append(line)
            else:
                results.append(streamlines[i])

    return results
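A brief usage sketch, assuming `streamlines` is an illustrative list of (N, 3) arrays:

# Keep at most 1000 streamlines with lengths between 20 and 200 mm,
# each resampled to 12 points.
subset = subsample_streamlines(streamlines, min_length=20., max_length=200.,
                               max_streamlines=1000, num_points=12)
print(len(subset), 'streamlines kept')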
Example No. 25
print(len(bun3))
"""

r=fvtk.ren()

r.SetBackground(1,1,1.)

fvtk.add(r,fvtk.line(bundle,fvtk.red,linewidth=3))
fvtk.add(r,fvtk.line(bundle3,fvtk.green,linewidth=3))
fvtk.add(r,fvtk.line(bundle4,fvtk.blue,linewidth=3))
fvtk.show(r,size=(800,800))


from LSC_limits import bring_virtuals

Td=[downsample(t,80) for t in bun3]

C8=local_skeleton_clustering(Td,8)
vs,ls,tot=bring_virtuals(C8)
vs2=shift(vs,np.array([0,0,0],'f4'))

"""
wi2=Window(bgcolor=(1.,1.,1.,1.),width=1000,height=1000)
wi3=Window(bgcolor=(1.,1.,1.,1.),width=1000,height=1000)
w2=World()
w3=World()
wi2.attach(w2)
wi3.attach(w3)
w2.add(line(vs2,np.array([[1,0,1,1],[0,1,0,1]],'f4')))
"""
Example No. 26
Next, let's find the number of points that each streamline has.
"""

n_pts = [len(streamline) for streamline in bundle]

"""
Often, streamlines are represented with more points than are actually
necessary for specific applications. Also, sometimes every streamline has a
different number of points, which can be a problem for some algorithms. The
function ``downsample`` can be used to set the number of points of a
streamline to a specific number and, at the same time, enforce that all the
segments of the streamline have equal length.
"""

bundle_downsampled = [downsample(s, 12) for s in bundle]
n_pts_ds = [len(s) for s in bundle_downsampled]

"""
Alternatively, the function ``approx_polygon_track`` reduces the number of
points so that there are more points in curvy regions and fewer points in
straighter regions. In contrast with ``downsample``, it does not enforce that
segments have equal length.
"""

bundle_downsampled2 = [approx_polygon_track(s, 0.25) for s in bundle]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]
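
"""
As a rough check of how much each method reduces the data (a minimal sketch,
assuming ``np`` is the ``numpy`` module imported earlier in this example):
"""

print('mean points per streamline: %.1f raw, %.1f downsampled, %.1f approximated'
      % (np.mean(n_pts), np.mean(n_pts_ds), np.mean(n_pts_ds2)))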

"""
Both ``downsample`` and ``approx_polygon_track`` can be thought of as methods
for lossy compression of streamlines.
Example No. 27
Load Trackvis file for *Fornix*:
"""

streams, hdr = tv.read(fname)
"""
Copy tracks:
"""

T = [i[0] for i in streams]

#T=T[:1000]
"""
Downsample tracks to just 3 points:
"""

tracks = [tm.downsample(t, 3) for t in T]
"""
Delete unnecessary data:
"""

del streams, hdr
"""
Perform Local Skeleton Clustering (LSC) with a 5mm threshold:
"""

now = time.perf_counter()
C = td.local_skeleton_clustering(tracks, d_thr=5)
print('Done in %.2f s' % (time.perf_counter() - now, ))
"""
Reduce the number of points for faster visualization using the
``approx_polygon_track`` algorithm, which retains points depending on how much
they are needed to define the shape of the track:
"""
Example No. 28
def vectorize_streamlines(streamlines, no_pts):
    """ Resample all streamlines to the same number of points
    """
    return [downsample(s, no_pts) for s in streamlines]
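A one-line usage sketch, assuming `streamlines` is an illustrative list of (N, 3) point arrays:

# Resample every streamline to 20 points.
streamlines_20 = vectorize_streamlines(streamlines, 20)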
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    if not os.path.isfile(args.tracts):
        parser.error("Tracts file: {0} does not exist.".format(args.tracts))

    if not os.path.isfile(args.aparc):
        parser.error("Label file: {0} does not exist.".format(args.aparc))

    if not os.path.isfile(args.labels):
        parser.error("Requested region file: {0} does not exist.".format(
            args.labels))

    if not os.path.isfile(args.lut):
        parser.error("Freesurfer LUT file: {0} does not exist.".format(
            args.lut))

    if not os.path.isfile(args.faimage):
        parser.error("FA Image file: {0} does not exist.".format(args.faimage))

    if not os.path.isfile(args.mdimage):
        parser.error("MD Image file: {0} does not exist.".format(args.mdimage))

    # Validate that tracts can be processed
    if not validate_coordinates(args.aparc, args.tracts, nifti_compliant=True):
        parser.error("The tracts file contains points that are invalid.\n" +
                     "Use the remove_invalid_coordinates.py script to clean.")

    # Load label image
    labels_img = nib.load(args.aparc)
    full_labels = labels_img.get_data().astype('int')

    # Load fibers
    tract_format = tc.detect_format(args.tracts)
    tract = tract_format(args.tracts, args.aparc)

    affine = compute_affine_for_dipy_functions(args.aparc, args.tracts)

    #load FA and MD image
    fa_img = nib.load(args.faimage)
    fa_data = fa_img.get_data()

    md_img = nib.load(args.mdimage)
    md_data = md_img.get_data()

    # ========= processing streamlines =================
    fiberlen_range = np.asarray([args.minlen, args.maxlen])

    streamlines = [t for t in tract]
    print("Subject " + args.sub_id + " has " + str(len(streamlines)) +
          " raw streamlines.")

    f_streamlines = []  #filtered streamlines
    lenrecord = []
    idx = 0
    for sl in streamlines:
        # Avoid streamlines having only one point, as they crash the
        # Dipy connectivity matrix function.
        if sl.shape[0] > 1:
            flen = length(sl)
            # get fibers having length between 20mm and 200mm
            if (flen > fiberlen_range[0]) & (flen < fiberlen_range[1]):
                f_streamlines.append(sl)
                lenrecord.append(flen)
                idx = idx + 1

    print("Subject " + args.sub_id + " has " + str(idx) +
          " streamlines with lengths between " + str(args.minlen) +
          " and " + str(args.maxlen) + ".")

    # ============= process the parcellation =====================
    dilation_para = np.array([args.dilation_dist, args.dilation_windsize])

    # Compute the mapping from label name to label id
    label_id_mapping = compute_labels_map(args.lut)

    # Find which labels were requested by the user.
    requested_labels_mapping = compute_requested_labels(
        args.labels, label_id_mapping)

    # Filter to keep only needed ones
    filtered_labels = np.zeros(full_labels.shape, dtype='int')
    for label_val in requested_labels_mapping:
        if sum(sum(sum(full_labels == label_val))) == 0:
            print(label_val)
            print(requested_labels_mapping[label_val])

        filtered_labels[full_labels == label_val] = label_val

    #cortex band dilation
    dilated_labels = cortexband_dilation_wm(filtered_labels, full_labels,
                                            dilation_para)

    # Reduce the range of labels to avoid a sparse matrix,
    # because the ids of labels can range from 0 to the 12000's.
    reduced_labels, labels_lut = dpu.reduce_labels(filtered_labels)
    reduced_dilated_labels, labels_lut = dpu.reduce_labels(dilated_labels)

    # Compute connectivity matrix and extract the fibers
    M, grouping = nconnectivity_matrix(f_streamlines,
                                       reduced_dilated_labels,
                                       fiberlen_range,
                                       args.cnpoint,
                                       affine=affine,
                                       symmetric=True,
                                       return_mapping=True,
                                       mapping_as_streamlines=True)

    Msize = len(M)
    CM_before_outlierremove = M[1:, 1:]
    nstream_bf = np.sum(CM_before_outlierremove)
    print(args.sub_id + ' ' + str(nstream_bf) +
          ' streamlines in the connectivity matrix before outlier removal.')

    #===================== process the streamlines =============
    print('Processing streamlines to remove outliers ..............')

    outlier_para = 3
    average_thrd = 8

    M_after_ourlierremove = np.zeros((Msize, Msize))
    #downsample streamlines
    cell_streamlines = []
    cell_id = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_streamlines = grouping[i, j]
            tmp_streamlines = list(tmp_streamlines)
            #downsample
            tmp_streamlines_downsampled = [
                downsample(s, 100) for s in tmp_streamlines
            ]
            #remove outliers, we need to rewrite the QuickBundle method to speed up this process

            qb = QuickBundles(threshold=average_thrd)
            clusters = qb.cluster(tmp_streamlines_downsampled)
            outlier_clusters = clusters < outlier_para  #small clusters
            nonoutlier_clusters = clusters[np.logical_not(outlier_clusters)]

            tmp_nonoutlier_index = []
            for tmp_cluster in nonoutlier_clusters:
                tmp_nonoutlier_index = tmp_nonoutlier_index + tmp_cluster.indices

            clean_streamline_downsampled = [
                tmp_streamlines_downsampled[ind]
                for ind in tmp_nonoutlier_index
            ]
            cell_streamlines.append(clean_streamline_downsampled)
            cell_id.append([i, j])
            M_after_ourlierremove[i, j] = len(clean_streamline_downsampled)

    CM_after_ourlierremove = M_after_ourlierremove[1:, 1:]
    nstream_bf = np.sum(CM_after_ourlierremove)
    print(args.sub_id + ' ' + str(nstream_bf) +
          ' streamlines in the connectivity matrix after outlier removal.')

    #save streamlines and count matrix

    cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_cm_count_raw.mat"
    cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_cm_count_processed.mat"
    cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_cm_streamlines.mat"
    reduced_labels_fname = args.sub_id + "_" + args.pre + "_reduced_labels.nii.gz"
    dilated_labels_fname = args.sub_id + "_" + args.pre + "_dilated_labels.nii.gz"
    RoiInfo_fname = args.sub_id + "_" + args.pre + "_RoiInfo.mat"

    # save the raw count matrix
    CM = M[1:, 1:]
    sio.savemat(cmCountMatrix_fname, {'cm': CM})
    sio.savemat(cmCountMatrix_processed_fname, {'cm': CM_after_ourlierremove})

    # save the streamline matrix
    sio.savemat(cmStreamlineMatrix_fname, {'slines': cell_streamlines})
    sio.savemat(RoiInfo_fname, {'ROIinfo': cell_id})
    print(args.sub_id + ' cell_streamlines.mat, ROIinfo.mat have been saved')

    filtered_labels_img = nib.Nifti1Image(filtered_labels,
                                          labels_img.get_affine(),
                                          labels_img.get_header())
    nib.save(filtered_labels_img, reduced_labels_fname)
    print(args.sub_id + ' filtered labels have been saved')

    dilated_labels_img = nib.Nifti1Image(dilated_labels,
                                         labels_img.get_affine(),
                                         labels_img.get_header())
    nib.save(dilated_labels_img, dilated_labels_fname)
    print(args.sub_id + ' dilated labels have been saved')

    # ===================== process the streamlines and extract features =============
    cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                              cell_id,
                                              fa_data,
                                              Msize,
                                              affine=affine)
    (tmp_cm_fa_mean, tmp_cm_fa_max,
     cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

    # extract MD values along the streamlines
    cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                              cell_id,
                                              md_data,
                                              Msize,
                                              affine=affine)
    (tmp_cm_md_mean, tmp_cm_md_max,
     testcm) = fa_mean_extraction(cm_md_curve, Msize)

    #connected surface area
    # extract the connective volume ratio
    (tmp_cm_volumn,
     tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(reduced_labels,
                                                        Msize,
                                                        cell_streamlines,
                                                        cell_id,
                                                        affine=affine)

    #fiber length
    tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

    #save cm features
    cm_md_mean = tmp_cm_md_mean[1:, 1:]
    cm_md_max = tmp_cm_md_max[1:, 1:]

    cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
    cm_fa_max = tmp_cm_fa_max[1:, 1:]

    cm_volumn = tmp_cm_volumn[1:, 1:]
    cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

    connectcm_len = tmp_connectcm_len[1:, 1:]

    sio.savemat(args.pre + "_cm_processed_mdmean_100.mat",
                {'cm_mdmean': cm_md_mean})
    sio.savemat(args.pre + "_cm_processed_mdmax_100.mat",
                {'cm_mdmax': cm_md_max})
    sio.savemat(args.pre + "_cm_processed_famean_100.mat",
                {'cm_famean': cm_fa_mean})
    sio.savemat(args.pre + "_cm_processed_famax_100.mat",
                {'cm_famax': cm_fa_max})
    sio.savemat(args.pre + "_cm_processed_volumn_100.mat",
                {'cm_volumn': cm_volumn})
    sio.savemat(args.pre + "_cm_processed_volumn_ratio_100.mat",
                {'cm_volumn_ratio': cm_volumn_ratio})
    sio.savemat(args.pre + "_cm_processed_fiberlen_100.mat",
                {'cm_len': connectcm_len})

    # save the diffusion functions matrix
    cell_fa = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_fa = cm_fa_curve[i, j]
            tmp_fa = list(tmp_fa)
            cell_fa.append(tmp_fa)

    sio.savemat(args.pre + "_cm_processed_sfa_100.mat", {'sfa': cell_fa})
    print('cell_fa.mat, fa_roiinfo.mat have been saved')

    cell_md = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_md = cm_md_curve[i, j]
            tmp_md = list(tmp_md)
            cell_md.append(tmp_md)

    sio.savemat(args.pre + "_cm_processed_smd_100.mat", {'smd': cell_md})
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    if not os.path.isfile(args.tracts):
        parser.error("Tracts file: {0} does not exist.".format(args.tracts))

    if not os.path.isfile(args.org_aparc):
        parser.error("Original label file: {0} does not exist.".format(
            args.org_aparc))

    if not os.path.isfile(args.dilated_aparc):
        parser.error("Dilated label file: {0} does not exist.".format(
            args.dilated_aparc))

    if not os.path.isfile(args.subcortical_labels):
        parser.error("Requested region file: {0} does not exist.".format(
            args.subcortical_labels))

    if not os.path.isfile(args.lut):
        parser.error("Freesurfer LUT file: {0} does not exist.".format(
            args.lut))

    if not os.path.isfile(args.faimage):
        parser.error("FA Image file: {0} does not exist.".format(args.faimage))

    if not os.path.isfile(args.mdimage):
        parser.error("MD Image file: {0} does not exist.".format(args.mdimage))

    # Validate that tracts can be processed
    if not validate_coordinates(
            args.org_aparc, args.tracts, nifti_compliant=True):
        parser.error("The tracts file contains points that are invalid.\n" +
                     "Use the remove_invalid_coordinates.py script to clean.")

    # Load label images
    org_labels_img = nib.load(args.org_aparc)
    org_labels_data = org_labels_img.get_data().astype('int')

    dilated_labels_img = nib.load(args.dilated_aparc)
    dilated_labels_data = dilated_labels_img.get_data().astype('int')

    # Load fibers
    tract_format = tc.detect_format(args.tracts)
    tract = tract_format(args.tracts, args.org_aparc)
    affine = compute_affine_for_dipy_functions(args.org_aparc, args.tracts)

    #load FA and MD image
    fa_img = nib.load(args.faimage)
    fa_data = fa_img.get_data()

    md_img = nib.load(args.mdimage)
    md_data = md_img.get_data()

    # ========= processing streamlines =================
    fiberlen_range = np.asarray([args.minlen, args.maxlen])

    streamlines = [t for t in tract]
    print("Subject " + args.sub_id + " has " + str(len(streamlines)) +
          " streamlines.")

    f_streamlines = []  #filtered streamlines
    lenrecord = []
    idx = 0
    for sl in streamlines:
        # Avoid streamlines having only one point, as they crash the
        # Dipy connectivity matrix function.
        if sl.shape[0] > 1:
            flen = length(sl)
            # get fibers having length between 20mm and 200mm
            if (flen > fiberlen_range[0]) & (flen < fiberlen_range[1]):
                f_streamlines.append(sl)
                lenrecord.append(flen)
                idx = idx + 1

    print("Subject " + args.sub_id + " has " + str(idx - 1) +
          " streamlines after filtering.")

    # ============= process the parcellation =====================

    # Compute the mapping from label name to label id
    label_id_mapping = compute_labels_map(args.lut)

    # Find which labels were requested by the user.
    requested_labels_mapping = compute_requested_labels(
        args.subcortical_labels, label_id_mapping)

    # Increase aparc_filtered_labels with subcortical regions
    # 17 LH_Hippocampus
    # 53 RH_Hippocampus
    # 11 LH_Caudate
    # 50 RH_Caudate
    # 12 LH_Putamen
    # 51 RH_Putamen
    # 13 LH_Pallidum
    # 52 RH_Pallidum
    # 18 LH_Amygdala
    # 54 RH_Amygdala
    # 26 LH_Accumbens
    # 58 RH_Accumbens
    # 10 LH_Thalamus-Proper
    # 49 RH_Thalamus-Proper
    # 4 LH_Lateral-Ventricle
    # 43 RH_Lateral-Ventricle
    # 8 LH_Cerebellum-Cortex
    # 47 RH_Cerebellum-Cortex
    #
    # 16 _Brain-Stem (# 7,8 LH_Cerebellum) (# 41 RH_Cerebellum)

    sub_cortical_labels = [
        17, 53, 11, 50, 12, 51, 13, 52, 18, 54, 26, 58, 10, 49, 4, 43, 8, 47
    ]  # 16
    Brain_Stem_cerebellum = [16]  #1

    aparc_filtered_labels = dilated_labels_data
    for label_val in requested_labels_mapping:
        if sum(sum(sum(org_labels_data == label_val))) == 0:
            print(label_val)
            print(requested_labels_mapping[label_val])

        aparc_filtered_labels[org_labels_data == label_val] = label_val

    for brain_stem_id in Brain_Stem_cerebellum:
        if sum(sum(sum(org_labels_data == brain_stem_id))) == 0:
            print('no labels of')
            print(brain_stem_id)
        aparc_filtered_labels[
            org_labels_data ==
            brain_stem_id] = 99  # let the brain stem's label be 99

    # Reduce the range of labels to avoid a sparse matrix,
    # because the ids of labels can range from 0 to the 12000's.
    reduced_dilated_labels, labels_lut = dpu.reduce_labels(
        aparc_filtered_labels)

    #dilated_labels_fname = args.sub_id + "_" + args.pre + "_dilated_allbrain_labels.nii.gz"
    #dilated_labels_img = nib.Nifti1Image(aparc_filtered_labels, org_labels_img.get_affine(),org_labels_img.get_header())
    #nib.save(dilated_labels_img,dilated_labels_fname)
    #print args.sub_id + 'dilated labels have saved'
    #pdb.set_trace()

    # Compute connectivity matrix and extract the fibers
    M, grouping = nconnectivity_matrix(f_streamlines,
                                       reduced_dilated_labels,
                                       fiberlen_range,
                                       args.cnpoint,
                                       affine=affine,
                                       symmetric=True,
                                       return_mapping=True,
                                       mapping_as_streamlines=True,
                                       keepfiberinroi=True)

    Msize = len(M)
    CM_before_outlierremove = M[1:, 1:]
    nstream_bf = np.sum(CM_before_outlierremove)
    print(args.sub_id + ' ' + str(nstream_bf) +
          ' streamlines in the connectivity matrix before outlier removal.')

    # ===================== process the streamlines =============
    print('Processing streamlines to remove outliers ..............')

    outlier_para = 3
    average_thrd = 8

    M_after_ourlierremove = np.zeros((Msize, Msize))
    # downsample streamlines
    cell_streamlines = []
    cell_id = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_streamlines = grouping[i, j]
            tmp_streamlines = list(tmp_streamlines)
            # downsample
            tmp_streamlines_downsampled = [
                downsample(s, 100) for s in tmp_streamlines
            ]
            # remove outliers, we need to rewrite the QuickBundle method to speed up this process

            qb = QuickBundles(threshold=average_thrd)
            clusters = qb.cluster(tmp_streamlines_downsampled)
            outlier_clusters = clusters < outlier_para  # small clusters
            nonoutlier_clusters = clusters[np.logical_not(outlier_clusters)]

            tmp_nonoutlier_index = []
            for tmp_cluster in nonoutlier_clusters:
                tmp_nonoutlier_index = tmp_nonoutlier_index + tmp_cluster.indices

            clean_streamline_downsampled = [
                tmp_streamlines_downsampled[ind]
                for ind in tmp_nonoutlier_index
            ]
            cell_streamlines.append(clean_streamline_downsampled)
            cell_id.append([i, j])
            M_after_ourlierremove[i, j] = len(clean_streamline_downsampled)

    CM_after_ourlierremove = M_after_ourlierremove[1:, 1:]
    nstream_bf = np.sum(CM_after_ourlierremove)
    print(args.sub_id + ' ' + str(nstream_bf) +
          ' streamlines in the connectivity matrix after outlier removal.')

    #===================== save the data =======================

    if (args.saving_indicator == 1):  # save the whole brain connectivity

        cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_count_raw.mat"
        cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_count_processed.mat"
        cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_streamlines.mat"
        reduced_dilated_labels_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_reduced_dilated_labels.nii.gz"
        RoiInfo_fname = args.sub_id + "_" + args.pre + "_allbrain_RoiInfo.mat"

        # save the raw count matrix
        CM = M[1:, 1:]
        sio.savemat(cmCountMatrix_fname, {'cm': CM})
        sio.savemat(cmCountMatrix_processed_fname,
                    {'cm': CM_after_ourlierremove})

        # save the streamline matrix
        sio.savemat(cmStreamlineMatrix_fname, {'slines': cell_streamlines})
        sio.savemat(RoiInfo_fname, {'ROIinfo': cell_id})
        print(args.sub_id + ' cell_streamlines.mat, ROIinfo.mat have been saved')

        filtered_labels_img = nib.Nifti1Image(aparc_filtered_labels,
                                              org_labels_img.get_affine(),
                                              org_labels_img.get_header())
        nib.save(filtered_labels_img, reduced_dilated_labels_fname)
        print(args.sub_id + ' all brain dilated labels have been saved')

        # ===================== process the streamlines and extract features =============
        cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  fa_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_fa_mean, tmp_cm_fa_max,
         cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

        # extract MD values along the streamlines
        cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  md_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_md_mean, tmp_cm_md_max,
         testcm) = fa_mean_extraction(cm_md_curve, Msize)

        # connected surface area
        # extract the connective volume ratio
        (tmp_cm_volumn, tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(
            reduced_dilated_labels,
            Msize,
            cell_streamlines,
            cell_id,
            affine=affine)

        # fiber length
        tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

        # save cm features
        cm_md_mean = tmp_cm_md_mean[1:, 1:]
        cm_md_max = tmp_cm_md_max[1:, 1:]

        cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
        cm_fa_max = tmp_cm_fa_max[1:, 1:]

        cm_volumn = tmp_cm_volumn[1:, 1:]
        cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

        connectcm_len = tmp_connectcm_len[1:, 1:]

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_mdmean_100.mat",
                    {'cm_mdmean': cm_md_mean})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_mdmax_100.mat",
                    {'cm_mdmax': cm_md_max})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_famean_100.mat",
                    {'cm_famean': cm_fa_mean})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_famax_100.mat",
                    {'cm_famax': cm_fa_max})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_volumn_100.mat",
                    {'cm_volumn': cm_volumn})
        sio.savemat(
            args.pre + "_allbrain" + "_cm_processed_volumn_ratio_100.mat",
            {'cm_volumn_ratio': cm_volumn_ratio})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_fiberlen_100.mat",
                    {'cm_len': connectcm_len})

        # save the streamline matrix
        cell_fa = []
        for i in range(1, Msize):
            for j in range(i + 1, Msize):
                tmp_fa = cm_fa_curve[i, j]
                tmp_fa = list(tmp_fa)
                cell_fa.append(tmp_fa)

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_sfa_100.mat",
                    {'sfa': cell_fa})
        print(args.pre + '_allbrain_cm_processed_sfa_100.mat has been saved')

        cell_md = []
        for i in range(1, Msize):
            for j in range(i + 1, Msize):
                tmp_md = cm_md_curve[i, j]
                tmp_md = list(tmp_md)
                cell_md.append(tmp_md)

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_smd_100.mat",
                    {'smd': cell_md})
        print args.pre + '_allbrain" + "_cm_processed_smd_100.mat' + ' has been saved'

    if args.saving_indicator == 0:
        # save only part of the connectome: connections involving the subcortical regions

        Nsubcortical_reg = len(sub_cortical_labels) + 1  # should be 19

        cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_count_raw.mat"
        cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_count_processed.mat"
        cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_streamlines.mat"
        reduced_dilated_labels_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_reduced_dilated_labels.nii.gz"
        subcortical_RoiInfo_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort_RoiInfo.mat"

        # save the raw count matrix
        CM = M[1:, 1:]
        sio.savemat(cmCountMatrix_fname, {'cm': CM})
        sio.savemat(cmCountMatrix_processed_fname,
                    {'cm': CM_after_ourlierremove})

        filtered_labels_img = nib.Nifti1Image(aparc_filtered_labels,
                                              org_labels_img.get_affine(),
                                              org_labels_img.get_header())
        nib.save(filtered_labels_img, reduced_dilated_labels_fname)
        print args.sub_id + ': dilated labels have been saved'

        # ===================== process the streamlines and extract features =============
        cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  fa_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_fa_mean, tmp_cm_fa_max,
         cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

        # extract MD values along the streamlines
        cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  md_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_md_mean, tmp_cm_md_max,
         testcm) = fa_mean_extraction(cm_md_curve, Msize)

        # connected surface area
        # extract the connective volume ratio
        (tmp_cm_volumn, tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(
            reduced_dilated_labels,
            Msize,
            cell_streamlines,
            cell_id,
            affine=affine)

        # fiber length
        tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

        # save cm features
        cm_md_mean = tmp_cm_md_mean[1:, 1:]
        cm_md_max = tmp_cm_md_max[1:, 1:]

        cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
        cm_fa_max = tmp_cm_fa_max[1:, 1:]

        cm_volumn = tmp_cm_volumn[1:, 1:]
        cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

        connectcm_len = tmp_connectcm_len[1:, 1:]

        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_mdmean_100.mat",
            {'cm_mdmean': cm_md_mean})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_mdmax_100.mat",
            {'cm_mdmax': cm_md_max})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_famean_100.mat",
            {'cm_famean': cm_fa_mean})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_famax_100.mat",
            {'cm_famax': cm_fa_max})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_volumn_100.mat",
            {'cm_volumn': cm_volumn})
        sio.savemat(
            args.pre + "_partbrain_subcort" +
            "_cm_processed_volumn_ratio_100.mat",
            {'cm_volumn_ratio': cm_volumn_ratio})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_fiberlen_100.mat",
            {'cm_len': connectcm_len})

        # save the streamline matrix
        cell_fa = []
        cell_id = []
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_fa = cm_fa_curve[i, j]
                tmp_fa = list(tmp_fa)
                cell_fa.append(tmp_fa)
                cell_id.append([i, j])

        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_sfa_100.mat",
            {'sfa': cell_fa})
        print args.pre + '_partbrain_subcort' + '_cm_processed_sfa_100.mat' + ' has been saved.'

        cell_md = []
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_md = cm_md_curve[i, j]
                tmp_md = list(tmp_md)
                cell_md.append(tmp_md)

        sio.savemat(args.pre + "_partbrain" + "_cm_processed_smd_100.mat",
                    {'smd': cell_md})
        print args.pre + '_partbrain' + '_cm_processed_smd_100.mat' + 'has been saved.'

        # save the streamline matrix
        subcortical_cell_streamlines = []
        cell_id = []
        idx = 0
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_sls = cell_streamlines[idx]
                idx = idx + 1
                subcortical_cell_streamlines.append(tmp_sls)
                cell_id.append([i, j])

        sio.savemat(cmStreamlineMatrix_fname,
                    {'slines': subcortical_cell_streamlines})
        sio.savemat(subcortical_RoiInfo_fname, {'ROIinfo': cell_id})
        print cmStreamlineMatrix_fname + ' has been saved'
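        # A hedged follow-up sketch (not in the original script): the cell
        # arrays saved above can be read back with scipy.io for a quick
        # sanity check of their shapes.
        check = sio.loadmat(cmStreamlineMatrix_fname)
        print 'reloaded streamline cell array with shape', check['slines'].shape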
Exemplo n.º 31
0
        print "Loading", buffers_filename
        buffers = np.load(buffers_filename)
    except IOError:
        print "Buffers not found, recomputing."
        fdpyw = tracks_basename + '.dpy'
        dpr = Dpy(fdpyw, 'r')
        print "Loading", fdpyw
        T = dpr.read_tracks()
        dpr.close()
    
        # T = T[:5000]
        # T = np.array(T, dtype=np.object)

        if downsampling:
            print "Dowsampling."
            T = downsample(t, qb_n_points)

#        print "Centering."
#        T = [t - np.array(data.shape[:3]) / 2.0 for t in T]
#            
#        print "Rotating."
#        axis = np.array([1, 0, 0])
#        theta = - 90.
#        if downsampling:
#            T = np.dot(T,rotation_matrix(axis, theta))
#        else:
#            T = [np.dot(t,rotation_matrix(axis, theta)) for t in T]
#            
#        axis = np.array([0, 1, 0])
#        theta = 180.
#        if downsampling:
def doit(basedir,
         rootname,
         trkloc,
         lenthr,
         subsampN=12,
         save_snapshot=False,
         debug=False):
    savename = '%s/%s_mtrx.txt' % (basedir, rootname)
    savetrk = '%s/%s.trk' % (basedir, rootname)
    savetrkss = '%s/%s_SS.trk' % (basedir, rootname)
    savetrkpic = savetrk.replace('.trk', '.png')
    savetrkpicss = savetrk.replace('.trk', 'SS.png')
    savehist = '%s/%s_hist.png' % (basedir, rootname)
    showhist = False
    showsls_var = False
    lenthr = 50  # note: this hard-coded value overrides the lenthr argument
    pow = 1
    if debug:
        subsampN = 3
        subset = 800

    trk, hdr = nib.trackvis.read(trkloc)
    sls = [item[0] for item in trk]
    lengths = list(length(sls))
    #print lengths
    '''print("cheerio")
    print(len(lengths))
    print(len(sls))
    print "two"
    print lengths[0]'''

    sls_long = []
    for n, j in enumerate(sls):
        if len(j) > 5:
            if lengths[n] > lenthr:
                print(lengths[n])
                sls_long.append(j)
            else:
                print("BAD")
                print(lengths[n])

    print("CHECKME")
    print(len(sls))
    print(len(sls_long))

    subsamp = [downsample(sl, subsampN) for sl in sls_long]
    if debug:
        subsamp = subsamp[0:subset]
        sls_long = sls_long[0:subset]
    score_mtrx = np.zeros([len(subsamp)])
    print("what")
    print(len(subsamp))
    print(len(sls))

    for i, sl in enumerate(subsamp):
        print(str(i) + '/' + str(len(subsamp)))
        mtrx2 = bundles_distances_mdf([sls_long[i]], sls_long)
        #mtrx2_oi = np.all([(mtrx2>0) & (mtrx2<5) & (not np.isnan(mtrx2))],axis=0)
        mtrx2_oi = (mtrx2 > 0) & (mtrx2 < 5) & ~np.isnan(mtrx2)
        zoom = mtrx2[mtrx2_oi]
        score = np.sum(np.divide(1, np.power(zoom, pow)))
        #makehist(zoom.T,score)
        score_mtrx[i] = score
        #print score
        #print score_mtrx
    '''print savename
    print basedir
    print rootname'''
    np.savetxt(savename, score_mtrx)
    #makehist(score_mtrx,name=rootname,saveloc=savehist,show=showhist)
    newhdr = hdr.copy()
    newhdr['n_properties'] = 1
    proplist = newhdr['property_name']
    proplist[0] = 'Cluster Confidence'
    newhdr['property_name'] = proplist
    #newtrkss = ((sl,None,score_mtrx[i]) for i,sl in enumerate(subsamp))
    newtrk = ((sl, None, score_mtrx[i]) for i, sl in enumerate(sls_long))
    #nib.trackvis.write(savetrkss, newtrkss, newhdr)
    nib.trackvis.write(savetrk, newtrk, newhdr)
    #showsls(subsamp,score_mtrx,savetrkpicss,show=showsls_var)
    if save_snapshot:
        showsls(sls_long, score_mtrx, savetrkpic, show=showsls_var)
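
# A minimal usage sketch (not from the original source; the paths below are
# illustrative placeholders): score a whole-brain trackvis file and write the
# confidence-weighted copy next to it.
if __name__ == '__main__':
    doit(basedir='.', rootname='example_subject',
         trkloc='example_subject.trk', lenthr=50,
         subsampN=12, save_snapshot=False, debug=True)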
Exemplo n.º 33
0
    t1=time()
    C5=pf.local_skeleton_clustering(T2,.5)
    t2=time()
    print t2-t1
    print len(C5)
    
    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError, e:
        raise nose.plugins.skip.SkipTest(
            'Fails to import dipy.viz due to %s' % str(e))
    
    streams,hdr=tv.read(get_data('fornix'))
    T3=[tm.downsample(s[0],6) for s in streams]    
    

    print 'lenT3',len(T3)
    
    C=pf.local_skeleton_clustering(T3,10.)
    
    print 'lenC',len(C)
    
    """
    
    r=fvtk.ren()
    colors=np.zeros((len(C),3))
    for c in C:
        color=np.random.rand(3)
        for i in C[c]['indices']:
Exemplo n.º 34
0
    def __init__(self,
                 name,
                 qb,
                 tracks,
                 reps='exemplars',
                 colors=None,
                 vol_shape=None,
                 virtuals_line_width=5.0,
                 tracks_line_width=2.0,
                 virtuals_alpha=1.0,
                 tracks_alpha=0.6,
                 affine=None,
                 verbose=False):
        """TrackLabeler is meant to explore and select subsets of the
        tracks. The exploration occurs through QuickBundles (qb) in
        order to simplify the scene.
        """
        super(TrackLabeler, self).__init__(name)

        if affine is None:
            self.affine = np.eye(4, dtype=np.float32)
        else:
            self.affine = affine

        self.mouse_x = None
        self.mouse_y = None
        self.cache = {}
        self.qb = qb
        self.reps = reps
        #virtual tracks
        if self.reps == 'virtuals':
            self.virtuals = qb.virtuals()
        if self.reps == 'exemplars':
            self.virtuals, self.ex_ids = qb.exemplars()
        self.virtuals_alpha = virtuals_alpha
        self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(
            self.virtuals, self.virtuals_alpha)
        #full tractography (downsampled at 12 pts per track)
        self.tracks = tracks
        self.tracks_alpha = tracks_alpha
        self.tracks_ids = np.arange(len(self.tracks), dtype=np.int)
        self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(
            self.tracks, self.tracks_alpha)
        #calculate boundary box for entire tractography
        self.min = np.min(self.tracks_buffer, axis=0)
        self.max = np.max(self.tracks_buffer, axis=0)
        self.vertices = self.tracks_buffer
        #coord1 = np.array([self.tracks_buffer[:,0].min(),self.tracks_buffer[:,1].min(),self.tracks_buffer[:,2].min()], dtype = 'f4')
        #coord2 = np.array([self.tracks_buffer[:,0].max(),self.tracks_buffer[:,1].max(),self.tracks_buffer[:,2].max()], dtype = 'f4')
        #self.make_aabb((coord1,coord2),0)
        #show size of tractography buffer
        print('MBytes %f' % (self.tracks_buffer.nbytes / 2.**20, ))
        self.position = (0, 0, 0)
        #buffer for selected virtual tracks
        self.selected = []
        self.virtuals_line_width = virtuals_line_width
        self.tracks_line_width = tracks_line_width
        self.old_color = {}
        self.hide_virtuals = False
        self.expand = False
        self.verbose = verbose
        self.tracks_visualized_first = np.array([], dtype='i4')
        self.tracks_visualized_count = np.array([], dtype='i4')
        self.history = [[
            self.qb, self.tracks, self.tracks_ids, self.virtuals_buffer,
            self.virtuals_colors, self.virtuals_first, self.virtuals_count,
            self.tracks_buffer, self.tracks_colors, self.tracks_first,
            self.tracks_count
        ]]
        #shifting of track is necessary for dipy.tracking.vox2track.track_counts
        #we also upsample using 30 points in order to increase the accuracy of track counts
        self.vol_shape = vol_shape
        if self.vol_shape is not None:
            #self.tracks_shifted =[t+np.array(vol_shape)/2. for t in self.tracks]
            self.virtuals_shifted = [
                downsample(t + np.array(self.vol_shape) / 2., 30)
                for t in self.virtuals
            ]
        else:
            #self.tracks_shifted=None
            self.virtuals_shifted = None
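        # Hedged usage note (not part of the original class): given a fitted
        # QuickBundles-like object `qb` and the (typically 12-point) tracks it
        # was built from, a labeler would be constructed roughly as
        #   tl = TrackLabeler('Bundle Picker', qb, tracks,
        #                     vol_shape=data.shape[:3], tracks_alpha=0.05)
        # where `tracks` and `data` are assumed to be defined by the caller.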
Exemplo n.º 35
0
def test_LSCv2():
    xyz1=np.array([[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2=np.array([[1,0,0],[1,2,0],[1,3,0]],dtype='float32')
    xyz3=np.array([[1.1,0,0],[1,2,0],[1,3,0]],dtype='float32')
    xyz4=np.array([[1,0,0],[2.1,0,0],[3,0,0]],dtype='float32')
    
    xyz5=np.array([[100,0,0],[200,0,0],[300,0,0]],dtype='float32')
    xyz6=np.array([[0,20,0],[0,40,0],[300,50,0]],dtype='float32')
    
    T=[xyz1,xyz2,xyz3,xyz4,xyz5,xyz6]
    C=pf.local_skeleton_clustering(T,0.2)
    
    #print C
    #print len(C)
    
    C2=pf.local_skeleton_clustering_3pts(T,0.2)
    
    #print C2
    #print len(C2)
            
    #"""
    
    for i in range(40):
        xyz=np.random.rand(3,3).astype('f4')
        T.append(xyz)
            
    from time import time
    t1=time()
    C3=pf.local_skeleton_clustering(T,.5)
    t2=time()
    print(t2-t1)
    print(len(C3))
    
    t1=time()
    C4=pf.local_skeleton_clustering_3pts(T,.5)
    t2=time()
    print(t2-t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden']-C4[c]['hidden']),0)
    
    T2=[]
    for i in range(10**4):
        xyz=np.random.rand(10,3).astype('f4')
        T2.append(xyz)
    t1=time()
    C5=pf.local_skeleton_clustering(T2,.5)
    t2=time()
    print(t2-t1)
    print(len(C5))
    
    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest(
            'Fails to import dipy.viz due to %s' % str(e))
    
    streams,hdr=tv.read(get_data('fornix'))
    T3=[tm.downsample(s[0],6) for s in streams]    
    

    print('lenT3',len(T3))
    
    C=pf.local_skeleton_clustering(T3,10.)
    
    print('lenC',len(C))
    
    """
Exemplo n.º 36
0
#same as
T=[]
for s in streams:
    T.append(s[0])


r=fvtk.ren()
linea=fvtk.line(T,fvtk.red)
fvtk.add(r,linea)
fvtk.show(r)


#for more complicated visualizations use mayavi
#or the new fos when released

dT=[tm.downsample(t,10) for t in T]
C=td.local_skeleton_clustering(dT,d_thr=5)

ldT=[tm.length(t) for t in dT]
#average length
avg_ldT=sum(ldT)/len(dT)
print(avg_ldT)

"""
r=fvtk.ren()
#fvtk.clear(r)
colors=np.zeros((len(T),3))
for c in C:
    color=np.random.rand(1,3)
    for i in C[c]['indices']:
        colors[i]=color
        #    if root.endswith('DIFF2DEPI_EKJ_64dirs_14'):
        #        base_dir = root+'/'
        #        filename = 'raw'
        #        base_dir2 = base_dir+ 'DTI/'

        print base_dir + "../MPRAGE_1/T1_flirt_out.nii.gz"
        img = nib.load(base_dir + "../MPRAGE_1/T1_flirt_out.nii.gz")
        data = img.get_data()

        for i in range(len(tracks_filename_arr)):
            print ">>>>"
            print base_dir2 + tracks_filename_arr[i]
            tracks = load_tracks(base_dir2 + tracks_filename_arr[i])
            print len(tracks)
            # tracks = [downsample(t, 12) - np.array(data.shape[:3])/2. for t in tracks]
            tracks = [downsample(t, 12) - np.array(data.shape) / 2.0 for t in tracks]
            # shift in the center of the volume
            # tracks=[t-np.array(data.shape)/2. for t in tracks]
            # 1/0
            print base_dir2 + qb_filename_15[i]
            qb = QuickBundles(tracks, 15.0, 12)
            save_pickle(base_dir2 + qb_filename_15[i], qb)

            print base_dir2 + qb_filename_20[i]
            qb = QuickBundles(tracks, 20.0, 12)
            save_pickle(base_dir2 + qb_filename_20[i], qb)

            print base_dir2 + qb_filename_30[i]
            qb = QuickBundles(tracks, 30.0, 12)
            save_pickle(base_dir2 + qb_filename_30[i], qb)
        # ================ quick bundles ==============================================
Exemplo n.º 38
0
in mind when you calculate lengths with real data.

Next, let's find the number of points that each streamline has.
"""

n_pts = [len(streamline) for streamline in bundle]
"""
Often, streamlines are represented with more points than are actually
necessary for a specific application. Moreover, streamlines may each have a
different number of points, which can be a problem for some algorithms.
The function ``downsample`` can be used to set the number of points of a
streamline to a specific value and, at the same time, enforce that all
segments of the streamline have equal length.
"""

bundle_downsampled = [downsample(s, 12) for s in bundle]
n_pts_ds = [len(s) for s in bundle_downsampled]
"""
Alternatively, the function ``approx_polygon_track`` reduces the number of
points so that there are more points in curvy regions and fewer points in
straighter regions. In contrast with ``downsample``, it does not enforce that
segments have equal length.
"""

bundle_downsampled2 = [approx_polygon_track(s, 0.25) for s in bundle]
n_pts_ds2 = [len(streamline) for streamline in bundle_downsampled2]
"""
Both ``downsample`` and ``approx_polygon_track`` can be thought of as methods for
lossy compression of streamlines.
"""
        #        base_dir2 = base_dir+ 'DTI/'

        # print base_dir+'../ANATOMY/T1_flirt_out.nii.gz'
        # img = nib.load(base_dir+'../ANATOMY/T1_flirt_out.nii.gz')
        # data = img.get_data()

        for i in range(len(tracks_filename_arr)):
            print ">>>>"
            print base_dir2 + tracks_filename_arr[i]
            tracks = load_tracks(base_dir2 + tracks_filename_arr[i])
            print len(tracks)

            # shift in the center of the volume
            # tracks=[t-np.array(data.shape)/2. for t in tracks]

            tracks = [downsample(t, 12) for t in tracks]
            print base_dir2 + qb_filename_15[i]
            qb = QuickBundles(tracks, 15.0, 12)
            save_pickle(base_dir2 + qb_filename_15[i], qb)

            print base_dir2 + qb_filename_20[i]
            qb = QuickBundles(tracks, 20.0, 12)
            save_pickle(base_dir2 + qb_filename_20[i], qb)

            print base_dir2 + qb_filename_30[i]
            qb = QuickBundles(tracks, 30.0, 12)
            save_pickle(base_dir2 + qb_filename_30[i], qb)
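            # Hedged note (not in the original script): each pickled
            # QuickBundles object can later be restored for inspection, e.g.
            #   qb15 = load_pickle(base_dir2 + qb_filename_15[i])
            # assuming load_pickle from dipy.io.pickles is available.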
        # ================ quick bundles ==============================================

        print ("Done")
        print (linear_filename)
Exemplo n.º 40
0
def test_LSCv2():
    xyz1 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]], dtype='float32')
    xyz2 = np.array([[1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz3 = np.array([[1.1, 0, 0], [1, 2, 0], [1, 3, 0]], dtype='float32')
    xyz4 = np.array([[1, 0, 0], [2.1, 0, 0], [3, 0, 0]], dtype='float32')

    xyz5 = np.array([[100, 0, 0], [200, 0, 0], [300, 0, 0]], dtype='float32')
    xyz6 = np.array([[0, 20, 0], [0, 40, 0], [300, 50, 0]], dtype='float32')

    T = [xyz1, xyz2, xyz3, xyz4, xyz5, xyz6]
    C = pf.local_skeleton_clustering(T, 0.2)

    # print C
    # print len(C)

    C2 = pf.local_skeleton_clustering_3pts(T, 0.2)

    # print C2
    # print len(C2)

    # """

    for i in range(40):
        xyz = np.random.rand(3, 3).astype('f4')
        T.append(xyz)

    from time import time
    t1 = time()
    C3 = pf.local_skeleton_clustering(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C3))

    t1 = time()
    C4 = pf.local_skeleton_clustering_3pts(T, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C4))

    for c in C3:
        assert_equal(np.sum(C3[c]['hidden'] - C4[c]['hidden']), 0)

    T2 = []
    for i in range(10**4):
        xyz = np.random.rand(10, 3).astype('f4')
        T2.append(xyz)
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print(t2 - t1)
    print(len(C5))

    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest('Fails to import dipy.viz due to %s' %
                                         str(e))

    streams, hdr = tv.read(get_data('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print('lenT3', len(T3))

    C = pf.local_skeleton_clustering(T3, 10.)

    print('lenC', len(C))
    """
Exemplo n.º 41
0
streams,hdr=tv.read(fname)

"""
Copy tracks:
"""

T=[i[0] for i in streams]

#T=T[:1000]

"""
Downsample tracks to just 3 points:
"""

tracks=[tm.downsample(t,3) for t in T]

"""
Delete unnecessary data:
"""

del streams,hdr

"""
Perform Local Skeleton Clustering (LSC) with a 5mm threshold:
"""

now=time.clock()
C=td.local_skeleton_clustering(tracks,d_thr=5)
print('Done in %.2f s'  % (time.clock()-now,))
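
"""
Each cluster returned by LSC stores, among other fields, the number of tracks
assigned to it. As a brief, illustrative summary (not part of the original
example):
"""

for c in C:
    print('cluster %d contains %d tracks' % (c, C[c]['N']))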
Exemplo n.º 42
0
def test_downsample():

    t = np.array([[  82.20181274,   91.3650589 ,   43.15737152],
       [  82.3844223 ,   91.79336548,   43.87036514],
       [  82.48710632,   92.27861023,   44.56298065],
       [  82.53310394,   92.7854538 ,   45.24635315],
       [  82.53793335,   93.26902008,   45.94785309],
       [  82.48797607,   93.75003815,   46.6493988 ],
       [  82.35533142,   94.2518158 ,   47.32533264],
       [  82.15484619,   94.76634216,   47.97451019],
       [  81.90982819,   95.28792572,   48.6024437 ],
       [  81.63336945,   95.78153229,   49.23971176],
       [  81.35479736,   96.24868011,   49.89558792],
       [  81.08713531,   96.69807434,   50.56812668],
       [  80.81504822,   97.14285278,   51.24193192],
       [  80.52591705,   97.56719971,   51.92168427],
       [  80.26599884,   97.98269653,   52.61848068],
       [  80.0463562 ,   98.38131714,   53.3385582 ],
       [  79.8469162 ,   98.77052307,   54.06955338],
       [  79.57667542,   99.13599396,   54.78985596],
       [  79.23351288,   99.4320755 ,   55.51065063],
       [  78.84815979,   99.64141846,   56.24016571],
       [  78.47383881,   99.77347565,   56.9929924 ],
       [  78.12837219,   99.81330872,   57.76969528],
       [  77.80438995,   99.85082245,   58.55574799],
       [  77.4943924 ,   99.88065338,   59.34777069],
       [  77.21414185,   99.85343933,   60.15090561],
       [  76.96416473,   99.82772827,   60.96406937],
       [  76.74712372,   99.80519104,   61.78676605],
       [  76.52263641,   99.79122162,   62.60765076],
       [  76.03757477,  100.08692169,   63.24152374],
       [  75.44867706,  100.3526535 ,   63.79513168],
       [  74.78033447,  100.57255554,   64.272789  ],
       [  74.11605835,  100.7733078 ,   64.76428986],
       [  73.51222992,  100.98779297,   65.32373047],
       [  72.97387695,  101.23387146,   65.93502045],
       [  72.47355652,  101.49151611,   66.57343292],
       [  71.99834442,  101.72480774,   67.2397995 ],
       [  71.5690918 ,  101.98665619,   67.92664337],
       [  71.18083191,  102.29483795,   68.61888123],
       [  70.81879425,  102.63343048,   69.31127167],
       [  70.47422791,  102.98672485,   70.00532532],
       [  70.10092926,  103.28502655,   70.70999908],
       [  69.69512177,  103.51667023,   71.42147064],
       [  69.27423096,  103.71351624,   72.13452911],
       [  68.91260529,  103.81676483,   72.89796448],
       [  68.60788727,  103.81982422,   73.69258118],
       [  68.34162903,  103.7661972 ,   74.49915314],
       [  68.08542633,  103.70635223,   75.30856323],
       [  67.83590698,  103.60187531,   76.11553955],
       [  67.56822968,  103.4482193 ,   76.90870667],
       [  67.28399658,  103.25878906,   77.68825531],
       [  67.00117493,  103.03740692,   78.45989227],
       [  66.72718048,  102.80329895,   79.23099518],
       [  66.4619751 ,  102.54130554,   79.99622345],
       [  66.20803833,  102.22305298,   80.7438736 ],
       [  65.96872711,  101.88980865,   81.48987579],
       [  65.72864532,  101.59316254,   82.25085449],
       [  65.47808075,  101.33383942,   83.02194214],
       [  65.21841431,  101.11295319,   83.80186462],
       [  64.95678711,  100.94080353,   84.59326935],
       [  64.71759033,  100.82022095,   85.40114594],
       [  64.48053741,  100.73490143,   86.21411896],
       [  64.24304199,  100.65074158,   87.02709198],
       [  64.01773834,  100.55318451,   87.84204865],
       [  63.83801651,  100.41996765,   88.66333008],
       [  63.70982361,  100.25119019,   89.48779297],
       [  63.60707855,  100.06730652,   90.31262207],
       [  63.46164322,   99.91001892,   91.13648224],
       [  63.26287842,   99.78648376,   91.95485687],
       [  63.03713226,   99.68377686,   92.76905823],
       [  62.81192398,   99.56619263,   93.58140564],
       [  62.57145309,   99.42708588,   94.38592529],
       [  62.32259369,   99.25592804,   95.18167114],
       [  62.07497787,   99.05770111,   95.97154236],
       [  61.82253647,   98.83877563,   96.7543869 ],
       [  61.59536743,   98.59293365,   97.5370636 ],
       [  61.46530151,   98.30503845,   98.32772827],
       [  61.39904785,   97.97928619,   99.11172485],
       [  61.33279419,   97.65353394,   99.89572906],
       [  61.26067352,   97.30914307,  100.67123413],
       [  61.19459534,   96.96743011,  101.44847107],
       [  61.1958046 ,   96.63417053,  102.23215485],
       [  61.26572037,   96.2988739 ,  103.01185608],
       [  61.39840698,   95.96297455,  103.78307343],
       [  61.5720787 ,   95.6426239 ,  104.55268097],
       [  61.78163528,   95.35540771,  105.32629395],
       [  62.06700134,   95.09746552,  106.08564758],
       [  62.39427185,   94.8572464 ,  106.83369446],
       [  62.74076462,   94.62278748,  107.57482147],
       [  63.11461639,   94.40107727,  108.30641937],
       [  63.53397751,   94.20418549,  109.02002716],
       [  64.00019836,   94.03809357,  109.71183777],
       [  64.43580627,   93.87523651,  110.42416382],
       [  64.84857941,   93.69993591,  111.14715576],
       [  65.26740265,   93.51858521,  111.86515808],
       [  65.69511414,   93.3671875 ,  112.58474731],
       [  66.10470581,   93.22719574,  113.31711578],
       [  66.45891571,   93.06028748,  114.07256317],
       [  66.78582001,   92.90560913,  114.84281921],
       [  67.11138916,   92.79004669,  115.6204071 ],
       [  67.44729614,   92.75711823,  116.40135193],
       [  67.75688171,   92.98265076,  117.16111755],
       [  68.02041626,   93.28012848,  117.91371155],
       [  68.25725555,   93.53466797,  118.69052124],
       [  68.46047974,   93.63263702,  119.51107788],
       [  68.62039948,   93.62007141,  120.34690094],
       [  68.76782227,   93.56475067,  121.18331909],
       [  68.90222168,   93.46326447,  122.01765442],
       [  68.99872589,   93.30039978,  122.84759521],
       [  69.04119873,   93.05428314,  123.66156769],
       [  69.05086517,   92.74394989,  124.45450592],
       [  69.02742004,   92.40427399,  125.23509979],
       [  68.95466614,   92.09059143,  126.02339935],
       [  68.84975433,   91.7967453 ,  126.81564331],
       [  68.72673798,   91.53726196,  127.61715698],
       [  68.6068573 ,   91.3030014 ,  128.42681885],
       [  68.50636292,   91.12481689,  129.25317383],
       [  68.39311218,   91.01572418,  130.08976746],
       [  68.25946808,   90.94654083,  130.92756653]], dtype=np.float32)

    pts = 12
    td = tm.downsample(t, pts)
    # print td
    assert_equal(len(td), pts)

    res = []
    t = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for pts in range(3, 200):
        td = tm.downsample(t, pts)
        res.append(pts-len(td))
    assert_equal(np.sum(res), 0)

    """
Exemplo n.º 43
0
def test_downsample():

    t = np.array([[82.20181274, 91.3650589, 43.15737152],
                  [82.3844223, 91.79336548, 43.87036514],
                  [82.48710632, 92.27861023, 44.56298065],
                  [82.53310394, 92.7854538, 45.24635315],
                  [82.53793335, 93.26902008, 45.94785309],
                  [82.48797607, 93.75003815, 46.6493988],
                  [82.35533142, 94.2518158, 47.32533264],
                  [82.15484619, 94.76634216, 47.97451019],
                  [81.90982819, 95.28792572, 48.6024437],
                  [81.63336945, 95.78153229, 49.23971176],
                  [81.35479736, 96.24868011, 49.89558792],
                  [81.08713531, 96.69807434, 50.56812668],
                  [80.81504822, 97.14285278, 51.24193192],
                  [80.52591705, 97.56719971, 51.92168427],
                  [80.26599884, 97.98269653, 52.61848068],
                  [80.0463562, 98.38131714, 53.3385582],
                  [79.8469162, 98.77052307, 54.06955338],
                  [79.57667542, 99.13599396, 54.78985596],
                  [79.23351288, 99.4320755, 55.51065063],
                  [78.84815979, 99.64141846, 56.24016571],
                  [78.47383881, 99.77347565, 56.9929924],
                  [78.12837219, 99.81330872, 57.76969528],
                  [77.80438995, 99.85082245, 58.55574799],
                  [77.4943924, 99.88065338, 59.34777069],
                  [77.21414185, 99.85343933, 60.15090561],
                  [76.96416473, 99.82772827, 60.96406937],
                  [76.74712372, 99.80519104, 61.78676605],
                  [76.52263641, 99.79122162, 62.60765076],
                  [76.03757477, 100.08692169, 63.24152374],
                  [75.44867706, 100.3526535, 63.79513168],
                  [74.78033447, 100.57255554, 64.272789],
                  [74.11605835, 100.7733078, 64.76428986],
                  [73.51222992, 100.98779297, 65.32373047],
                  [72.97387695, 101.23387146, 65.93502045],
                  [72.47355652, 101.49151611, 66.57343292],
                  [71.99834442, 101.72480774, 67.2397995],
                  [71.5690918, 101.98665619, 67.92664337],
                  [71.18083191, 102.29483795, 68.61888123],
                  [70.81879425, 102.63343048, 69.31127167],
                  [70.47422791, 102.98672485, 70.00532532],
                  [70.10092926, 103.28502655, 70.70999908],
                  [69.69512177, 103.51667023, 71.42147064],
                  [69.27423096, 103.71351624, 72.13452911],
                  [68.91260529, 103.81676483, 72.89796448],
                  [68.60788727, 103.81982422, 73.69258118],
                  [68.34162903, 103.7661972, 74.49915314],
                  [68.08542633, 103.70635223, 75.30856323],
                  [67.83590698, 103.60187531, 76.11553955],
                  [67.56822968, 103.4482193, 76.90870667],
                  [67.28399658, 103.25878906, 77.68825531],
                  [67.00117493, 103.03740692, 78.45989227],
                  [66.72718048, 102.80329895, 79.23099518],
                  [66.4619751, 102.54130554, 79.99622345],
                  [66.20803833, 102.22305298, 80.7438736],
                  [65.96872711, 101.88980865, 81.48987579],
                  [65.72864532, 101.59316254, 82.25085449],
                  [65.47808075, 101.33383942, 83.02194214],
                  [65.21841431, 101.11295319, 83.80186462],
                  [64.95678711, 100.94080353, 84.59326935],
                  [64.71759033, 100.82022095, 85.40114594],
                  [64.48053741, 100.73490143, 86.21411896],
                  [64.24304199, 100.65074158, 87.02709198],
                  [64.01773834, 100.55318451, 87.84204865],
                  [63.83801651, 100.41996765, 88.66333008],
                  [63.70982361, 100.25119019, 89.48779297],
                  [63.60707855, 100.06730652, 90.31262207],
                  [63.46164322, 99.91001892, 91.13648224],
                  [63.26287842, 99.78648376, 91.95485687],
                  [63.03713226, 99.68377686, 92.76905823],
                  [62.81192398, 99.56619263, 93.58140564],
                  [62.57145309, 99.42708588, 94.38592529],
                  [62.32259369, 99.25592804, 95.18167114],
                  [62.07497787, 99.05770111, 95.97154236],
                  [61.82253647, 98.83877563, 96.7543869],
                  [61.59536743, 98.59293365, 97.5370636],
                  [61.46530151, 98.30503845, 98.32772827],
                  [61.39904785, 97.97928619, 99.11172485],
                  [61.33279419, 97.65353394, 99.89572906],
                  [61.26067352, 97.30914307, 100.67123413],
                  [61.19459534, 96.96743011, 101.44847107],
                  [61.1958046, 96.63417053, 102.23215485],
                  [61.26572037, 96.2988739, 103.01185608],
                  [61.39840698, 95.96297455, 103.78307343],
                  [61.5720787, 95.6426239, 104.55268097],
                  [61.78163528, 95.35540771, 105.32629395],
                  [62.06700134, 95.09746552, 106.08564758],
                  [62.39427185, 94.8572464, 106.83369446],
                  [62.74076462, 94.62278748, 107.57482147],
                  [63.11461639, 94.40107727, 108.30641937],
                  [63.53397751, 94.20418549, 109.02002716],
                  [64.00019836, 94.03809357, 109.71183777],
                  [64.43580627, 93.87523651, 110.42416382],
                  [64.84857941, 93.69993591, 111.14715576],
                  [65.26740265, 93.51858521, 111.86515808],
                  [65.69511414, 93.3671875, 112.58474731],
                  [66.10470581, 93.22719574, 113.31711578],
                  [66.45891571, 93.06028748, 114.07256317],
                  [66.78582001, 92.90560913, 114.84281921],
                  [67.11138916, 92.79004669, 115.6204071],
                  [67.44729614, 92.75711823, 116.40135193],
                  [67.75688171, 92.98265076, 117.16111755],
                  [68.02041626, 93.28012848, 117.91371155],
                  [68.25725555, 93.53466797, 118.69052124],
                  [68.46047974, 93.63263702, 119.51107788],
                  [68.62039948, 93.62007141, 120.34690094],
                  [68.76782227, 93.56475067, 121.18331909],
                  [68.90222168, 93.46326447, 122.01765442],
                  [68.99872589, 93.30039978, 122.84759521],
                  [69.04119873, 93.05428314, 123.66156769],
                  [69.05086517, 92.74394989, 124.45450592],
                  [69.02742004, 92.40427399, 125.23509979],
                  [68.95466614, 92.09059143, 126.02339935],
                  [68.84975433, 91.7967453, 126.81564331],
                  [68.72673798, 91.53726196, 127.61715698],
                  [68.6068573, 91.3030014, 128.42681885],
                  [68.50636292, 91.12481689, 129.25317383],
                  [68.39311218, 91.01572418, 130.08976746],
                  [68.25946808, 90.94654083, 130.92756653]],
                 dtype=np.float32)

    pts = 12
    td = tm.downsample(t, pts)
    # print td
    assert_equal(len(td), pts)

    res = []
    t = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for pts in range(3, 200):
        td = tm.downsample(t, pts)
        res.append(pts - len(td))
    assert_equal(np.sum(res), 0)
    """
Exemplo n.º 44
0
    t1 = time()
    C5 = pf.local_skeleton_clustering(T2, .5)
    t2 = time()
    print t2 - t1
    print len(C5)

    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError, e:
        raise nose.plugins.skip.SkipTest('Fails to import dipy.viz due to %s' %
                                         str(e))

    streams, hdr = tv.read(get_data('fornix'))
    T3 = [tm.downsample(s[0], 6) for s in streams]

    print 'lenT3', len(T3)

    C = pf.local_skeleton_clustering(T3, 10.)

    print 'lenC', len(C)
    """
    
    r=fvtk.ren()
    colors=np.zeros((len(C),3))
    for c in C:
        color=np.random.rand(3)
        for i in C[c]['indices']:
            fvtk.add(r,fvtk.line(T3[i],color))
        colors[c]=color