Example #1
def skeletonize(fdpy,flsc,points=3):

    dpr=Dpy(fdpy,'r')    
    T=dpr.read_tracks()
    dpr.close()    
    print len(T)
    Td=[downsample(t,points) for t in T]
    C=local_skeleton_clustering(Td,d_thr=10.,points=points)    
    #Tobject=np.array(T,dtype=np.object)
    

    #'''
    #r=fvtk.ren()    
    skeleton=[]    
    for c in C:
        #color=np.random.rand(3)
        if C[c]['N']>0:
            Ttmp=[]
            for i in C[c]['indices']:
                Ttmp.append(T[i])
            si,s=most_similar_track_mam(Ttmp,'avg')
            print si,C[c]['N']    
            C[c]['most']=Ttmp[si]            
            #fvtk.add(r,fvtk.line(Ttmp[si],color))            
    print len(skeleton)
    #r=fos.ren()
    #fos.add(r,fos.line(skeleton,color))    
    #fos.add(r,fos.line(T,fos.red))    
    #fvtk.show(r)
    #'''
    
    save_pickle(flsc,C)
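A minimal usage sketch for the skeletonize function above; the file names are hypothetical and load_pickle comes from dipy.io.pickles, the counterpart of the save_pickle call used here.

# Hypothetical paths; skeletonize() is the function defined above.
from dipy.io.pickles import load_pickle

skeletonize('tracks.dpy', 'skeleton.pkl', points=3)
C = load_pickle('skeleton.pkl')
for c in C:
    # every cluster holds its size 'N', the member 'indices' and a representative track 'most'
    print('cluster %d contains %d tracks' % (c, C[c]['N']))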
Example #2
def spherical_rois(fdpy,fsr,sq_radius=4):    
    
    
    R=atlantic_points()    
    dpr=Dpy(fdpy,'r')
    T=dpr.read_tracks()
    dpr.close()
    
    center=R['BCC']
    
    refimg=nib.load(fref)
    aff=refimg.get_affine()
    
    SR={}
    
    for key in R:
        
        center=R[key]
        #back to world space
        centerw=np.dot(aff,np.array(center+(1,)))[:3]        
        centerw.shape=(1,)+centerw.shape   
        centerw=centerw.astype(np.float32)
    
        res= [track_roi_intersection_check(t,centerw,sq_radius) for t in T]
        res= np.array(res,dtype=np.int)
        ind=np.where(res>0)[0]
        
        SR[key]={}
        SR[key]['center']=center
        SR[key]['centerw']=tuple(np.squeeze(centerw))
        SR[key]['radiusw']=np.sqrt(sq_radius)
        SR[key]['indices']=ind
        
    
    save_pickle(fsr,SR)
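A similar sketch for reading back the ROI dictionary written by spherical_rois; the paths are hypothetical and the keys follow the structure built in the loop above.

# Hypothetical paths; spherical_rois() is defined above.
from dipy.io.pickles import load_pickle

spherical_rois('tracks.dpy', 'rois.pkl', sq_radius=4)
SR = load_pickle('rois.pkl')
for key in SR:
    # each entry stores 'center' (voxel), 'centerw' (world), 'radiusw' and the matching track 'indices'
    print('%s: %d tracks pass through the sphere' % (key, len(SR[key]['indices'])))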
Example #3
    def bloat_bundle(self,tracks):

        tracksn=[]

        #c=np.array([0,0,0],np.float32)

        import dipy.core.track_performance as pf

        #c=tracks[0][-4]

        ct=tracks[pf.most_similar_track_mam(tracks,'avg')[0]]
        
        c=ct[len(ct)/2]

        #tracks2=[tm.downsample(t,200) for t in tracks]

        for X in tracks:  # the downsampled copy (tracks2) above is commented out, so iterate tracks directly

            Xn = X + np.multiply(10/np.sum((X-c)**2,axis=1).reshape(len(X),1),X-c)
            #Xn = X + 2*(X-c)

            tracksn.append(Xn)


        from dipy.io import pickles as pkl

        pkl.save_pickle('bundles.pkl',{'init':tracks[10],'after':tracksn[10]})

        
        
        

        return tracksn
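A small numeric illustration of the displacement rule used in the loop above: every point is pushed away from the centre c by (10 / squared distance) times the offset, so points near the centre move a lot and distant points barely move. The arrays below are purely illustrative.

# Toy illustration of the bloat displacement (illustrative values only)
import numpy as np

c = np.array([0., 0., 0.], dtype=np.float32)
X = np.array([[1., 0., 0.], [2., 0., 0.], [10., 0., 0.]], dtype=np.float32)
Xn = X + np.multiply(10 / np.sum((X - c) ** 2, axis=1).reshape(len(X), 1), X - c)
print(Xn)  # x-coordinates become 11.0, 7.0 and 11.0 respectively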
Example #4
def a_few_phantoms():
    
#fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00

    table={}
    
    #sd=['fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    #sd=['fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    
    sd=['fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    
    #for simfile in simdata:
    for simfile in sd:
        
        data=np.loadtxt(simdir+simfile)
        sf=simfile.split('_')
        print sf        
        print sf[1],sf[3],sf[5],sf[7],sf[9],sf[11],sf[13],sf[15]
        
        b_vals_dirs=np.loadtxt(simdir+'Dir_and_bvals_DSI_marta.txt')
        bvals=b_vals_dirs[:,0]*1000
        gradients=b_vals_dirs[:,1:]
        
        data2=data[::1000,:]                
        
        table={'fibres':sf[1],'snr':sf[3],'angle':sf[5],'l1':sf[7],'l2':sf[9],\
               'l3':sf[11],'iso':sf[13],'diso':sf[15],\
               'data':data2,'bvals':bvals,'gradients':gradients}
        
        #print table
        print table['data'].shape        
        pkl.save_pickle('test0.pkl',table)
        break
Example #5
def gq_tn_calc_save():

    for simfile in simdata:
    
        dataname = simfile
        print dataname

        sim_data=np.loadtxt(simdir+dataname)

        marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs=np.loadtxt(marta_table_fname)
        bvals=b_vals_dirs[:,0]*1000
        gradients=b_vals_dirs[:,1:]

        gq = dgqs.GeneralizedQSampling(sim_data,bvals,gradients)
        gqfile = simdir+'gq/'+dataname+'.pkl'
        pkl.save_pickle(gqfile,gq)

        '''
        gq.IN               gq.__doc__          gq.glob_norm_param
        gq.QA               gq.__init__         gq.odf              
        gq.__class__        gq.__module__       gq.q2odf_params
        '''

        tn = ddti.Tensor(sim_data,bvals,gradients)
        tnfile = simdir+'tn/'+dataname+'.pkl'
        pkl.save_pickle(tnfile,tn)


        '''
        tn.ADC               tn.__init__          tn._getevals
        tn.B                 tn.__module__        tn._getevecs
        tn.D                 tn.__new__           tn._getndim
        tn.FA                tn.__reduce__        tn._getshape
        tn.IN                tn.__reduce_ex__     tn._setevals
        tn.MD                tn.__repr__          tn._setevecs
        tn.__class__         tn.__setattr__       tn.adc
        tn.__delattr__       tn.__sizeof__        tn.evals
        tn.__dict__          tn.__str__           tn.evecs
        tn.__doc__           tn.__subclasshook__  tn.fa
        tn.__format__        tn.__weakref__       tn.md
        tn.__getattribute__  tn._evals            tn.ndim
        tn.__getitem__       tn._evecs            tn.shape
        tn.__hash__          tn._getD             
        '''

        ''' file  has one row for every voxel, every voxel is repeating 1000
Example #6
def gq_tn_calc_save():

    for simfile in simdata:
    
        dataname = simfile
        print dataname

        sim_data=np.loadtxt(simdir+dataname)

        marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs=np.loadtxt(marta_table_fname)
        bvals=b_vals_dirs[:,0]*1000
        gradients=b_vals_dirs[:,1:]

        gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
        gqfile = simdir+'gq/'+dataname+'.pkl'
        pkl.save_pickle(gqfile,gq)

        '''
        gq.IN               gq.__doc__          gq.glob_norm_param
        gq.QA               gq.__init__         gq.odf              
        gq.__class__        gq.__module__       gq.q2odf_params
        '''

        tn = dp.Tensor(sim_data,bvals,gradients)
        tnfile = simdir+'tn/'+dataname+'.pkl'
        pkl.save_pickle(tnfile,tn)


        '''
        tn.ADC               tn.__init__          tn._getevals
        tn.B                 tn.__module__        tn._getevecs
        tn.D                 tn.__new__           tn._getndim
        tn.FA                tn.__reduce__        tn._getshape
        tn.IN                tn.__reduce_ex__     tn._setevals
        tn.MD                tn.__repr__          tn._setevecs
        tn.__class__         tn.__setattr__       tn.adc
        tn.__delattr__       tn.__sizeof__        tn.evals
        tn.__dict__          tn.__str__           tn.evecs
        tn.__doc__           tn.__subclasshook__  tn.fa
        tn.__format__        tn.__weakref__       tn.md
        tn.__getattribute__  tn._evals            tn.ndim
        tn.__getitem__       tn._evecs            tn.shape
        tn.__hash__          tn._getD             
        '''

        ''' file  has one row for every voxel, every voxel is repeating 1000
Example #7
def main():

	parser = argparse.ArgumentParser(
	 formatter_class=argparse.ArgumentDefaultsHelpFormatter,
	 description='Given the DWI data, the corresponding b-value and b-vecs files\n\
	              and the type of fODF model, this script\n\
	              computes the fODF for every voxel.'
	)
	parser.add_argument('-d', '--dwi', required=True, metavar='<dwi>',
					    help='Path to the diffusion weighted image file (4D in NIFTI format).')
	parser.add_argument('-bvals', required=True, metavar='<bvals>',
					    help='Path to the b-value file (FSL format).')
	parser.add_argument('-bvecs', required=True, metavar='<bvecs>',
					    help='Path to the b-vectors file (FSL format).')
	parser.add_argument('-m','--mask', metavar='<mask_file>',
					    help='Path to the brain mask file.')
	parser.add_argument('-t', '--type', metavar='<type>', default='dti', choices=['dti','csd','csa'],
					    help='The type of the fODF model (default dti).')
	parser.add_argument('-b0', metavar='<b0>', type=float, default=0.,
					    help='Threshold to use for defining b0 images.')
	parser.add_argument('--order', type=int, metavar='<order>', default=4,
					    help='Order of fODF (not used for DTI).')
	parser.add_argument('-o','--output', required=True, metavar='<output>',
					    help='Specifies the output file.')
	
	#parse command line arguments
	args = parser.parse_args()
	#read DWI
	img = nib.load(args.dwi)
	data = img.get_data()
	#read mask
	if args.mask:
		img = nib.load(args.mask)
		mask = img.get_data().astype(bool)
		mask = np.logical_not(mask)
		#apply mask to data
		data[mask,:] = 0
	sys.stdout.write('Data read.\n')
	
	start = time()
	#fit the model
	if args.mask:
		model_fit = reconstruction(data, args.bvals, args.bvecs, mask=np.logical_not(mask), type=args.type, b0=args.b0, order=args.order)
	else:
		model_fit = reconstruction(data, args.bvals, args.bvecs, type=args.type, b0=args.b0, order=args.order)
	sys.stdout.write('Fitted %s model with order %i in %.2f sec.\n' % (args.type,args.order,time()-start))
	sys.stdout.flush()
	#save the fODF fit, depends on chosen model
	if args.type == 'dti':
		save_pickle(args.output+'.fit',model_fit.model_params)
	elif args.type == 'csd':
		save_pickle(args.output+'.fit',model_fit.fit_array)
	elif args.type == 'csa':
		save_pickle(args.output+'.fit',model_fit.shm_coeff)
	#save the model
	save_pickle(args.output+'.model',model_fit.model)
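A sketch of how the script above might be run and how its outputs could be read back; the script name, data file names and output prefix are hypothetical.

# Hypothetical command line (names are illustrative only):
#   python compute_fodf.py -d dwi.nii.gz -bvals bvals -bvecs bvecs -m mask.nii.gz -t csd --order 4 -o out
# The results can then be reloaded with dipy's pickle helpers:
from dipy.io.pickles import load_pickle

fit = load_pickle('out.fit')      # model params / fit array / SH coefficients, depending on -t
model = load_pickle('out.model')  # the model object saved at the end of main()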
Example #8
def plot_timings():
    
    #dir='/home/eg309/Data/LSC_limits/full_1M'
    dir='/tmp/full_1M'
    fs=['.npy','_2.npy','_3.npy','_4.npy','_5.npy']#,'_6.npy','_7.npy','_8.npy','_9.npy','_10.npy']
    
    T=[]
    for f in fs:
        fs1=dir+f
        T+=change_dtype(list(np.load(fs1)))
    #return T
    T=T[:100000]
    
    print len(T)    
    #dists=[4.,6.,8.,10.]
    dists=[8.]
    #pts=[3,6,12,18]
    pts=[12]
    #sub=10**5
    sub=10**3
    res={}
    for p in pts:
        print p
        res[p]={}
        for d in dists:
            print d
            res[p][d]={}
            res[p][d]['time']=[]
            res[p][d]['len']=[]
            step=0
            while step <= len(T):
                print step
                Td=[downsample(t,p) for t in T[0:step+sub]]
                t1=time()
                C=local_skeleton_clustering(Td,d)
                t2=time()
                res[p][d]['time'].append(t2-t1)
                res[p][d]['len'].append(len(C))       
                step=step+sub
    
    
    save_pickle('/tmp/res.pkl',res)
    print('Result saved in /tmp/res.pkl')
    #return res
    save_pickle('/tmp/res.pkl',res)
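A minimal plotting sketch for the timings saved above, assuming matplotlib is available; it uses the res[points][distance] structure built in the loop (points=12, distance=8. as set above).

# Sketch only: plot clustering time against the number of tracks processed so far.
import numpy as np
import matplotlib.pyplot as plt
from dipy.io.pickles import load_pickle

res = load_pickle('/tmp/res.pkl')
times = res[12][8.]['time']
ntracks = (np.arange(len(times)) + 1) * 10 ** 3   # approximate, since sub=10**3 tracks are added per step
plt.plot(ntracks, times)
plt.xlabel('number of tracks')
plt.ylabel('local_skeleton_clustering time (s)')
plt.show()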
Example #9
def gq_tn_calc_save():

    for simfile in simdata:    
        dataname = simfile
        print dataname
        sim_data=np.loadtxt(simdir+dataname)
        marta_table_fname=simdir+'Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs=np.loadtxt(marta_table_fname)
        bvals=b_vals_dirs[:,0]*1000
        gradients=b_vals_dirs[:,1:]
        gq = GeneralizedQSampling(sim_data,bvals,gradients)
        gqfile = '/tmp/gq'+dataname+'.pkl'
        pkl.save_pickle(gqfile,gq)
        tn = Tensor(sim_data,bvals,gradients)
        tnfile = '/tmp/tn'+dataname+'.pkl'
        pkl.save_pickle(tnfile,tn)

        ''' file  has one row for every voxel, every voxel is repeating 1000
Example #10
def training_check(dres, prefix):

    seeds_per_vox = 5

    num_of_cpus = 6

    cmd = 'python ~/Devel/scilpy/scripts/stream_local.py -odf ' + dres + prefix + 'odf_sh.nii.gz -m data/training-data_mask.nii.gz -s data/training-data_rois.nii.gz -n -' + str(
        seeds_per_vox) + ' -process ' + str(num_of_cpus) + ' -o ' + dres + prefix + 'streams.trk'

    pipe(cmd)

    mat, conn_mats, diffs, ratio = streams_to_connmat(dres + prefix + 'streams.trk', seeds_per_vox)

    save_pickle(dres + prefix + 'conn_mats.pkl', {'mat': mat, 'conn_mats': conn_mats, 'diffs': diffs, 'ratio':ratio})

    print dres + prefix + 'conn_mats.pkl'

    return diffs
Example #11
def load_pbc_data(id=None):
    if id is None:
        path = "/home/eg309/Data/PBC/pbc2009icdm/brain1/"
        streams, hdr = tv.read(path + "brain1_scan1_fiber_track_mni.trk")
        streamlines = [s[0] for s in streams]
        return streamlines
    if not osp.exists("/tmp/" + str(id) + ".pkl"):
        path = "/home/eg309/Data/PBC/pbc2009icdm/brain1/"
        streams, hdr = tv.read(path + "brain1_scan1_fiber_track_mni.trk")
        streamlines = [s[0] for s in streams]
        labels = np.loadtxt(path + "brain1_scan1_fiber_labels.txt")
        labels = labels[:, 1]
        mask_cst = labels == id
        cst_streamlines = [s for (i, s) in enumerate(streamlines) if mask_cst[i]]
        save_pickle("/tmp/" + str(id) + ".pkl", cst_streamlines)
        return cst_streamlines
        # return [approx_polygon_track(s, 0.7853) for s in cst_streamlines]
    else:
        return load_pickle("/tmp/" + str(id) + ".pkl")
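A brief usage note for the loader above; the label id is illustrative and refers to a bundle label in the PBC labels file.

# Usage sketch (paths are hard-coded inside the function; the label id 5 is illustrative)
streamlines = load_pbc_data()          # all streamlines from the .trk file
cst = load_pbc_data(id=5)              # only streamlines with label 5, cached as /tmp/5.pkl
print('%d streamlines in total, %d with label 5' % (len(streamlines), len(cst)))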
Example #12
def generate_skeletons():

    img=nib.load(fbet)
    data=img.get_data()
    affine=img.get_affine()
    bvals=np.loadtxt(fbvals)
    bvecs=np.loadtxt(fbvecs).T
    t=time()
    gqs=GeneralizedQSampling(data,bvals,bvecs)
    print 'gqs time',time()-t,'s'
    for (i,sds) in enumerate(seeds):
        print i,sds
        t=time()
        eu=EuDX(gqs.qa(),gqs.ind(),seeds=sds,a_low=0.0239)
        T=[downsample(e,12) for e in eu]
        #np.save(dout+outs[i]+'.npy',np.array(T,dtype=np.object))
        C=local_skeleton_clustering(T,4.)
        save_pickle(dout+outs[i]+'.skl',C)
        print time()-t
        del T
    print outs
Example #13
def save_id_tract_plus_sff(tracks_filename, id_file, num_proto, distance, out_fname):
   
    if (tracks_filename[-3:]=='dpy'):
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks=dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)
    
    tracks_id = load_pickle(id_file)
    	
    tract = [all_tracks[i] for i  in tracks_id]    
    
    not_tract_fil = []
    id_not_tract_fil = []
    min_len = min(len(i) for i in tract)
    #print 'min_len of cst', min_len
    min_len = min_len*2.2/3#2./3.2# - 20
    for i in np.arange(len(all_tracks)):
        if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
            not_tract_fil.append(all_tracks[i])
            id_not_tract_fil.append(i)
    
    not_tract_fil = np.array(not_tract_fil,dtype=np.object)        
    sff_pro_id = sff(not_tract_fil, num_proto, distance)        
    
    tract_sff_id = []
    for i in tracks_id:
        tract_sff_id.append(i)
        
    for idx in sff_pro_id:        
        tract_sff_id.append(id_not_tract_fil[idx])
        
    #tract_sff_id.append(id_not_tract_fil[i] for i in sff_pro_id)
    print len(tract), len(tract_sff_id)
    save_pickle(out_fname, tract_sff_id)
    return tract_sff_id
Example #14
def save_id_tract_plus_sff_in_ext(tracks_filename, id_file, num_proto, distance,  out_fname_ext , out_fname_sff_in_ext, thres_len= 2.2/3., thres_vol = 1.4 , thres_dis = 3./2.):
    
    
    tract_ext_id = save_id_tract_ext1(tracks_filename,id_file, distance, out_fname_ext, thres_len, thres_vol , thres_dis)
    
    if (tracks_filename[-3:]=='dpy'):
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks=dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)
    
    tracks_id = load_pickle(id_file)
    	
    ext_not_tract_id = []
    ext_not_tract = []
    for idx in tract_ext_id:
        if idx not in tracks_id:
            ext_not_tract.append(all_tracks[idx])
            ext_not_tract_id.append(idx)
        
          
    ext_not_tract = np.array(ext_not_tract,dtype=np.object)        
    sff_pro_id = sff(ext_not_tract, num_proto, distance)        
    
    tract_sff_in_ext_id = []
    for i in tracks_id:
        tract_sff_in_ext_id.append(i)
        
    for k in sff_pro_id:        
        tract_sff_in_ext_id.append(ext_not_tract_id[k])
        
    #tract_sff_id.append(id_not_tract_fil[i] for i in sff_pro_id)
    print len(tracks_id), len(tract_sff_in_ext_id), len(tract_ext_id)
    save_pickle( out_fname_sff_in_ext, tract_sff_in_ext_id)
    return tract_sff_in_ext_id
Example #15
def multiple_comparisons(T,samplesize = 10**4, lscdist = 4., replications = 2,subj='1'):
    labels1 = np.random.permutation(np.arange(len(T)))[:samplesize]
    #labels1 = np.arange(0*samplesize,1*samplesize)
    T1=T[labels1]
    print 'Number of tracks is %d' % (len(T1))
    lists = {}
    C = {}
    results = {}
    C_size = {}

    for replication in np.arange(replications):
        print '... preparing LSC(%d)' % (replication)
        rearrangement = np.random.permutation(np.arange(samplesize))
        #print '... min %d, max %d, len %d' % (np.min(rearrangement), np.max(rearrangement), len(rearrangement))
        rearrangement = list(rearrangement)
        C[replication] = local_skeleton_clustering(T1[rearrangement],lscdist)
        print '... skeleton size %d' % (len(C[replication]))
        lists[replication] = rearrangement
        C_size[replication] = len(C[replication])

    save_pickle('labels'+str(samplesize)+'_'+subj+'.pkl',labels1)
    save_pickle('C'+str(samplesize)+'_'+subj+'.pkl',C)
    save_pickle('lists'+str(samplesize)+'_'+subj+'.pkl',lists)
    save_pickle('C_size'+str(samplesize)+'_'+subj+'.pkl',C_size)

    for rep1 in np.arange(replications-1):
        for rep2 in np.arange(rep1+1,replications):
            #report_comparisons(C[rep1],lists[rep1],C[rep2],lists[rep2])
            print 'comparing %d and %d' % (rep1,rep2)
            results[(rep1,rep2)] = return_comparisons(C[rep1],lists[rep1],C[rep2],lists[rep2])

    '''
    labels2 = np.arange(1*samplesize,2*samplesize)
    T2=T[labels2]
    list21 = np.random.permutation(np.arange(samplesize))
    C21=local_skeleton_clustering(T2[list21],lscdist)
    list22 = np.random.permutation(np.arange(samplesize))
    C22=local_skeleton_clustering(T2[list22],lscdist)
    '''
    #print 'C11 vs C12'
    '''
    print 'C21 vs C22'
    report_comparisons(C21,list21,C22,list22)
    '''
    return results
Example #16
        print base_dir + "../MPRAGE_1/T1_flirt_out.nii.gz"
        img = nib.load(base_dir + "../MPRAGE_1/T1_flirt_out.nii.gz")
        data = img.get_data()

        for i in range(len(tracks_filename_arr)):
            print ">>>>"
            print base_dir2 + tracks_filename_arr[i]
            tracks = load_tracks(base_dir2 + tracks_filename_arr[i])
            print len(tracks)
            # tracks = [downsample(t, 12) - np.array(data.shape[:3])/2. for t in tracks]
            tracks = [downsample(t, 12) - np.array(data.shape) / 2.0 for t in tracks]
            # shift in the center of the volume
            # tracks=[t-np.array(data.shape)/2. for t in tracks]
            # 1/0
            print base_dir2 + qb_filename_15[i]
            qb = QuickBundles(tracks, 15.0, 12)
            save_pickle(base_dir2 + qb_filename_15[i], qb)

            print base_dir2 + qb_filename_20[i]
            qb = QuickBundles(tracks, 20.0, 12)
            save_pickle(base_dir2 + qb_filename_20[i], qb)

            print base_dir2 + qb_filename_30[i]
            qb = QuickBundles(tracks, 30.0, 12)
            save_pickle(base_dir2 + qb_filename_30[i], qb)
        # ================ quick bundles ==============================================

        print ("Done")
        print (linear_filename)
        # stop
Example #17
scene.SetBackground(1, 1, 1)
scene.add(actor.streamtube(streamlines, window.colors.white))
window.record(scene, out_path=outpath + 'fornix_initial.png', size=(600, 600))
if interactive:
    window.show(scene)

colormap = actor.create_colormap(np.arange(len(clusters)))

scene.clear()
scene.SetBackground(1, 1, 1)
scene.add(actor.streamtube(streamlines, window.colors.white, opacity=0.05))
scene.add(actor.streamtube(clusters.centroids, colormap, linewidth=0.4))
window.record(scene,
              out_path=outpath + 'fornix_centroids.png',
              size=(600, 600))
if interactive:
    window.show(scene)

colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color

scene.clear()
scene.SetBackground(1, 1, 1)
scene.add(actor.streamtube(streamlines, colormap_full))
window.record(scene, out_path=outpath + 'fornix_clusters.png', size=(600, 600))
if interactive:
    window.show(scene)

save_pickle(outpath + 'QB.pkl', clusters)
Example #18
    fvtk.label(r, text=str(len(bundle)), pos=(bundle[si][-1]), scale=(2, 2, 2))

fvtk.add(r, fvtk.line(skeleton, colors, opacity=1))
#fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='fornix_most', size=(600, 600))
"""
.. figure:: fornix_most1000000.png
   :align: center

   **Showing skeleton with the most representative tracks as the skeletal representation**.
   
   The numbers depict the number of tracks in each cluster. This is a very compact way to see the underlying
   structures; an alternative would be to draw the representative tracks with different widths.
   
"""
"""
Save the skeleton information in the dictionary. Now try playing with different LSC thresholds and check the different
results (a small sketch of such a sweep follows the code below). Try it with your datasets and give us some feedback.

"""

for (i, c) in enumerate(C):
    C[c]['most'] = skeleton[i]

for c in C:
    print('Keys in bundle %d' % c)
    print(C[c].keys())
    print('Shape of skeletal track (%d, %d) ' % C[c]['most'].shape)

pkl.save_pickle('skeleton_fornix.pkl', C)
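Following the suggestion in the docstring above, a minimal sketch of sweeping the LSC distance threshold; Td is a hypothetical name for the downsampled tracks that produced C earlier in this example, and local_skeleton_clustering is assumed to be imported as in the other examples.

# Hypothetical threshold sweep; Td stands for the downsampled tracks clustered earlier.
for d_thr in [2., 4., 6., 8., 10.]:
    C_thr = local_skeleton_clustering(Td, d_thr)
    print('threshold %.1f -> %d clusters' % (d_thr, len(C_thr)))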
Example #19
#tracks=tracks[:1000]

#print 'Deleting unnecessary data...'
del streams#,hdr

if not os.path.isfile(C_fname):

    print 'Starting LARCH ...'
    tim=time.clock()
    C,atracks=tl.larch(tracks,[50.**2,20.**2,5.**2],True,True)
    #tracks=[tm.downsample(t,3) for t in tracks]
    #C=pf.local_skeleton_clustering(tracks,20.)
    print 'Done in total of ',time.clock()-tim,'seconds.'

    print 'Saving result...'
    pkl.save_pickle(C_fname,C)
    
    streams=[(i,None,None)for i in atracks]
    tv.write(appr_fname,streams,hdr)

else:

    print 'Loading result...'
    C=pkl.load_pickle(C_fname)

skel=[]
for c in C:
    skel.append(C[c]['repz'])
    
print 'Showing dataset after clustering...'
r=fos.ren()
    
Example #20
def loss_function(mapping12):
    """Computes the loss function of a given mapping.

    This is the 'energy_function' of simulated annealing.
    """
    global dm1, dm2
    loss = np.linalg.norm(dm1[np.triu_indices(size1)] - dm2[mapping12[:,None], mapping12][np.triu_indices(size1)])
    return loss
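# A toy illustration of the loss above, using small illustrative arrays (toy_dm1/toy_dm2
# mirror the global dm1/dm2): with identical matrices and the identity mapping the loss
# is exactly 0, while a shuffled mapping generally gives a positive value.
import numpy as np
toy_size = 4
toy_dm1 = np.random.rand(toy_size, toy_size)
toy_dm1 = (toy_dm1 + toy_dm1.T) / 2.              # symmetric toy "distance matrix"
toy_dm2 = toy_dm1.copy()
for toy_map in (np.arange(toy_size), np.random.permutation(toy_size)):
    toy_loss = np.linalg.norm(toy_dm1[np.triu_indices(toy_size)] -
                              toy_dm2[toy_map[:, None], toy_map][np.triu_indices(toy_size)])
    print('toy loss = %.4f' % toy_loss)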
    
#mapping12_coregistration_1nn, loss_coregistration_1nn, mapping12_best, energy_best = tracts_mapping1(tractography1, tractography2, loss_function,neighbour4, iterations_anneal, pre_map_fn)
mapping12_coregistration_1nn, loss_coregistration_1nn, mapping12_best, energy_best = tracts_mapping(tractography1, tractography2, loss_function,neighbour4, iterations_anneal)
print "Best enegery of annealing: ", energy_best

from dipy.io.pickles import save_pickle
save_pickle(map_best_fn,mapping12_best)
save_pickle(map_1nn_fn,mapping12_coregistration_1nn)
print 'Saved ', map_best_fn
print 'Saved ', map_1nn_fn
'''
#visualize source and mapped source - red and blue
ren3 = fvtk.ren()
ren3 = visualize_source_mappedsource(ren3, tractography1[:- num_pro], tractography2, mapping12_best[:-num_pro])
fvtk.show(ren3)

#visualize target cst and mapped source cst - yellow and blue
ren4 = fvtk.ren()
target_cst_only = load_tract(t_file,t_cst)
ren4 = visualize_tract(ren4, target_cst_only, fvtk.yellow)
ren4 = visualize_mapped(ren4, tractography2, mapping12_best[:- num_pro], fvtk.blue)
fvtk.show(ren4)
'''
Example #21
marta_table_fname = '/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
sim_data = np.loadtxt(fname)
#bvalsf='/home/eg01/Data_Backup/Data/Marta/DSI/SimData/bvals101D_float.txt'

b_vals_dirs = np.loadtxt(marta_table_fname)

bvals = b_vals_dirs[:, 0] * 1000
gradients = b_vals_dirs[:, 1:]

gq = dp.GeneralizedQSampling(sim_data, bvals, gradients)
tn = dp.Tensor(sim_data, bvals, gradients)
#'''

gqfile = '/home/ian/Data/SimData/gq_SNR030_1fibre.pkl'
pkl.save_pickle(gqfile, gq)
tnfile = '/home/ian/Data/SimData/tn_SNR030_1fibre.pkl'
pkl.save_pickle(tnfile, tn)
'''
print tn.evals.shape
print tn.evecs.shape

evals=tn.evals[0]
evecs=tn.evecs[0]

print evecs.shape 

first_directions = tn.evecs[:,:,0]
first1000 = first_directions[:1000,:]
cross = np.dot(first1000.T,first1000)
np.linalg.eig(cross)
Example #22
    img_mask = nib.load(brain_mask_file)
    img, gtab = dwi.get_dwi_img_gtab(dwi_data_file, dwi_bval_file,
                                     dwi_bvec_file)
    data_mask = img_mask.get_data()
    affine = img.get_affine()
    data = img.get_data()

    model = TensorModel(gtab)
    ten = model.fit(data, mask=data_mask)
    sphere = get_sphere('symmetric724')
    ind = quantize_evecs(ten.evecs, sphere.vertices)

    p = Struct()
    p.affine = affine
    p.sphere = sphere
    p.ten = ten
    p.ind = ind
    return p


if __name__ == '__main__':
    import sys
    print(sys.argv)
    if len(sys.argv) == 5:
        brain_mask = sys.argv[1]
        dwi_data = sys.argv[2]
        dwi_bval = sys.argv[3]
        dwi_bvec = sys.argv[4]
        outp = track_gen_model(brain_mask, dwi_data, dwi_bval, dwi_bvec)
        pickles.save_pickle('track_gen_model.pickle', outp)
Example #23
    for i in np.arange(size1):
        print 'sum row ', i , np.sum(prb_map12[i,:])

    #compare to mapping results
    if t==0:
        pre_map12 = np.copy(prb_map12_init)            
    norm = np.linalg.norm(prb_map12 - pre_map12)
    print "Norm of results - with previous: ", norm
    pre_map12 = np.copy(prb_map12)
    
         
    #print L      
    plot_smooth(plt, np.arange(len(L)), L, False)       
    
    if save:
        save_pickle(map_prob, prb_map12)
    
plt.title('Loss function ')  
plt.xlabel('Gradient evaluations')  
if save:
    #plt.savefig(os.path.join(os.path.curdir, 'objective_function_'+ str(num_pro) + '_' + str(num_pro) + '_sparse_density_' + str(nearest) + '_neighbors.pdf'))
    plt.savefig(obj_func_file)
plt.show()

#end of optimize with slsqp





   
Example #24
    for data_id in data_ids:
        print 'Subject/Control: ', data_id
        
        #data = load_data('big_dataset',data_id)   

        #X = compute_disimilarity(data, bundles_distances_mam, 'random', num_prototype,len(data))        
        
        #file_name_dis = 'Results/'+str(data_id)+'/'+str(data_id)+'_data_disimilarity_full_tracks_' + str(num_prototype) + '_prototyes_random_modified_ward_full_tree_130524.dis'            
        file_name_dis = 'Results/'+str(data_id)+'/'+str(data_id)+'_data_disimilarity_full_tracks_40_prototyes_random_130516.dis'        
        #save_pickle(file_name_dis,X)        
        #print 'Saving data_disimilarity: ',file_name_dis,' - done'
        X = load_pickle(file_name_dis)

        for num_neigh in num_neighbors:                    
            print "\tGenerating at ", num_neigh, " neighbor"                
            
            file_name = str(data_id)+'_full_tracks_' + str(num_neigh) + '_neighbors_modified_ward_full_tree_130516_new.tree'            
            connectivity = kneighbors_graph(X, n_neighbors=num_neigh)                          
            
            st = cpu_time()#time.clock()
            ward = Ward(n_clusters=num_cluster, compute_full_tree=True, connectivity=connectivity).fit(X)
            #t = time.clock() - st
            t = cpu_time() - st
            
            #-----------------------------------------------------------------------------
            # saving the result                       
            save_pickle(file_name,ward)
            print '\tSaving tree: ',file_name,' - done'
            
            
Example #25
'''
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
sim_data=np.loadtxt(fname)
#bvalsf='/home/eg01/Data_Backup/Data/Marta/DSI/SimData/bvals101D_float.txt'

b_vals_dirs=np.loadtxt(marta_table_fname)

bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]

gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
tn = dp.Tensor(sim_data,bvals,gradients)
#'''

gqfile = '/home/ian/Data/SimData/gq_SNR030_1fibre.pkl'
pkl.save_pickle(gqfile,gq)
tnfile = '/home/ian/Data/SimData/tn_SNR030_1fibre.pkl'
pkl.save_pickle(tnfile,tn)


'''
print tn.evals.shape
print tn.evecs.shape

evals=tn.evals[0]
evecs=tn.evecs[0]

print evecs.shape 

first_directions = tn.evecs[:,:,0]
first1000 = first_directions[:1000,:]
Example #26
def save_id_tract_ext1(tracks_filename, id_file,  distance, out_fname, thres_len= 2.2/3., thres_vol = 1.2 , thres_dis = 2.8/2.):
    
    print thres_len, thres_vol, thres_dis
    if (tracks_filename[-3:]=='dpy'):
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks=dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)    
    
    
    tracks_id = load_pickle(id_file)
    	
    tract = [all_tracks[i] for i  in tracks_id]    
    
    not_tract_fil = []
    id_not_tract_fil = []
    min_len = min(len(i) for i in tract)
    #print 'min_len of cst', min_len
    min_len = min_len*thres_len
    
    for i in np.arange(len(all_tracks)):
        if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
            not_tract_fil.append(all_tracks[i])
            id_not_tract_fil.append(i)
       
    k = np.round(len(tract) * thres_vol  )     
            
    from dipy.segment.quickbundles import QuickBundles
    
    qb = QuickBundles(tract,200,18)
    
    medoid_tract = qb.centroids[0]
    
    med_nottract_dm =  distance([medoid_tract], not_tract_fil)
    med_tract_dm =  distance([medoid_tract], tract)
    
    tract_rad = med_tract_dm[0][np.argmax(med_tract_dm[0])]
    len_dis = tract_rad * thres_dis# 2.8/2.
   
    #k_indices which close to the medoid
    sort = np.argsort(med_nottract_dm,axis = 1)[0]
    #print sort[:k+1]
    while (k>0 and med_nottract_dm[0][sort[k]]>=len_dis):
        k = k - 1
        
    
    #print k
    close_indices = sort[0:k]
    
    #for idx in close_indices:
    #    tract_ext.append(not_tract_fil[idx])          
    #print 'close indices', len(close_indices)
    tract_ext_id = []
    for i in tracks_id:
         tract_ext_id.append(i)
    
    #print 'Before', len(tract_ext_id)
    
    for idx in close_indices:
        tract_ext_id.append(id_not_tract_fil[idx]) 
    #    print idx, id_not_tract_fil[idx]
      
    #print 'After', len(tract_ext_id)
    #tract_ext_id = [i for i in tracks_id]
    #tract_ext_id.append(id_not_tract_fil[i] for i in close_indices)
    
    save_pickle(out_fname, tract_ext_id)
    return tract_ext_id
Example #27
"""
.. figure:: fornix_most1000000.png
   :align: center

   **Showing skeleton with the most representative tracks as the skeletal representation**.
   
   The numbers depict the number of tracks in each cluster. This is a very compact way to see the underlying
   structures; an alternative would be to draw the representative tracks with different widths.
   
"""

"""
Save the skeleton information in the dictionary. Now try playing with different LSC thresholds and check the different results.
Try it with your datasets and give us some feedback.

"""

for (i,c) in enumerate(C):    
    C[c]['most']=skeleton[i]
    
for c in C:    
    print('Keys in bundle %d' % c)
    print(C[c].keys())
    print('Shape of skeletal track (%d, %d) ' % C[c]['most'].shape)

pkl.save_pickle('skeleton_fornix.pkl',C)




Example #28
"""

"""
Show the labeled *Fornix* (colors from centroids):
"""

colormap_full = np.ones((len(tracks), 3))
for i, centroid in enumerate(centroids):
    inds=qb.label2tracksids(i)
    colormap_full[inds]=colormap[i]
fvtk.add(r, fvtk.line(tracks, colormap_full, opacity=1., linewidth=3))


#fvtk.show(r)
fvtk.record(r,n_frames=1,out_path='fornix_clust',size=(600,600))

"""
.. figure:: fornix_clust1000000.png
   :align: center

   **Showing the different clusters with random colors**.

"""

"""
It is also possible to save the QuickBundles object with pickling.
"""
save_pickle('QB.pkl',qb)
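As a counterpart to the pickling step above, a minimal sketch of reloading the saved QuickBundles object; the file name matches the save_pickle call.

from dipy.io.pickles import load_pickle

qb = load_pickle('QB.pkl')
print('%d clusters reloaded' % qb.total_clusters)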


Example #29
#tracks=tracks[:1000]

#print 'Deleting unnecessary data...'
del streams  #,hdr

if not os.path.isfile(C_fname):

    print 'Starting LARCH ...'
    tim = time.clock()
    C, atracks = tl.larch(tracks, [50.**2, 20.**2, 5.**2], True, True)
    #tracks=[tm.downsample(t,3) for t in tracks]
    #C=pf.local_skeleton_clustering(tracks,20.)
    print 'Done in total of ', time.clock() - tim, 'seconds.'

    print 'Saving result...'
    pkl.save_pickle(C_fname, C)

    streams = [(i, None, None) for i in atracks]
    tv.write(appr_fname, streams, hdr)

else:

    print 'Loading result...'
    C = pkl.load_pickle(C_fname)

skel = []
for c in C:

    skel.append(C[c]['repz'])

print 'Showing dataset after clustering...'
Example #30
        diffs.append(np.sum(np.abs(conn_mat-golden_mat)))

    return mat, conn_mats, diffs


Np = 100
Ni = 50



for NC in [1, 2, 3]:
    for iso in [0, 1]:
        for fr in [0, 1]:
            for snr in [30, 10]:
                for typ in [0, 1, 2, 3]:
                    for ang_t in [20,23,25, 30,33,35]:
                        for category in ['dti']:#, 'hardi']:

                            filename = '{}_pso_track_sel={}_NC={}_iso={}_fr={}_Np={}_Ni={}_snr={}_type={}'.format(category, ang_t, NC, iso, fr, Np, Ni, snr, typ)
                            filepath = '/media/Data/work/isbi2013/pso_track/' + filename + '.trk'

                            if os.path.exists(filepath):

                                mat, conn_mats, diffs = streams_to_connmat(filepath, seeds_per_voxel=5, thr=[0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])#thr=[0.25, 0.5, 0.75])

                                print(filename, diffs)

                                filename2 = '{}_pso_conn_mat_sel={}_NC={}_iso={}_fr={}_Np={}_Ni={}_snr={}_type={}'.format(category, ang_t, NC, iso, fr, Np, Ni, snr, typ)
                                save_pickle('/media/Data/work/isbi2013/pso_conn_mat/' + filename2 + '.pkl', {'mat':mat, 'conn_mats':conn_mats, 'diffs':diffs})

Example #31
def humans():   

    no_seeds=10**6
    visualize = False
    save_odfs = False
    dirname = "data/"    
    for root, dirs, files in os.walk(dirname):
        if root.endswith('101_32'):
            
            base_dir = root+'/'
            filename = 'raw'
            base_filename = base_dir + filename
            nii_filename = base_filename + 'bet.nii.gz'
            bvec_filename = base_filename + '.bvec'
            bval_filename = base_filename + '.bval'
            flirt_mat = base_dir + 'DTI/flirt.mat'    
            fa_filename = base_dir + 'DTI/fa.nii.gz'
            fsl_ref = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
            dpy_filename = base_dir + 'DTI/res_tracks_dti.dpy'
    
            print bvec_filename
            
            img = nib.load(nii_filename)
            data = img.get_data()

            affine = img.get_affine()
            bvals = np.loadtxt(bval_filename)
            gradients = np.loadtxt(bvec_filename).T # this is the unitary direction of the gradient
            
            tensors = Tensor(data, bvals, gradients, thresh=50)
            FA = tensors.fa()
            famask=FA>=.2
            
            ds=DiffusionSpectrum(data,bvals,gradients,odf_sphere='symmetric642',mask=famask,half_sphere_grads=True,auto=True,save_odfs=save_odfs)
            gq=GeneralizedQSampling(data,bvals,gradients,1.2,odf_sphere='symmetric642',mask=famask,squared=False,save_odfs=save_odfs)
            ei=EquatorialInversion(data,bvals,gradients,odf_sphere='symmetric642',mask=famask,half_sphere_grads=True,auto=False,save_odfs=save_odfs,fast=True)
            ei.radius=np.arange(0,5,0.4)
            ei.gaussian_weight=0.05
            ei.set_operator('laplacian')
            ei.update()
            ei.fit()    
            
            ds.PK[FA<.2]=np.zeros(5) 
            ei.PK[FA<.2]=np.zeros(5)
            gq.PK[FA<.2]=np.zeros(5)                   
                        
            print 'create seeds'
            x,y,z,g=ei.PK.shape
            seeds=np.zeros((no_seeds,3))
            sid=0
            while sid<no_seeds:
                rx=(x-1)*np.random.rand()
                ry=(y-1)*np.random.rand()
                rz=(z-1)*np.random.rand()
                seed=np.ascontiguousarray(np.array([rx,ry,rz]),dtype=np.float64)        
                seeds[sid]=seed
                sid+=1
            
            euler = EuDX(a=FA, ind=tensors.ind(), seeds=seeds, a_low=.2)
            dt_tracks = [track for track in euler]                                    
            euler2 = EuDX(a=ds.PK, ind=ds.IN, seeds=seeds, odf_vertices=ds.odf_vertices, a_low=.2)
            ds_tracks = [track for track in euler2]    
            euler3 = EuDX(a=gq.PK, ind=gq.IN, seeds=seeds, odf_vertices=gq.odf_vertices, a_low=.2)
            gq_tracks = [track for track in euler3]
            euler4 = EuDX(a=ei.PK, ind=ei.IN, seeds=seeds, odf_vertices=ei.odf_vertices, a_low=.2)
            ei_tracks = [track for track in euler4]
            
            if visualize:
                renderer = fvtk.ren()
                fvtk.add(renderer, fvtk.line(dt_tracks, fvtk.red, opacity=1.0))
                fvtk.show(renderer)
            
            print 'Load images to be used for registration'
            img_fa =nib.load(fa_filename)
            img_ref =nib.load(fsl_ref)
            mat=flirt2aff(np.loadtxt(flirt_mat),img_fa,img_ref)
            del img_fa
            del img_ref
            
            print 'transform the tracks'
            dt_linear = transform_tracks(dt_tracks,mat)
            ds_linear = transform_tracks(ds_tracks,mat)
            gq_linear = transform_tracks(gq_tracks,mat)
            ei_linear = transform_tracks(ei_tracks,mat)
                        
            print 'save tensor tracks'
            dpy_filename = base_dir + 'DTI/dt_linear.dpy'
            print dpy_filename
            dpr_linear = Dpy(dpy_filename, 'w')
            dpr_linear.write_tracks(dt_linear)
            dpr_linear.close()
            
            print 'save ei tracks'
            dpy_filename = base_dir + 'DTI/ei_linear.dpy'
            print dpy_filename
            dpr_linear = Dpy(dpy_filename, 'w')
            dpr_linear.write_tracks(ei_linear)
            dpr_linear.close()
            
            print 'save ds tracks'
            dpy_filename = base_dir + 'DTI/ds_linear.dpy'
            print dpy_filename
            dpr_linear = Dpy(dpy_filename, 'w')
            dpr_linear.write_tracks(ds_linear)
            dpr_linear.close()
            
            print 'save gq tracks'
            dpy_filename = base_dir + 'DTI/gq_linear.dpy'
            print dpy_filename
            dpr_linear = Dpy(dpy_filename, 'w')
            dpr_linear.write_tracks(gq_linear)
            dpr_linear.close()
            
            print 'save lengths'
            pkl_filename = base_dir + 'DTI/dt_lengths.pkl'
            save_pickle(pkl_filename,lengths(dt_linear))            
            pkl_filename = base_dir + 'DTI/ei_lengths.pkl'
            save_pickle(pkl_filename,lengths(ei_linear))
            pkl_filename = base_dir + 'DTI/gq_lengths.pkl'
            save_pickle(pkl_filename,lengths(gq_linear))
            pkl_filename = base_dir + 'DTI/ds_lengths.pkl'
            save_pickle(pkl_filename,lengths(ds_linear))
Example #32
xdata = np.zeros(len(x.keys()))
ydata = np.zeros(len(y.keys()))

for i,k in enumerate(x.keys()):
    xdata[i]=x[k]
    ydata[i]=y[k]

#maxKappa(xdata,ydata)

#maxkappa, maxorder = maxKappa(xdata,ydata)
#print 'maxkappa   ', maxkappa
#print 'max order', maxorder
'''

"""
fsolid='/home/eg309/Data/LSC_limits/solid_1M.npy'
#fsolid='/home/ian/Data/LSC_stability/solid_1M.npy'

T=np.load(fsolid)
print 'Before',len(T)
T=np.array([t for t in list(T) if length(t)>= 40. and length(t)< 120.],dtype=np.object) # 40mm - 120mm
print 'After',len(T)

results = multiple_comparisons(T, samplesize=len(T), replications=3)
save_pickle('results_full.pkl', results)
"""

"""
results = multiple_comparisons(T, samplesize=25*10**3, replications=12)
save_pickle('results25k.pkl', results)
Example #33
    clusters = qb.cluster(streamlines)

    # extract > 100
    # print len(clusters) # 89
    for c in clusters:
        if len(c) < 100:
            clusters.remove_cluster(c)

    out_path = '/home/brain/workingdir/data/dwi/hcp/' \
               'preprocessed/response_dhollander/101006/result/CC_fib_length1_2.png'
    show(imgtck, clusters, out_path)

    metric = CosineMetric()
    qb = QuickBundles(threshold=0.1, metric=metric)
    clusters = qb.cluster(streamlines)

    # extract > 100
    # print len(clusters)  # 41
    for c in clusters:
        if len(c) < 100:
            clusters.remove_cluster(c)

    out_path = '/home/brain/workingdir/data/dwi/hcp/' \
               'preprocessed/response_dhollander/101006/result/CC_fib_length2_2.png'
    show(imgtck, clusters, out_path)

    # save the complete ClusterMap object with pickling
    save_pickle(
        '/home/brain/workingdir/data/dwi/hcp/preprocessed/'
        'response_dhollander/101006/result/CC_fib_length_2.pk2', clusters)
            
Example #34
            from common_functions import init_prb_state_sparse
            
            prb_map12_init, cs_idxs = init_prb_state_sparse(s_cst_sff,t_cst_ext,nn) 
            
            #still working on it up to this part
            #load the map and choose the one with the highest probability
            #-------------------------------------------------------
            #only for saving the highest prob map
            cst_sff_len = len(s_cst_sff)
            
            map_idxs_all = [map_all[i].argsort()[-1] for i in np.arange(cst_sff_len)]
            
            mapped_all = [cs_idxs[i][map_idxs_all[i]] for i in np.arange(cst_sff_len)]
            
            save_pickle(map_file+'choose_highest.txt',mapped_all)
            
            stop

            # end only for saving the highest prob map
            #-------------------------------------------------------
            
            
            cst_len = len(s_cst)
            map_tmp = map_all[:cst_len]
            
            map_idxs = [map_tmp[i].argsort()[-1] for i in np.arange(cst_len)]
            
            mapped = [cs_idxs[i][map_idxs[i]] for i in np.arange(cst_len)]
            
            #print map_tmp[:10], map_idxs[:10], cs_idxs[:10], mapped[:10]            
Example #35
colormap_full = np.ones((len(streamlines), 3))
for i, centroid in enumerate(centroids):
    inds = qb.label2tracksids(i)
    colormap_full[inds] = colormap[i]

fvtk.clear(r)
fvtk.add(r, fvtk.line(streamlines, colormap_full, opacity=1.0, linewidth=3))
fvtk.record(r, n_frames=1, out_path="fornix_clust.png", size=(600, 600))

"""
.. figure:: fornix_clust.png
   :align: center

   **Showing the different clusters with random colors**.

It is also possible to save the complete QuickBundles object with pickling.
"""

save_pickle("QB.pkl", qb)

"""
Finally, here is a video of QuickBundles applied on a larger dataset.

.. raw:: html

    <iframe width="420" height="315" src="http://www.youtube.com/embed/kstL7KKqu94" frameborder="0" allowfullscreen></iframe>

.. include:: ../links_names.inc

"""
Example #36
 source = source_ext[:len(s_cst_idx)]
 
 t_cst_idx = load_pickle(t_cst_idx_file)            
 target = target_ext[:len(t_cst_idx)]   
 
 #print len(source), len(target)
 
 tractography1 = source
 tractography2 = target_ext
 
            
 map_all = mapping_nn(tractography1, tractography2)
 
 if save:            
     #print 'Saving 1-NN tract based: ', out_file
     save_pickle(out_file, map_all)
 
 s_cst = source
 t_cst = target
 t_cst_ext = target_ext
 
 
 cst_len = len(s_cst)
 mapped = map_all[:cst_len]
 
 mapped_s_cst = [t_cst_ext[idx] for idx in mapped]
 
 #jac0, bfn0 = Jac_BFN(s_cst, t_cst, vol_dims, disp=False)            
 #jac1, bfn1 = Jac_BFN(mapped_s_cst, t_cst, vol_dims, disp=False)
 
 #jac0, bfn0 = Jac_BFN2(s_cst, t_cst, vol_dims, disp=False)            
Example #37
    colormap_full[inds] = colormap[i]

fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='fornix_clust.png', size=(600, 600))
"""
.. figure:: fornix_clust.png
   :align: center

   **Showing the different clusters with random colors**.

It is also possible to save the complete QuickBundles object with pickling.
"""

save_pickle('QB.pkl', qb)
"""
Finally, here is a video of QuickBundles applied on a larger dataset.

.. raw:: html

    <iframe width="420" height="315" src="http://www.youtube.com/embed/kstL7KKqu94" frameborder="0" allowfullscreen></iframe>

.. include:: ../links_names.inc

.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                    tractography simplification, Frontiers in Neuroscience, vol
                    6, no 175, 2012.

"""
Example #38
ren.SetBackground(1, 1, 1)
ren.add(actor.streamtube(streamlines, colormap_full))
window.record(ren, out_path='fornix_clusters.png', size=(600, 600))
if interactive:
    window.show(ren)

"""
.. figure:: fornix_clusters.png
   :align: center

   Showing the different clusters.

It is also possible to save the complete `ClusterMap` object with pickling.
"""

save_pickle('QB.pkl', clusters)

"""
Finally, here is a video of QuickBundles applied on a larger dataset.

.. raw:: html

    <iframe width="420" height="315" src="http://www.youtube.com/embed/kstL7KKqu94" frameborder="0" allowfullscreen></iframe>

.. include:: ../links_names.inc

References
----------

.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                    tractography simplification, Frontiers in Neuroscience, vol
Example #39
def track_gen_net(trackpickle, templatepath):
    tracks = dwi.load_streamlines_from_trk(trackpickle)
    img_template = nib.load(templatepath)
    p = track_gen_net_work(tracks, img_template)
    pickles.save_pickle('net_gen_net.pickle', p)
Example #40
# Display streamlines
ren = window.Renderer()
ren.add(actor.streamtube(streamlines, window.colors.white))
window.show(ren)
window.record(ren, out_path=filename + '_stream_lines_eu.png', size=(600, 600))

# Display centroids
window.clear(ren)
colormap = actor.create_colormap(np.arange(qb.total_clusters))
ren.add(actor.streamtube(streamlines, window.colors.white, opacity=0.1))
ren.add(actor.streamtube(qb.centroids, colormap, linewidth=0.5))
window.show(ren)
window.record(ren, out_path=filename + '_centroids_eu.png', size=(600, 600))

# Display tracks
window.clear(ren)
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters.items(), colormap):
    colormap_full[cluster[1]['indices']] = color
ren.add(actor.streamtube(streamlines, colormap_full))
window.show(ren)
window.record(ren,
              out_path=filename + '_stream_line_cluster_eu.png',
              size=(600, 600))

# Save Streamline files
save_trk(filename + "_stream_line_eu.trk",
         streamlines=streamlines,
         affine=np.eye(4))
save_pickle(filename + '_qb_eu.pkl', clusters)
Example #41
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.streamtube(clusters.centroids, colormap, linewidth=0.4))
fvtk.record(
    ren,
    n_frames=1,
    out_path='/home/brain/workingdir/data/dwi/hcp/preprocessed/'
    'response_dhollander/100206/result/CC_fib1_remove_non_cc_z-10_x-min_10_jet.png',
    size=(600, 600))
# fvtk.show(ren)

# show the label CC (colors from centroids)
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
    colormap_full[cluster.indices] = color
fvtk.clear(ren)
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(
    ren,
    n_frames=1,
    out_path='/home/brain/workingdir/data/dwi/hcp/preprocessed/'
    'response_dhollander/100206/result/CC_fib2_remove_non_cc_z-10_x-min_10_jet.png',
    size=(600, 600))
# fvtk.show(ren)

# save the complete ClusterMap object with pickling
save_pickle(
    '/home/brain/workingdir/data/dwi/hcp/preprocessed/response_dhollander/'
    '100206/result/CC_fib_remove_non_cc_z-10_x-min_10_jet.pk2', clusters)