def run_comparisons(sample_data=35):
    for simfile in [simdata[sample_data]]:
        dataname = simfile
        print(dataname)

        gqfile = simdir + 'gq/' + dataname + '.pkl'
        gq = pkl.load_pickle(gqfile)
        tnfile = simdir + 'tn/' + dataname + '.pkl'
        tn = pkl.load_pickle(tnfile)

        dt_first_directions_in = odf_vertices[tn.IN]
        dt_indices = tn.IN.reshape((100, 1000))
        dt_results = analyze_maxima(dt_indices,
                                    dt_first_directions_in.reshape((100, 1000, 3)),
                                    range(10, 90))

        gq_indices = np.array(gq.IN[:, 0], dtype='int').reshape((100, 1000))
        gq_first_directions_in = odf_vertices[np.array(gq.IN[:, 0], dtype='int')]
        #print gq_first_directions_in.shape
        gq_results = analyze_maxima(gq_indices,
                                    gq_first_directions_in.reshape((100, 1000, 3)),
                                    range(10, 90))

        np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)

        out = open('/home/ian/Data/SimVoxels/Out/' + '***_' + dataname, 'w')
        results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
        print >> out, results[:, :]
        out.close()

def read_reconstruction(model_file, fit_file, mask, type='dti'):
    """ Reads in an ODF for each voxel reconstructed using DiPy.

    Parameters
    ----------
    model_file : string (mandatory)
        Path to the pickled model used for ODF reconstruction.
    fit_file : string (mandatory)
        Path to the pickled, fitted ODFs reconstructed by DiPy.
    mask : numpy array (mandatory)
        Logical array defining the brain mask.
    type : string, one of {'dti', 'csd', 'csa'} (default = 'dti')
        The type of the ODF reconstruction.

    Returns
    -------
    model_fit : DiPy object (depends on the type)
        Represents the fitted ODF for each voxel.
    """
    model = load_pickle(model_file)
    if type == 'dti':
        model_params = load_pickle(fit_file)
        model_fit = TensorFit(model, model_params)
    elif type == 'csd':
        fit_array = load_pickle(fit_file)
        model_fit = MultiVoxelFit(model, fit_array, mask)
    elif type == 'csa':
        shm_coeff = load_pickle(fit_file)
        model_fit = SphHarmFit(model, shm_coeff, mask)
    return model_fit

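# Hypothetical usage sketch (the pickle paths and mask shape below are placeholders,
# not files from the original scripts): rebuild a pickled DTI fit and read out its
# FA map; dipy's TensorFit exposes FA as a per-voxel array.
def example_read_reconstruction():
    import numpy as np
    mask = np.ones((96, 96, 60), dtype=bool)  # stand-in for a real brain mask
    tensor_fit = read_reconstruction('dti_model.pkl', 'dti_fit.pkl', mask, type='dti')
    return tensor_fit.fa
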
def bench_time():
    bench = load_pickle("bench_qbx_vs_qb.pkl")

    nbs = []
    qb_times = []
    qbx_times = []

    for nb_streamlines in np.sort(bench.keys()):
        nbs.append(nb_streamlines)
        qb_times.append(bench[nb_streamlines]["QB time"])
        qbx_times.append(bench[nb_streamlines]["QBX time"])

    fig = plt.figure()
    # fig.suptitle('Time comparison of QB vs QBX', fontsize=14)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title("QB vs QBX (execution time)")
    linewidth = 5.0
    ax.plot(nbs, qb_times, "r", linewidth=linewidth, alpha=0.6)
    ax.plot(nbs, qbx_times, "g", linewidth=linewidth, alpha=0.6)

    for i in [1, 2, 3]:
        ax.plot([nbs[i], nbs[i]], [qbx_times[i], qb_times[i]], "k--")
        ax.text(x=nbs[i], y=qb_times[i] / 2.0,
                s=" " + str(int(np.round(bench[nbs[i]]["Speedup"]))) + "X")

    ax.legend(["QB", "QBX"], loc=2)
    ax.set_xticks(nbs)
    ax.set_xticklabels(["1M", "2M", "3M", "4M", "5M"])
    ax.set_xlabel("# streamlines in millions (M)")
    ax.set_ylabel("# seconds")

    plt.savefig("speed.png", dpi=300, bbox_inches="tight")

def tracts_mapping1(tractography1, tractography2, loss_function, neighbour,
                    iterations_anneal_now, pre_map_file):
    ann = [100, 200, 400, 600, 800, 1000]
    iterations_anneal_pre = 0
    if iterations_anneal_now <= 100:
        dm12 = bundles_distances_mam(tractography1, tractography2)
        mapping12_coregistration_1nn = np.argmin(dm12, axis=1)
    else:
        k = (iterations_anneal_now / 200) - 1
        iterations_anneal_pre = ann[k]
        from dipy.io.pickles import load_pickle
        mapping12_coregistration_1nn = load_pickle(pre_map_file)

    iterations_anneal = iterations_anneal_now - iterations_anneal_pre
    print "Iteration: ", iterations_anneal_now, iterations_anneal_pre, iterations_anneal

    print "The previous coregistration gives a mapping12 with the following loss:"
    loss_coregistration_1nn = loss_function(mapping12_coregistration_1nn)
    print "loss =", loss_coregistration_1nn

    #iterations_anneal = 100
    print "Simulated Annealing"
    np.random.seed(1)
    initial_state = mapping12_coregistration_1nn.copy()
    mapping12_best, energy_best = anneal(initial_state=initial_state,
                                         energy_function=loss_function,
                                         neighbour=neighbour,
                                         transition_probability=transition_probability,
                                         temperature=temperature_boltzmann,
                                         max_steps=iterations_anneal,
                                         energy_max=0.0,
                                         T0=200.0,
                                         log_every=1000)
    return mapping12_coregistration_1nn, loss_coregistration_1nn, mapping12_best, energy_best

def load_ward_tree(data_id, num_neigh=50):
    #file_name = 'Results/' + str(data_id) + '/' + str(data_id) + '_full_tracks_' + str(num_neigh) + '_neighbors_original_ward_stop_50_clusters.tree'
    file_name = 'Results/' + str(data_id) + '/' + str(data_id) + '_full_tracks_' + str(num_neigh) + '_neighbors_original_ward.tree'
    print "Loading ward tree "
    ward = load_pickle(file_name)
    return ward

def converging_lsc(inp):
    C0 = load_pickle(dout + outs[inp] + '.skl')
    print len(C0)
    v0, i0, l0 = bring_virtuals(C0)
    v = v0
    #print len(v0)
    not_converged = 1
    Cs = []
    while not_converged:
        lv_before = len(v)
        C = local_skeleton_clustering(v, 4.)
        v, i, l = bring_virtuals(C)
        #print '=', len(v)
        #for (i, v_) in enumerate(v):
        #    if length(v_) < 50.:
        #        del v[i]
        #c = [v_ for v_ in v if length(v_) > 50.]
        lv = len(v)
        #print lv
        if len(v) == lv_before:
            not_converged = 0
        else:
            Cs.append(C)
    return Cs

def run_on_multi_subjects():
    k1 = 15
    iterations = 25
    fig = plt.figure()

    #subjects = ['101', '109', '201', '205', '210']
    #colors = ['ko--', 'kx:', 'k^-', 'k*-', 'v-.']
    subjects = ['109', '205']
    colors = ['k^:', 'ko-']

    for m, sub_id in enumerate(subjects):
        #tree_name = 'Results/' + str(sub_id) + '/' + str(sub_id) + '_full_tracks_50_neighbors_modified_ward_full_tree.tree'
        tree_name = 'Results/' + str(sub_id) + '/' + str(sub_id) + '_full_tracks_50_neighbors_modified_ward_full_tree_130516.tree'
        tree = load_pickle(tree_name)

        cut = tree.best_cut()
        print 'origin cut', cut
        remove_valley(cut)
        print 'after remove valley', cut

        cut_scales_ori = [s[0] for s in cut]
        temp_scales = heuristic_modified_cuts(cut_scales_ori[:4], 3)
        temp_scales_1 = heuristic_modified_cuts(cut_scales_ori[4:], 4, temp_scales[len(temp_scales) - 1])
        cut_scales = np.concatenate((temp_scales, temp_scales_1))

        #print cut_scales_ori
        #print cut_scales
        #cut_scales = cut_scales_ori
        #cut_scales = heuristic_modified_cuts(cut_scales, 4)

        cut_scales = cut_scales_ori

        split = np.zeros((len(cut_scales), iterations))
        for j in np.arange(len(cut_scales)):
            #run from the top cut to the bottom cut
            i = len(cut_scales) - j - 1
            height = cut_scales[i]
            guillotines = tree.cut(height)

            #the height of the next cut
            if i == 0:
                h = 0.
            else:
                h = cut_scales[i - 1]

            for k in np.arange(iterations):
                random_nodes = random_elements(guillotines, k1)
                split[i, k] = split_factor_setnodes(tree, random_nodes, h)

        plot_results(plt, split, cut_scales, colors[m], sub_id)
        print sub_id, ' : ', cut_scales

    plt.legend(loc='upper right')
    plt.xlabel("cut scales with a base unit of 1/h", fontsize=17)
    plt.ylabel("split factor ", fontsize=17)
    #fig.suptitle("\n Evaluating the cut based on split factor", fontsize=20)
    plt.show()

def load_cst(tracks_filename, cst_index_file, ext):
    from dipy.io.dpy import Dpy
    from dipy.io.pickles import load_pickle
    dpr_tracks = Dpy(tracks_filename, 'r')
    all_tracks = dpr_tracks.read_tracks()
    dpr_tracks.close()

    tracks_id = load_pickle(cst_index_file)
    cst = [all_tracks[i] for i in tracks_id]
    cst_ext = [all_tracks[i] for i in tracks_id]
    medoid_cst = []
    #len_dis = 250
    if ext:
        # upper bound on the number of candidate neighbours (cast to int for indexing)
        k = int(np.round(len(cst) * 1.2))
        not_cst_fil = []
        min_len = min(len(i) for i in cst)
        #print 'min_len of cst', min_len
        min_len = min_len * 2.2 / 3  #2./3.2# - 20
        for i in np.arange(len(all_tracks)):
            if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
                not_cst_fil.append(all_tracks[i])

        #for st in all_tracks:
        #    if (length(st) >= min_len) and (st not in cst):
        #        not_cst_fil.append(st)

        from dipy.segment.quickbundles import QuickBundles
        qb = QuickBundles(cst, 200, 18)
        medoid_cst = qb.centroids[0]

        med_notcst_dm = bundles_distances_mam([medoid_cst], not_cst_fil)
        med_cst_dm = bundles_distances_mam([medoid_cst], cst)

        cst_rad = med_cst_dm[0][np.argmax(med_cst_dm[0])]
        len_dis = cst_rad * 2.8 / 2.
        #print med_cst_dm
        #print cst_rad
        #print len_dis

        #k indices which are close to the medoid
        sort = np.argsort(med_notcst_dm, axis=1)[0]
        #print sort[:k+1]
        while (k > 0 and med_notcst_dm[0][sort[k]] >= len_dis):
            k = k - 1

        #print med_notcst_dm[0][sort[0:k]]
        #print k
        #close_indices = np.argsort(cst_dm, axis=1)[:, 0:k][0]
        close_indices = sort[0:k]

        for idx in close_indices:
            cst_ext.append(not_cst_fil[idx])

        return cst, cst_ext, medoid_cst

    return cst

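# Hypothetical usage sketch (file names are placeholders, not from the original
# scripts): load a CST bundle and an extended version that also includes the
# non-CST streamlines closest to the bundle medoid, as selected by load_cst above.
def example_load_cst():
    cst, cst_ext, medoid = load_cst('tracks_dti_3M_linear.dpy', 'cst_R_idx.pkl', ext=True)
    print len(cst), len(cst_ext)  # the extension is a superset of the original bundle
    return cst_ext
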
def bench_complexity():
    bench = load_pickle("bench_qbx_vs_qb_complexity.pkl")

    nbs = []
    qb_mdfs = []
    qbx_mdfs = []
    qbx_1 = []
    qbx_2 = []
    qbx_3 = []
    qbx_4 = []

    for nb_streamlines in np.sort(bench.keys()):
        nbs.append(nb_streamlines)
        tmp = bench[nb_streamlines]["QB stats"]["nb_mdf_calls"] / 2
        print("QB {}".format(tmp))
        qb_mdfs.append(tmp)
        tmpx_1 = bench[nb_streamlines]["QBX stats"]["stats_per_level"][0]["nb_mdf_calls"] / 2
        tmpx_2 = bench[nb_streamlines]["QBX stats"]["stats_per_level"][1]["nb_mdf_calls"] / 2
        tmpx_3 = bench[nb_streamlines]["QBX stats"]["stats_per_level"][2]["nb_mdf_calls"] / 2
        tmpx_4 = bench[nb_streamlines]["QBX stats"]["stats_per_level"][3]["nb_mdf_calls"] / 2
        tmpx = tmpx_1 + tmpx_2 + tmpx_3 + tmpx_4
        print("QBX {}".format(tmpx))
        print("QB/QBX {}".format(tmp / float(tmpx)))
        qbx_mdfs.append(tmpx)
        qbx_1.append(tmpx_1)
        qbx_2.append(tmpx_2)
        qbx_3.append(tmpx_3)
        qbx_4.append(tmpx_4)

    fig = plt.figure()
    # fig.suptitle('Time comparison of QB vs QBX', fontsize=14)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title("QB vs QBX (MDF calls)")
    linewidth = 5.0
    # ax.plot(nbs, qb_mdfs, 'r--', linewidth=linewidth, alpha=0.5)
    ax.plot(nbs, qbx_mdfs, "g--", linewidth=linewidth, alpha=0.5)
    ax.plot(nbs, qbx_1, "g-", linewidth=linewidth, alpha=0.6)
    ax.plot(nbs, qbx_2, linewidth=linewidth, alpha=0.7)
    ax.plot(nbs, qbx_3, linewidth=linewidth, alpha=0.8)
    ax.plot(nbs, qbx_4, linewidth=linewidth, alpha=0.9)

    # for i in [1, 2, 3]:
    #     ax.plot([nbs[i], nbs[i]], [qbx_times[i], qb_times[i]], 'k--')
    #     ax.text(x=nbs[i], y=qb_times[i] / 2., s=' ' + str(int(np.round(bench[nbs[i]]['Speedup']))) + 'X')

    ax.legend(["QBX", "QBX1", "QBX2", "QBX3", "QBX4"], loc=2)
    ax.set_xticks(nbs)
    ax.set_xticklabels(["1K", "2K", "3K", "4K", "5K"])
    ax.set_xlabel("# streamlines in thousands (K)")
    ax.set_ylabel("# MDF calls")

    plt.savefig("complexity.png", dpi=300, bbox_inches="tight")

def see_skeletons(fskel):
    C = load_pickle(fskel)
    tracks = [C[c]['most'] for c in C if C[c]['N'] > 10]

    r = fvtk.ren()
    colors = np.array([t[0] - t[-1] for t in tracks])
    colors = colormap.orient2rgb(colors)
    fvtk.add(r, fvtk.line(tracks, colors))
    fvtk.show(r)

def show_timings():
    res = load_pickle('/home/eg309/Data/LSC_limits/timings_100K_1M.pkl')
    npts_list = [3, 6, 12, 18]
    dists = [4., 6., 8., 10.]
    xlabels = ['100K', '200K', '300K', '400K', '500K', '600K', '700K', '800K', '900K', '1M']
    plt.subplots_adjust(hspace=0.4)

    for (k, npts) in enumerate(npts_list):
        ax = plt.subplot(2, 2, k + 1)
        plt.title(str(npts) + ' point tracks')
        for d in dists:
            ax.plot(res[npts][d]['time'][:-2], label=str(d * 2.5) + ' mm')
        ax.set_ylim((0, 6000))
        ax.set_xlim((0, 9))
        ax.set_xticklabels(xlabels)
        ax.set_ylabel('Seconds')
        ax.set_xlabel('Number of tracks')
        if npts == 18:
            plt.legend(loc=2)
        else:
            plt.legend()

    plt.show()

def gen_net_csv():
    netpickle = 'net_gen_net.pickle'
    net = pickles.load_pickle(netpickle)
    M = net.M
    # the atlas name is taken from the parent directory of the working directory
    atlasdir = os.path.dirname(os.getcwd())
    atlasname = os.path.basename(atlasdir)
    atlasobj = atlas.get(atlasname)
    idx = atlasobj.regions
    # reorder rows/columns of the connectivity matrix to the atlas region order
    realM = M[idx][:, idx]
    save_csvmat('dwinetraw.csv', realM)
    save_csvmat('dwinet.csv', np.log1p(realM))
    return realM

def get_luigi_SLFI():
    fseg1 = "/home/eg309/Devel/segmented_bundles/luigi_s1_for_eleftherios/S1_SLFI"
    #fseg2 = '/home/eg309/Devel/segmented_bundles/luigi_s1_for_eleftherios/S1_SLFII'
    subject = "01"
    fdpyw = "data/subj_" + subject + "/101_32/DTI/tracks_gqi_3M_linear.dpy"
    dpr = Dpy(fdpyw, "r")
    T = dpr.read_tracks()
    dpr.close()
    seg_inds = load_pickle(fseg1)
    T1 = [T[i] for i in seg_inds]
    #seg_inds = load_pickle(fseg2)
    #T2 = [T[i] for i in seg_inds]
    return T1

def load_data_dis(data_id, num_prototype=40):
    filename = ("Results/" + str(data_id) + "/" + str(data_id) +
                "_data_disimilarity_full_tracks_" + str(num_prototype) +
                "_prototyes_random_130516.dis")
    print "Loading tracks dissimilarity"
    dis = load_pickle(filename)
    return dis

def combine_results():
    subs = ['subj_01', 'subj_02', 'subj_03', 'subj_04', 'subj_05', 'subj_06',
            'subj_07', 'subj_08', 'subj_09', 'subj_10', 'subj_11', 'subj_12']
    categ = ['64', '64_32', '101', '101_32', '118', '118_32']
    methods = ['DTI', 'GQI', 'SDI', 'NPA']

    RES = {}
    for sub in subs:
        RES[sub] = {}
        for cat in categ:
            RES[sub][cat] = {}
            for meth in methods:
                RES[sub][cat][meth] = {}
                for root, dirs, files in os.walk(dname + sub + '/' + cat + '/' + meth):
                    for file in files:
                        if file.endswith('FAW_len.npy'):
                            print pjoin(root, file)
                            track_lengths = np.load(pjoin(root, file))
                            RES[sub][cat][meth]['track_no'] = len(track_lengths)
                            RES[sub][cat][meth]['total_length'] = np.sum(track_lengths)
                            #RES[sub][cat][meth]['lenghts'] = track_lengths
                        if file.startswith('FAW_LSC_ref_'):
                            if file.endswith('3.pkl'):
                                RES[sub][cat][meth]['len_lsc_3'] = len(load_pickle(pjoin(root, file)))
                            if file.endswith('6.pkl'):
                                RES[sub][cat][meth]['len_lsc_6'] = len(load_pickle(pjoin(root, file)))
                            if file.endswith('9.pkl'):
                                RES[sub][cat][meth]['len_lsc_9'] = len(load_pickle(pjoin(root, file)))
                            if file.endswith('12.pkl'):
                                RES[sub][cat][meth]['len_lsc_12'] = len(load_pickle(pjoin(root, file)))
                        if file.endswith('_SR.pkl'):
                            RES[sub][cat][meth]['sr'] = load_pickle(pjoin(root, file))
    return RES

def load_tract_trk(tracks_filename, id_file):
    '''
    load tract from trackvis format
    '''
    import nibabel as nib
    streams, hdr = nib.trackvis.read(tracks_filename, points_space='voxel')
    all_tracks = np.array([s[0] for s in streams], dtype=np.object)

    from dipy.io.pickles import load_pickle
    tracks_id = load_pickle(id_file)
    tract = [all_tracks[i] for i in tracks_id]
    tract = np.array(tract, dtype=np.object)
    return tract

def analyze_humans():
    #if __name__ == '__main__':
    dirname = "data/"
    for root, dirs, files in os.walk(dirname):
        if root.endswith('101_32'):
            base_dir = root + '/'
            #filename = 'raw'
            #dpy_filename = base_dir + 'DTI/tensor_linear.dpy'
            #print dpy_filename
            #dpr_linear = Dpy(dpy_filename, 'r')
            #tensor_tracks = dpr_linear.read_tracks()
            #dpr_linear.close()

            pkl_filename = base_dir + 'DTI/dt_lengths.pkl'
            dt_lengths = load_pickle(pkl_filename)
            pkl_filename = base_dir + 'DTI/ei_lengths.pkl'
            ei_lengths = load_pickle(pkl_filename)
            pkl_filename = base_dir + 'DTI/gq_lengths.pkl'
            gq_lengths = load_pickle(pkl_filename)
            pkl_filename = base_dir + 'DTI/ds_lengths.pkl'
            ds_lengths = load_pickle(pkl_filename)

            d = np.zeros(6)
            p = np.zeros(6)
            np.set_printoptions(3)
            d[0], p[0] = ks_2samp(dt_lengths, ei_lengths)
            d[1], p[1] = ks_2samp(dt_lengths, ds_lengths)
            d[2], p[2] = ks_2samp(dt_lengths, gq_lengths)
            d[3], p[3] = ks_2samp(ei_lengths, ds_lengths)
            d[4], p[4] = ks_2samp(ei_lengths, gq_lengths)
            d[5], p[5] = ks_2samp(ds_lengths, gq_lengths)
            print 'KS statistic ', d, 'P-value ', p
            print np.median(dt_lengths), np.median(ei_lengths), \
                np.median(ds_lengths), np.median(gq_lengths)
            #break
    stop  # deliberately undefined name, used here to halt the script

def see_combined_spherical_intersections():
    subs = ['subj_01', 'subj_02', 'subj_03', 'subj_04', 'subj_05', 'subj_06',
            'subj_07', 'subj_08', 'subj_09', 'subj_10', 'subj_11', 'subj_12']
    categ = ['64', '64_32', '101', '101_32', '118', '118_32']
    methods = ['DTI', 'GQI', 'SDI', 'NPA']
    centers = ['GCC', 'CSTL', 'FX', 'CGCR', 'SCC', 'BCC', 'CGCL', 'UNCL?', 'CSTR', 'UNCR?']

    RES = load_pickle('/home/eg309/Data/PROC_MR10032/results/res_tmp.pkl')
    #print RES['subj_03']['64_32']['DTI']

    SR_combined = {}
    for cent in centers:
        SR_combined[cent] = {}
        for cat in categ:
            SR_combined[cent][cat] = []
            for sub in subs:
                try:
                    SR_combined[cent][cat].append(len(RES[sub][cat]['DTI']['sr'][cent]['indices']))
                except KeyError:
                    pass

    #return SR_combined
    #RES['subj_03']['64_32']['DTI']['sr']['GCC']['indices']

    width = 0.2
    for cent in SR_combined:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title(cent)
        mean_ = []
        std_ = []
        ind = np.arange(6)
        for cat in categ:
            mean_.append(np.array(SR_combined[cent][cat]).mean())
            std_.append(np.array(SR_combined[cent][cat]).std())
        rects = ax.bar(ind + width, mean_, width, color='m', yerr=std_)
        ax.set_xticks(ind + width)
        ax.set_xticklabels(categ)
    plt.show()

def test_sltensor():
    fname = pjoin(os.path.dirname(__file__), 'data/eg_3voxels.pkl')
    dix = pkls.load_pickle(fname)
    b = dix['bs']
    g = dix['gs']
    data = np.array(dix['data']).T
    yield assert_equal(b[0], 0.)

    slt = ten.STensorL(b, g)
    yield assert_equal(slt.A.shape[0], len(b) - 1)

    slt.fit(data)
    print 'data coeff', slt.coeff
    print 'tensors', slt.tensors
    print 'fa', slt.fa
    print 'adc', slt.adc

    data2 = 100 * np.ones((3, 3, 3, len(b)))
    slt.fit(data2)
    print 'data2 coeff', slt.coeff
    print 'tensors', slt.tensors
    print 'fa', slt.fa
    print 'adc', slt.adc
    yield assert_array_equal(slt.fa, np.zeros((3, 3, 3)))

    data2[:, :, :, 0] = 250
    slt.fit(data2)
    print 'data2 coeff bigger S0', slt.coeff
    print 'tensors', slt.tensors
    print 'fa', slt.fa
    print 'adc', slt.adc

    data2[:, :, :, 0] = 50
    slt.fit(data2)
    print 'data2 coeff smaller S0', slt.coeff
    print 'tensors', slt.tensors
    print 'fa', slt.fa
    print 'adc', slt.adc

def run_one_subject(subject_id):
    #tree_name = 'Results/' + str(subject_id) + '/' + str(subject_id) + '_full_tracks_50_neighbors_modified_ward_full_tree.tree'
    tree_name = 'Results/' + str(subject_id) + '/' + str(subject_id) + '_full_tracks_50_neighbors_modified_ward_full_tree_130516.tree'
    #'Result/210/210_full_tracks_50_neighbors_modified_ward_full_tree.tree'
    tree = load_pickle(tree_name)

    k1 = 15
    iterations = 50

    cut = tree.best_cut()
    #print 'origin cut', cut
    remove_valley(cut)
    #print 'after remove valley', cut

    cut_scales_ori = [s[0] for s in cut]
    temp_scales = heuristic_modified_cuts(cut_scales_ori[:4], 3)
    temp_scales_1 = heuristic_modified_cuts(cut_scales_ori[4:], 4, temp_scales[len(temp_scales) - 1])
    cut_scales = np.concatenate((temp_scales, temp_scales_1))

    split = np.zeros((len(cut_scales), iterations))
    for j in np.arange(len(cut_scales)):
        #run from the top cut to the bottom cut
        i = len(cut_scales) - j - 1
        height = cut_scales[i]

        t0 = time.time()
        guillotines = tree.cut(height)
        t_cut = time.time() - t0
        print 'Time for cutting at height ', height, ' : ', t_cut

        #the height of the next cut
        if i == 0:
            h = 0.
        else:
            h = cut_scales[i - 1]

        print 'Compute split factor of ', height, ' to scale', h
        for k in np.arange(iterations):
            random_nodes = random_elements(guillotines, k1)
            print '\t trial ', k, ': ', random_nodes
            split[i, k] = split_factor_setnodes(tree, random_nodes, h)

    title = '\nSplit factor for cutting time of ' + tree_name
    plot_result(split, cut_scales, '-kx', '210', title)

def see_spherical_intersections(fdpy, fsr):
    dpr = Dpy(fdpy, 'r')
    T = dpr.read_tracks()
    dpr.close()

    SR = load_pickle(fsr)

    r = fvtk.ren()
    for key in SR:
        ind = SR[key]['indices']
        intersT = [T[i] for i in ind]
        fvtk.add(r, fvtk.line(intersT, np.random.rand(3)))
        centerw = SR[key]['centerw']
        radius = SR[key]['radiusw']
        fvtk.add(r, fvtk.sphere(position=centerw, radius=radius))
    fvtk.show(r)

def load_pbc_data(id=None):
    if id is None:
        path = "/home/eg309/Data/PBC/pbc2009icdm/brain1/"
        streams, hdr = tv.read(path + "brain1_scan1_fiber_track_mni.trk")
        streamlines = [s[0] for s in streams]
        return streamlines

    if not osp.exists("/tmp/" + str(id) + ".pkl"):
        path = "/home/eg309/Data/PBC/pbc2009icdm/brain1/"
        streams, hdr = tv.read(path + "brain1_scan1_fiber_track_mni.trk")
        streamlines = [s[0] for s in streams]

        labels = np.loadtxt(path + "brain1_scan1_fiber_labels.txt")
        labels = labels[:, 1]

        mask_cst = labels == id
        cst_streamlines = [s for (i, s) in enumerate(streamlines) if mask_cst[i]]
        save_pickle("/tmp/" + str(id) + ".pkl", cst_streamlines)
        return cst_streamlines
        # return [approx_polygon_track(s, 0.7853) for s in cst_streamlines]
    else:
        return load_pickle("/tmp/" + str(id) + ".pkl")

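# Hypothetical usage sketch (the bundle label 1 is only an example value): the
# first call for a label extracts the bundle and caches it under /tmp, and later
# calls simply reload the pickled copy.
def example_load_pbc_bundle():
    whole_brain = load_pbc_data()   # every streamline of the PBC brain1 scan
    bundle = load_pbc_data(id=1)    # only streamlines carrying that label
    print len(whole_brain), len(bundle)
    return bundle
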
def load_tract(tracks_filename, id_file):
    from dipy.io.pickles import load_pickle
    if tracks_filename[-3:] == 'dpy':
        from dipy.io.dpy import Dpy
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks = dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        import nibabel as nib
        streams, hdr = nib.trackvis.read(tracks_filename, points_space='voxel')
        all_tracks = np.array([s[0] for s in streams], dtype=np.object)

    tracks_id = load_pickle(id_file)
    tract = [all_tracks[i] for i in tracks_id]
    tract = np.array(tract, dtype=np.object)
    return tract

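# Hypothetical usage sketch (file names are placeholders): the same index pickle
# can be applied to a .dpy or a .trk tractography file, since load_tract picks
# the reader from the file extension.
def example_load_tract():
    tract_from_dpy = load_tract('tracks_dti_3M_linear.dpy', 'cst_R_idx.pkl')
    tract_from_trk = load_tract('tracks_dti_3M_linear.trk', 'cst_R_idx.pkl')
    return tract_from_dpy, tract_from_trk
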
def show_timing_vs_others():
    res = load_pickle('/home/eg309/Data/LSC_limits/timings_1K_100K.pkl')
    ax = plt.subplot(111)
    times = res[12][8]['time'][:-1]
    print len(times)
    x = np.arange(10**3, 10**5 + 10**3, 10**3)
    print len(x)
    #ax.set_xticklabels(['1K', '20K', '40K', '60K', '70K', '80K', '90K', '100K'])
    ax.plot(x, times, label='LSC')
    ax.plot([1000, 60000], [30, 14400], "*", label='Wang')
    #ax.set_ylim((0, 40))
    #ax.set_xlim((1000, 100000))
    plt.legend(loc=0)
    plt.show()

def save_id_tract_plus_sff(tracks_filename, id_file, num_proto, distance, out_fname):
    if tracks_filename[-3:] == 'dpy':
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks = dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)

    tracks_id = load_pickle(id_file)
    tract = [all_tracks[i] for i in tracks_id]

    not_tract_fil = []
    id_not_tract_fil = []
    min_len = min(len(i) for i in tract)
    #print 'min_len of cst', min_len
    min_len = min_len * 2.2 / 3  #2./3.2# - 20
    for i in np.arange(len(all_tracks)):
        if (i not in tracks_id) and (length(all_tracks[i]) > min_len):
            not_tract_fil.append(all_tracks[i])
            id_not_tract_fil.append(i)

    not_tract_fil = np.array(not_tract_fil, dtype=np.object)
    sff_pro_id = sff(not_tract_fil, num_proto, distance)

    tract_sff_id = []
    for i in tracks_id:
        tract_sff_id.append(i)
    for idx in sff_pro_id:
        tract_sff_id.append(id_not_tract_fil[idx])
    #tract_sff_id.append(id_not_tract_fil[i] for i in sff_pro_id)

    print len(tract), len(tract_sff_id)
    save_pickle(out_fname, tract_sff_id)
    return tract_sff_id

def save_id_tract_plus_sff_in_ext(tracks_filename, id_file, num_proto, distance,
                                  out_fname_ext, out_fname_sff_in_ext,
                                  thres_len=2.2 / 3., thres_vol=1.4, thres_dis=3. / 2.):
    tract_ext_id = save_id_tract_ext1(tracks_filename, id_file, distance, out_fname_ext,
                                      thres_len, thres_vol, thres_dis)

    if tracks_filename[-3:] == 'dpy':
        dpr_tracks = Dpy(tracks_filename, 'r')
        all_tracks = dpr_tracks.read_tracks()
        dpr_tracks.close()
    else:
        all_tracks = load_whole_tract_trk(tracks_filename)

    tracks_id = load_pickle(id_file)

    ext_not_tract_id = []
    ext_not_tract = []
    for idx in tract_ext_id:
        if idx not in tracks_id:
            ext_not_tract.append(all_tracks[idx])
            ext_not_tract_id.append(idx)

    ext_not_tract = np.array(ext_not_tract, dtype=np.object)
    sff_pro_id = sff(ext_not_tract, num_proto, distance)

    tract_sff_in_ext_id = []
    for i in tracks_id:
        tract_sff_in_ext_id.append(i)
    for k in sff_pro_id:
        tract_sff_in_ext_id.append(ext_not_tract_id[k])
    #tract_sff_id.append(id_not_tract_fil[i] for i in sff_pro_id)

    print len(tracks_id), len(tract_sff_in_ext_id), len(tract_ext_id)
    save_pickle(out_fname_sff_in_ext, tract_sff_in_ext_id)
    return tract_sff_in_ext_id

def show_conn_mat(filename):
    d = load_pickle(filename)
    method = filename.split('__')[1].split('_')[0]

    subplot(2, 2, 1)
    title(method + ' full')
    imshow(d['mat'])

    subplot(2, 2, 2)
    title(method + ' 0.5')
    imshow(d['conn_mats'][0])

    subplot(2, 2, 3)
    title(method + ' 1.')
    imshow(d['conn_mats'][1])

    subplot(2, 2, 4)
    title(method + ' 1.5')
    imshow(d['conn_mats'][2])

    print 'Diffs: ', d['diffs']
    try:
        print 'Ratio: ', d['ratio']
    except KeyError:
        print 'KeyError: ratio does not exist'

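# Hypothetical usage sketch (the file name is a placeholder that only follows the
# 'prefix__method_*' pattern parsed by show_conn_mat): draw the four connectivity
# matrices stored in one results pickle.
def example_show_conn_mat():
    from pylab import figure, show
    figure()
    show_conn_mat('results__DTI_conn_mats.pkl')
    show()
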
def check_bigger_clusters():
    avirtuals = {}
    for (i, out) in enumerate(outs):
        C = load_pickle(dout + out + '.skl')
        cinds = np.zeros(len(C))
        for c in C:
            cinds[c] = len(C[c]['indices'])
        descend = np.argsort(cinds)[::-1]
        desc = descend[:400]
        virtuals = []
        for c in desc:
            v = C[c]['hidden'] / float(C[c]['N'])
            virtuals.append(v)
        avirtuals[i] = virtuals

    r = fvtk.ren()
    fvtk.add(r, fvtk.line(avirtuals[0], fvtk.red))
    fvtk.add(r, fvtk.line(avirtuals[9], fvtk.yellow))
    fvtk.add(r, fvtk.line(avirtuals[5], fvtk.green))
    fvtk.show(r)

                     [2*(b*c + a*d), a*a + c*c - b*b - d*d, 2*(c*d - a*b)],
                     [2*(b*d - a*c), 2*(c*d + a*b), a*a + d*d - b*b - c*c]])


if __name__ == '__main__':
    root = Tkinter.Tk()
    root.withdraw()
    dir_name = 'ALS/ALS_Segmentation'
    tracks_chosen_filename = tkFileDialog.askopenfilename(parent=root, initialdir=dir_name)

    num_seeds = 3  #1M 3M
    #tracks_chosen_filename = 'ALS/ALS_Segmentation/s201_corticospinal_left_3M_Nivedita.pkl'
    #subj = 201

    tracks_id = load_pickle(tracks_chosen_filename)

    root = Tkinter.Tk()
    root.wm_title('Subject Selection')
    subsel = SubjectSelector(root, default_value=1)
    root.wait_window()

    mapping = [0, 101, 201, 102, 202, 103, 203, 104, 204, 105, 205, 106, 206, 107,
               207, 109, 208, 110, 209, 111, 210, 112, 212, 113, 213]
    subj = mapping[subsel.value]

    #load the tracks
    tracks_filename = 'ALS/ALS_Data/' + str(subj) + '/DIFF2DEPI_EKJ_64dirs_14/DTI/tracks_dti_' + str(num_seeds) + 'M_linear.dpy'
    dpr_tracks = Dpy(tracks_filename, 'r')
    tensor_all_tracks = dpr_tracks.read_tracks()
    dpr_tracks.close()
    T = [tensor_all_tracks[i] for i in tracks_id]

            to_be_deleted[i] = 1

    for i in np.where(to_be_deleted > 0)[0]:
        del C[k[i]]

    return C


def most(C):
    for c in C:
        pass
        # pf.most_similar_track_mam()


T = pkl.load_pickle(fname)

print 'Reducing the number of points...'
T = [pf.approx_polygon_track(t) for t in T]

print 'Reducing further to tracks with 3 pts...'
T2 = [tm.downsample(t, 3) for t in T]

print 'LARCH ...'
print 'Splitting ...'
t = time.clock()
C = pf.larch_3split(T2, None, 5.)
print time.clock() - t, len(C)

for c in C:
    print c, C[c]['rep3'] / C[c]['N']

    #t_cst_ext_file = '/home/bao/tiensy/Lauren_registration/data_compare_mapping/pairwise_reg_cstext2cstext/CST_ROI_trkvis_Right/' + source_sub + '_' + target_sub + '/out_reg/iteration_4/' + target_sub + '_cst_R_tvis_ext_reg.trk'
    #out_file = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/Lauren_pair_CSText2CSText/Lauren_pair_CSText2CSText_f300_l75_1NN/map_1nn_pairwise_reg_CST_R_ext_' + source_sub + '_aligned_to_CST_R_ext_' + target_sub + '_Lauren_f300_l75.txt'

    s_cst_ext_file = '/home/bao/tiensy/Lauren_registration/data_compare_mapping/pairwise_reg_cstext2cstext/CST_ROI_trkvis_Right/' + source_sub + '_' + target_sub + '/out_reg_f100_l25/iteration_4/' + source_sub + '_cst_R_tvis_ext_reg.trk'
    t_cst_ext_file = '/home/bao/tiensy/Lauren_registration/data_compare_mapping/pairwise_reg_cstext2cstext/CST_ROI_trkvis_Right/' + source_sub + '_' + target_sub + '/out_reg_f100_l25/iteration_4/' + target_sub + '_cst_R_tvis_ext_reg.trk'
    out_file = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/Lauren_pair_CSText2CSText/Lauren_pair_CSText2CSText_f100_l25_1NN/map_1nn_pairwise_reg_CST_R_ext_' + source_sub + '_aligned_to_CST_R_ext_' + target_sub + '_Lauren_f100_l25.txt'

    s_cst_idx_file = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/ROI_seg_tvis/ROI_seg_tvis_native/' + source_sub + '_corticospinal_R_tvis.pkl'
    t_cst_idx_file = '/home/bao/tiensy/Tractography_Mapping/data/trackvis_tractography/ROI_seg_tvis/ROI_seg_tvis_native/' + target_sub + '_corticospinal_R_tvis.pkl'
    '''

    source_ext = load_whole_tract(s_cst_ext_file)
    target_ext = load_whole_tract(t_cst_ext_file)

    s_cst_idx = load_pickle(s_cst_idx_file)
    source = source_ext[:len(s_cst_idx)]

    t_cst_idx = load_pickle(t_cst_idx_file)
    target = target_ext[:len(t_cst_idx)]

    #print len(source), len(target)

    tractography1 = source
    tractography2 = target_ext

    map_all = mapping_nn(tractography1, tractography2)

    if save:
        #print 'Saving 1-NN tract based: ', out_file
    tim = time.clock()
    C, atracks = tl.larch(tracks, [50.**2, 20.**2, 5.**2], True, True)
    #tracks = [tm.downsample(t, 3) for t in tracks]
    #C = pf.local_skeleton_clustering(tracks, 20.)
    print 'Done in total of ', time.clock() - tim, 'seconds.'

    print 'Saving result...'
    pkl.save_pickle(C_fname, C)

    streams = [(i, None, None) for i in atracks]
    tv.write(appr_fname, streams, hdr)
else:
    print 'Loading result...'
    C = pkl.load_pickle(C_fname)

skel = []
for c in C:
    skel.append(C[c]['repz'])

print 'Showing dataset after clustering...'
r = fos.ren()
fos.clear(r)
colors = np.zeros((len(skel), 3))
for (i, s) in enumerate(skel):
    color = np.random.rand(1, 3)
    colors[i] = color

def track_eudx(trackmodelpickle):
    trackmodel = pickles.load_pickle(trackmodelpickle)
    tracks = track_eudx_work(trackmodel)
    with open('raw_track.trk', 'wb') as ftrkout:
        dwi.save_streamlines_to_trk(tracks, trackmodel.affine, ftrkout)

if __name__ == '__main__':
    #load T1 volume registered in MNI space
    #img = nib.load('data/subj_05/MPRAGE_32/T1_flirt_out.nii.gz')
    #data = img.get_data()
    #affine = img.get_affine()

    #load the tracks registered in MNI space
    fdpyw = 'data/subj_05/101_32/DTI/tracks_gqi_1M_linear.dpy'
    dpr = Dpy(fdpyw, 'r')
    T = dpr.read_tracks()
    dpr.close()

    #load initial QuickBundles with threshold 30mm
    fpkl = 'data/subj_05/101_32/DTI/qb_gqi_1M_linear_30.pkl'
    #qb = QuickBundles(T, 30., 12)
    #save_pickle(fpkl, qb)
    qb = load_pickle(fpkl)

    #create the interaction system for tracks
    tl = TrackLabeler('Bundle Picker',
                      qb, qb.downsampled_tracks(),
                      vol_shape=(182, 218, 182),
                      tracks_alpha=1)

    #add an interactive slicing/masking tool
    #sl = Slicer(affine, data)
    #add one-way communication between tl and sl
    #tl.slicer = sl

    title = 'Bundle Picking'
    w = Window(caption=title, width=1200, height=800, bgcolor=(0., 0., 0.2))

def load_data_dis(data_id, num_prototype=40):
    filename = 'Results/' + str(data_id) + '/' + str(data_id) + '_data_disimilarity_full_tracks_' + str(num_prototype) + '_prototyes_random.dis'
    print "Loading tracks dissimilarity"
    dis = load_pickle(filename)
    return dis