###############################################################################
# Demo: watershed segmentation of a simulated 2D activation map, then
# computation of the mean signal within each watershed region.

# data simulation: one subject, three Gaussian blobs on a 60x60 grid
shape = (60, 60)
pos = np.array([[12, 14], [20, 20], [30, 20]])
ampli = np.array([3, 4, 4])
x = simul.surrogate_2d_dataset(
    n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze()
th = 2.36  # threshold applied before the watershed

# compute the field structure and perform the watershed
domain = grid_domain_from_shape(shape)
nroi = HROI_from_watershed(domain, np.ravel(x), threshold=th)
label = nroi.label  # one integer label per voxel; -1 means background

# compute the region-based signal average (one mean per label value)
bfm = np.array([x.ravel()[label == k].mean()
                for k in range(label.max() + 1)])
bmap = np.zeros(x.size)
foreground = label > -1
if label.max() > -1:
    # broadcast each region's mean back onto its voxels
    bmap[foreground] = bfm[label[foreground]]

label = np.reshape(label, shape)
bmap = np.reshape(bmap, shape)

###############################################################################
# plot the input image
# Demo: hierarchical parcellation of simulated multi-subject activation data.

# step 1: generate some synthetic data
n_subj = 10
shape = (60, 60)
pos = 3 * np.array([[6, 7], [10, 10], [15, 10]])
ampli = np.array([5, 7, 6])
sjitter = 6.0
# dataset represents 2D activation images from n_subj subjects
dataset = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos,
                                     ampli=ampli, width=10.0)

# step 2: prepare all the information for the parcellation
nbparcel = 10
ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1))
domain = dom.grid_domain_from_shape(shape)

# step 3: run the algorithm
Pa = hp.hparcel(domain, ldata, nbparcel, mu=3.0)
# note: play with mu to change the 'stiffness' of the parcellation

# step 4: look at the results
Label = np.array([np.reshape(Pa.individual_labels[:, s], shape)
                  for s in range(n_subj)])

# fix: matplotlib.pylab is a deprecated shim; import pyplot directly
# (same alias `mp` is kept so any later code keeps working)
import matplotlib.pyplot as mp

mp.figure(figsize=(8, 4))
mp.title('Input data')
for s in range(n_subj):
    mp.subplot(2, 5, s + 1)
    mp.imshow(dataset[s], interpolation='nearest')
    mp.axis('off')  # hide ticks/frame for cleaner image display
[20, 20], [30, 20]]) ampli = np.array([5, 7, 6]) sjitter = 1.0 stats = simul.surrogate_2d_dataset(n_subj=n_subjects, shape=shape, pos=pos, ampli=ampli, width=5.0) # set various parameters threshold = float(st.t.isf(0.01, 100)) sigma = 4. / 1.5 prevalence_threshold = n_subjects * .25 prevalence_pval = 0.9 smin = 5 algorithm = 'co-occurrence' # 'density' domain = grid_domain_from_shape(shape) # get the functional information stats_ = np.array([np.ravel(stats[k]) for k in range(n_subjects)]).T # run the algo landmarks, hrois = compute_landmarks( domain, stats_, sigma, prevalence_pval, prevalence_threshold, threshold, smin, method='prior', algorithm=algorithm) display_landmarks_2d(landmarks, hrois, stats) if landmarks is not None: landmarks.show() plt.show()
# Demo: hierarchical parcellation of simulated multi-subject activation data.

# step 1: generate some synthetic data
n_subj = 10
shape = (60, 60)
pos = 3 * np.array([[6, 7], [10, 10], [15, 10]])
ampli = np.array([5, 7, 6])
sjitter = 6.0
# dataset holds one 2D activation image per subject
dataset = simul.surrogate_2d_dataset(
    n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=10.0)

# step 2: prepare all the information for the parcellation
nbparcel = 10
ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1))
domain = dom.grid_domain_from_shape(shape)

# step 3: run the algorithm
Pa = hp.hparcel(domain, ldata, nbparcel, mu=3.0)
# note: play with mu to change the 'stiffness' of the parcellation

# step 4: look at the results (per-subject label image)
Label = np.array([Pa.individual_labels[:, s].reshape(shape)
                  for s in range(n_subj)])

plt.figure(figsize=(8, 4))
plt.title('Input data')
for s, image in enumerate(dataset):
    plt.subplot(2, 5, s + 1)
    plt.imshow(image, interpolation='nearest')
    plt.axis('off')