def create_frequency_map_from_many_binary_images(
        Path_of_list_of_single_3D_files, probability_map_file_name,
        save_dir, n_total=0):
    """Sum many binary 3D ROI images into a percent-frequency map.

    Parameters
    ----------
    Path_of_list_of_single_3D_files : list or str
        Either a list of ROI paths (e.g. created by the helper above), or
        the path of a text file listing one ROI path per line; the ROIs
        are summed together directly.
    probability_map_file_name : str
        File name of the output frequency map.
    save_dir : str
        Directory where the output (and a temporary merged 4D file) is written.
    n_total : int, optional
        Denominator for the percentage. Defaults to the number of input ROIs.

    Returns
    -------
    str
        Full path of the saved frequency map.
    """
    if isinstance(Path_of_list_of_single_3D_files, str):
        # A text file with one ROI path per line.
        with open(Path_of_list_of_single_3D_files) as L:
            ThreeD_files_list = L.read().splitlines()
    else:
        # Then it should be a list already
        ThreeD_files_list = Path_of_list_of_single_3D_files

    # Single source of truth for the temporary file name (previously the
    # literal "temporary" was duplicated in the call below).
    merged_file_name = "temporary"
    fourD_file_path = create_merged_file(ThreeD_files_list, save_dir,
                                         merged_file_name=merged_file_name)
    if n_total == 0:
        n_total = len(ThreeD_files_list)

    masker = NiftiMasker(standardize=False)
    four_D_file_masked = masker.fit_transform(fourD_file_path)

    # Percentage of input images in which each voxel is detected:
    # sum over volumes, keep a (1, n_voxels) row for inverse_transform.
    sum_file = four_D_file_masked.sum(axis=0)[np.newaxis, :]
    sum_file = sum_file / n_total * 100

    file_to_save = os.path.join(save_dir, probability_map_file_name)
    masker.inverse_transform(sum_file).to_filename(file_to_save)
    # Clean up the temporary merged 4D file.
    os.remove(os.path.join(save_dir, merged_file_name + '.nii.gz'))
    return file_to_save
def compute_fixed_effects(contrast_imgs, variance_imgs, mask=None,
                          precision_weighted=False):
    """Compute fixed effects from images of effects and their variances.

    Parameters
    ----------
    contrast_imgs: list of Nifti1Images or strings
        the input contrast images
    variance_imgs: list of Nifti1Images or strings
        the input variance images
    mask: Nifti1Image or NiftiMasker instance or None, optional
        mask image. If None, it is recomputed from contrast_imgs
    precision_weighted: Bool, optional
        whether fixed effects estimates should be weighted by inverse
        variance or not. Defaults to False.

    Returns
    -------
    fixed_fx_contrast_img: Nifti1Image
        the fixed effects contrast computed within the mask
    fixed_fx_variance_img: Nifti1Image
        the fixed effects variance computed within the mask
    fixed_fx_t_img: Nifti1Image
        the fixed effects t-test computed within the mask

    Note
    ----
    This function is experimental. It may change in any future release
    of Nilearn.
    """
    n_contrasts, n_variances = len(contrast_imgs), len(variance_imgs)
    if n_contrasts != n_variances:
        raise ValueError('The number of contrast images (%d) '
                         'differs from the number of variance images (%d). '
                         % (n_contrasts, n_variances))

    # Build a fitted masker from whatever form `mask` takes.
    if isinstance(mask, NiftiMasker):
        fitted_masker = mask.fit()
    elif mask is None:
        fitted_masker = NiftiMasker().fit(contrast_imgs)
    else:
        fitted_masker = NiftiMasker(mask_img=mask).fit()

    variance_data = fitted_masker.transform(variance_imgs)
    contrast_data = fitted_masker.transform(contrast_imgs)

    ffx_contrast, ffx_variance, ffx_t = _compute_fixed_effects_params(
        contrast_data, variance_data, precision_weighted)

    return (fitted_masker.inverse_transform(ffx_contrast),
            fitted_masker.inverse_transform(ffx_variance),
            fitted_masker.inverse_transform(ffx_t))
def map_threshold(stat_img, mask_img, threshold, height_control='fpr', cluster_threshold=0): """ Threshold the provvided map Parameters ---------- stat_img : Niimg-like object, statistical image (presumably in z scale) mask_img : Niimg-like object, mask image threshold: float, cluster forming threshold (either a p-value or z-scale value) height_control: string false positive control meaning of cluster forming threshold: 'fpr'|'fdr'|'bonferroni'|'none' cluster_threshold : float, optional cluster size threshold Returns ------- thresholded_map : Nifti1Image, the stat_map theresholded at the prescribed voxel- and cluster-level """ # Masking masker = NiftiMasker(mask_img=mask_img) stats = np.ravel(masker.fit_transform(stat_img)) n_voxels = np.size(stats) # Thresholding if height_control == 'fpr': z_th = norm.isf(threshold) elif height_control == 'fdr': z_th = fdr_threshold(stats, threshold) elif height_control == 'bonferroni': z_th = norm.isf(threshold / n_voxels) else: # Brute-force thresholding z_th = threshold stats *= (stats > z_th) stat_map = masker.inverse_transform(stats).get_data() # Extract connected components above threshold label_map, n_labels = label(stat_map > z_th) labels = label_map[(masker.mask_img_.get_data() > 0)] for label_ in range(1, n_labels + 1): if np.sum(labels == label_) < cluster_threshold: stats[labels == label_] = 0 return masker.inverse_transform(stats)
def map_threshold(stat_img, mask_img, threshold, height_control='fpr', cluster_threshold=0): """ Threshold the provvided map Parameters ---------- stat_img : Niimg-like object, statistical image (presumably in z scale) mask_img : Niimg-like object, mask image threshold: float, cluster forming threshold (either a p-value or z-scale value) height_control: string false positive control meaning of cluster forming threshold: 'fpr'|'fdr'|'bonferroni'|'none' cluster_threshold : float, optional cluster size threshold Returns ------- thresholded_map : Nifti1Image, the stat_map theresholded at the prescribed voxel- and cluster-level """ # Masking masker = NiftiMasker(mask_img=mask_img) stats = np.ravel(masker.fit_transform(stat_img)) n_voxels = np.size(stats) # Thresholding if height_control == 'fpr': z_th = norm.isf(threshold) elif height_control == 'fdr': z_th = fdr_threshold(stats, threshold) elif height_control == 'bonferroni': z_th = norm.isf(threshold / n_voxels) else: # Brute-force thresholding z_th = threshold stats *= (stats > z_th) stat_map = masker.inverse_transform(stats).get_data() # Extract connected components above threshold label_map, n_labels = label(stat_map > z_th) labels = label_map[(masker.mask_img_.get_data() > 0)] for label_ in range(1, n_labels + 1): if np.sum(labels == label_) < cluster_threshold: stats[labels == label_] = 0 return masker.inverse_transform(stats)
def make_ttest(reg1, reg2):
    """Paired t-test between two regressors' maps across subjects.

    Returns a (t-map, p-map) pair of images in the mask's space.
    """
    masker = NiftiMasker(nib.load(MASK_FILE), standardize=False)
    masker.fit()
    subjects = [1, 2, 3, 5, 6, 7, 8, 9, 11, 12,
                13, 14, 15, 16, 17, 18, 19, 20]
    # Fisher z-transform the correlation values before testing.
    sample_a = np.arctanh(join_all_subjects(reg1, subjects, masker))
    sample_b = np.arctanh(join_all_subjects(reg2, subjects, masker))
    t_values, p_values = ttest_rel(sample_a, sample_b)
    t_img = masker.inverse_transform(t_values)
    p_img = masker.inverse_transform(p_values)
    return t_img, p_img
def plot_results(scores, data_path, Y_dim,
                 mask_img="/home/brain/datasets/SherlockMerlin_ds001110/sub-12/func/sub-12_task-SherlockMovie_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz"):
    """Plot the result provided by the optimized model.

    Parameters
    ----------
    scores : array-like, one value per in-mask voxel.
    data_path : str, directory containing 'meanepi.nii.gz' (background image).
    Y_dim : sequence, Y_dim[1] is the total number of voxels/targets.
    mask_img : str, optional
        Brain-mask path. Previously hard-coded; kept as the default so
        existing callers are unaffected, but now overridable.
    """
    unique, counts = np.unique(scores, return_counts=True)
    maxi = max(unique)
    occurence = max(counts)
    print('\nscores max: ', maxi)
    print('scores equals to 0: ', occurence)
    print('scores above 0: ', Y_dim[1] - occurence, '\n')

    # plotting results
    masker = NiftiMasker(mask_img=mask_img, detrend=True, standardize=True)
    masker.fit()
    meanepi = os.path.join(data_path, 'meanepi.nii.gz')
    score_img = masker.inverse_transform(scores)
    plotting.plot_roi(score_img, bg_img=meanepi,
                      title="Results of the clustering",
                      cut_coords=5, display_mode='z', aspect=1.25)
    plt.show()
    plt.close()
def nilearn_denoise(in_file, brain_mask, wm_mask, csf_mask,
                    motreg_file, outlier_file, bandpass, tr):
    """Clean time series using Nilearn high_variance_confounds to extract
    CompCor regressors and NiftiMasker for regression of all nuissance
    regressors, detrending, normalziation and bandpass filtering.

    Returns (absolute path of denoised image, path of confound text file).
    """
    import numpy as np
    import nibabel as nb
    import os
    from nilearn.image import high_variance_confounds
    from nilearn.input_data import NiftiMasker
    from nipype.utils.filemanip import split_filename

    # reload niftis to round affines so that nilearn doesn't complain
    wm_nii = nb.Nifti1Image(nb.load(wm_mask).get_data(),
                            np.around(nb.load(wm_mask).get_affine(), 2),
                            nb.load(wm_mask).get_header())
    csf_nii = nb.Nifti1Image(nb.load(csf_mask).get_data(),
                             np.around(nb.load(csf_mask).get_affine(), 2),
                             nb.load(csf_mask).get_header())
    time_nii = nb.Nifti1Image(nb.load(in_file).get_data(),
                              np.around(nb.load(in_file).get_affine(), 2),
                              nb.load(in_file).get_header())

    # infer shape of confound array
    # not ideal
    confound_len = nb.load(in_file).get_data().shape[3]

    # create outlier regressors: one indicator column per outlier volume
    outlier_regressor = np.empty((confound_len, 1))
    try:
        outlier_val = np.genfromtxt(outlier_file)
    except IOError:
        # no outlier file -> no outlier regressors
        outlier_val = np.empty((0))
    for index in np.atleast_1d(outlier_val):
        outlier_vector = np.zeros((confound_len, 1))
        # BUGFIX: np.genfromtxt yields floats; cast to int so the value is a
        # valid array index (matches the sister implementation in this file).
        outlier_vector[int(index)] = 1
        outlier_regressor = np.hstack((outlier_regressor, outlier_vector))
    # drop the uninitialized first column
    outlier_regressor = outlier_regressor[:, 1:]

    # load motion regressors
    motion_regressor = np.genfromtxt(motreg_file)

    # extract high variance confounds in wm/csf masks from motion corrected data
    wm_regressor = high_variance_confounds(time_nii, mask_img=wm_nii,
                                           detrend=True)
    csf_regressor = high_variance_confounds(time_nii, mask_img=csf_nii,
                                            detrend=True)

    # create Nifti Masker for denoising
    # NOTE(review): high_pass comes from bandpass[1] and low_pass from
    # bandpass[0] -- confirm the expected ordering at the call sites.
    denoiser = NiftiMasker(mask_img=brain_mask, standardize=True,
                           detrend=True, high_pass=bandpass[1],
                           low_pass=bandpass[0], t_r=tr)

    # denoise and return denoise data to img
    confounds = np.hstack((outlier_regressor, wm_regressor,
                           csf_regressor, motion_regressor))
    denoised_data = denoiser.fit_transform(in_file, confounds=confounds)
    denoised_img = denoiser.inverse_transform(denoised_data)

    # save
    _, base, _ = split_filename(in_file)
    img_fname = base + '_denoised.nii.gz'
    nb.save(denoised_img, img_fname)
    confound_fname = os.path.join(os.getcwd(), "all_confounds.txt")
    np.savetxt(confound_fname, confounds, fmt="%.10f")

    return os.path.abspath(img_fname), confound_fname
def oneSample_ttest(map_list, timeseries, coord):
    """One-sample (intercept-only) second-level test over a list of maps.

    NOTE(review): `coord` is unused; kept for interface compatibility.
    Leftover debug prints were removed.
    """
    template = ld.load_timserie_mask()
    # Intercept-only design: tests whether the mean effect differs from 0.
    design_matrix = pd.DataFrame([1] * len(map_list), columns=['intercept'])
    nifti_masker = NiftiMasker(standardize=True, mask_strategy='epi',
                               memory="nilearn_cache", memory_level=2,
                               smoothing_fwhm=8)
    ts = ants.matrix_to_timeseries(template, timeseries[0])
    nifti_masker.fit(ts)
    zf = np.asmatrix(map_list[0].transpose())
    second_level_model = SecondLevelModel().fit(pd.DataFrame(zf),
                                                design_matrix=design_matrix)
    z_map = second_level_model.compute_contrast(output_type='z_score')
    return z_map
def _run_interface(self, runtime):
    # Regress an atlas ROI's mean signal out of a functional image and save
    # the residualized image as '<base>_regressed.nii.gz' in the cwd.
    from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
    from nipype.utils.filemanip import split_filename
    import nibabel as nib
    import os

    functional_filename = self.inputs.in_file
    atlas_filename = self.inputs.atlas_filename
    mask_filename = self.inputs.mask_filename

    # Extracting the ROI signals
    masker = NiftiLabelsMasker(labels_img=atlas_filename,
                               background_label = 0,
                               standardize=True, detrend = True,
                               verbose = 1
                               )
    time_series = masker.fit_transform(functional_filename)

    # Removing the ROI signal from the time series
    # NOTE(review): time_series[..., 0] passes only the FIRST ROI's time
    # series as confound -- confirm that regressing a single ROI (rather
    # than all of them) is the intended behavior.
    nifti_masker = NiftiMasker(mask_img=mask_filename)
    masked_data = nifti_masker.fit_transform(functional_filename,
                                             confounds=time_series[...,0])
    masked_img = nifti_masker.inverse_transform(masked_data)

    # Saving the result to disk
    # NOTE(review): `outputs` is built but never used here -- presumably
    # populated by a separate _list_outputs; verify.
    outputs = self._outputs().get()
    fname = self.inputs.in_file
    _, base, _ = split_filename(fname)
    nib.save(masked_img, os.path.abspath(base + '_regressed.nii.gz'))
    return runtime
def ward_adni_rs_fmri(func_files, n_clusters=200):
    """Spatially-constrained Ward clustering of resting-state fMRI voxels.

    Parameters
    ----------
    func_files : list of functional images
        Input images; the mask is estimated from them ('epi' strategy).
    n_clusters : int, optional
        Number of Ward parcels (default 200).

    Side effect: saves the parcellation image as 'ward_<n_clusters>'
    in CACHE_DIR. Returns None.
    """
    masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=1))
    masker.fit(func_files)
    func_masked = masker.transform(func_files)
    func_masked = np.vstack(func_masked)

    ###########################################################################
    # Ward
    ###########################################################################
    # Voxel-lattice connectivity constraint keeps parcels spatially contiguous.
    mask = masker.mask_img_.get_data().astype(np.bool)
    shape = mask.shape
    connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
                                       n_z=shape[2], mask=mask)

    # Computing the ward for the first time, this is long...
    ward = WardAgglomeration(n_clusters=n_clusters, connectivity=connectivity,
                             memory='nilearn_cache')
    ward.fit(func_masked)

    # (unused intermediates ward_labels_unique / ward_labels removed)
    ward_filename = '_'.join(['ward', str(n_clusters)])
    img_ward = masker.inverse_transform(ward.labels_)
    img_ward.to_filename(os.path.join(CACHE_DIR, ward_filename))
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False):
    """Warp one BOLD run to group space, regress confounds, detrend,
    smooth, and save it as 'run00<num>.nii.gz' in subj_dir.

    Python-2-only print statements were converted to print() calls,
    which behave identically on both Python 2 and 3 for a single
    argument; the rest of this file already uses print().
    """
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1',
                                 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1',
                             'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')
    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)

    # Warp to group space only when not already cached on disk.
    if force_warp or not os.path.exists(output_path):
        print('Warping image #%i...' % num)
        subprocess.call(['fsl5.0-applywarp', '-i', bold_path,
                         '-o', output_path, '-r', template_path,
                         '-w', warp_path, '-d', 'float'])
    else:
        print('Reusing cached warp image #%i' % num)

    print('Loading image #%i...' % num)
    bold = load(output_path)

    masker = NiftiMasker(load(MASK_FILE))
    # masker = niftimasker(load(MASK_FILE), detrend=true, smoothing_fwhm=4.0,
    #                      high_pass=0.01, t_r=2.0, standardize=true)
    masker.fit()

    print('Removing confounds from image #%i...' % num)
    data = masker.transform(bold, confounds(num, subj))

    print('Detrending image #%i...' % num)
    # The Savitzky-Golay fit captures the slow trend; subtracting it detrends.
    filtered = np.float32(savgol_filter(data, 61, 5, axis=0))
    img = masker.inverse_transform(data - filtered)

    print('Smoothing image #%i...' % num)
    img = image.smooth_img(img, 4.0)

    print('Saving image #%i...' % num)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print('Finished with image #%i' % num)
def nilearn_denoise(in_file, brain_mask, motreg_file, outlier_file,
                    bandpass, tr):
    """Clean time series using Nilearn high_variance_confounds to extract
    CompCor regressors and NiftiMasker for regression of all nuissance
    regressors, detrending, normalziation and bandpass filtering.

    Returns the absolute paths of the denoised image, the denoised data
    array (.npy) and the confound text file.
    """
    import numpy as np
    import nibabel as nb
    import os
    from nilearn.image import high_variance_confounds
    from nilearn.input_data import NiftiMasker
    from nipype.utils.filemanip import split_filename

    # reload niftis to round affines so that nilearn doesn't complain
    #csf_nii=nb.Nifti1Image(nb.load(csf_mask).get_data(), np.around(nb.load(csf_mask).get_affine(), 2), nb.load(csf_mask).get_header())
    #time_nii=nb.Nifti1Image(nb.load(in_file).get_data(),np.around(nb.load(in_file).get_affine(), 2), nb.load(in_file).get_header())

    # infer shape of confound array
    confound_len = nb.load(in_file).get_data().shape[3]

    # create outlier regressors: one indicator column per outlier volume
    outlier_regressor = np.empty((confound_len,1))
    try:
        outlier_val = np.genfromtxt(outlier_file)
    except IOError:
        # no outlier file -> no outlier regressors
        outlier_val = np.empty((0))
    for index in np.atleast_1d(outlier_val):
        outlier_vector = np.zeros((confound_len, 1))
        outlier_vector[int(index)] = 1
        outlier_regressor = np.hstack((outlier_regressor, outlier_vector))
    # drop the uninitialized first column
    outlier_regressor = outlier_regressor[:,1::]

    # load motion regressors
    motion_regressor=np.genfromtxt(motreg_file)

    # extract high variance confounds in wm/csf masks from motion corrected data
    #csf_regressor=high_variance_confounds(time_nii, mask_img=csf_nii, detrend=True)
    #global_regressor=high_variance_confounds(time_nii, mask_img=brain_nii, detrend=True)

    # create Nifti Masker for denoising
    # NOTE(review): high_pass comes from bandpass[1] and low_pass from
    # bandpass[0] -- confirm the expected ordering at the call sites.
    denoiser=NiftiMasker(mask_img=brain_mask, standardize=True, detrend=True,
                         high_pass=bandpass[1], low_pass=bandpass[0], t_r=tr)

    # denoise and return denoise data to img
    confounds=np.hstack((outlier_regressor, motion_regressor)) # csf regressor
    denoised_data=denoiser.fit_transform(in_file, confounds=confounds)
    denoised_img=denoiser.inverse_transform(denoised_data)

    # save
    _, base, _ = split_filename(in_file)
    img_fname = base + '_denoised.nii'
    nb.save(denoised_img, img_fname)
    data_fname = base + '_denoised.npy'
    np.save(data_fname, denoised_data)
    confound_fname = os.path.join(os.getcwd(), "all_confounds.txt")
    np.savetxt(confound_fname, confounds, fmt="%.10f")

    return os.path.abspath(img_fname), os.path.abspath(data_fname), confound_fname
def _run_interface(self, runtime):
    """Apply the brain mask to the input image and write the result as
    'skullstrip.nii.gz'; the path is stored on the 'skullstrip_brain'
    attribute."""
    import os
    import nibabel as nb
    from nilearn.input_data import NiftiMasker

    out_path = os.path.abspath("skullstrip.nii.gz")
    # Pure masking step: no detrending or standardization.
    masker = NiftiMasker(detrend=False,
                         standardize=False,
                         mask_img=self.inputs.brain_mask,
                         memory='nilearn_cache',
                         memory_level=1)  # cache options
    masked_data = masker.fit_transform(self.inputs.in_file)
    masker.inverse_transform(masked_data).to_filename(out_path)
    setattr(self, 'skullstrip_brain', out_path)
    return runtime
def compute_classifs(estimator, standard_scaler, config, return_type='img'):
    """Extract per-study classification maps from a fitted estimator,
    optionally projecting them back to voxel space."""
    if config['model']['estimator'] in ['multi_study', 'ensemble']:
        module = curate_module(estimator)
        module.eval()

        studies = module.classifiers.keys()
        in_features = module.embedder.in_features

        with torch.no_grad():
            # Probing with the identity recovers each class's weight vector;
            # probing with zeros recovers the bias, which is subtracted out.
            classifs = module({study: torch.eye(in_features)
                               for study in studies}, logits=True)
            biases = module({study: torch.zeros((1, in_features))
                             for study in studies}, logits=True)
            classifs = {study: classifs[study] - biases[study]
                        for study in studies}
            # Center classes within each study.
            classifs = {study: classif - classif.mean(dim=0, keepdim=True)
                        for study, classif in classifs.items()}
        classifs = {study: classif.numpy().T
                    for study, classif in classifs.items()}
    elif config['model']['estimator'] == 'logistic':
        classifs = estimator.coef_
    else:
        raise ValueError('Wrong config file')

    if standard_scaler is not None:
        # Undo the per-study feature scaling applied during training.
        for study, classif in classifs.items():
            sc = standard_scaler.scs_[study]
            classifs[study] = classif / sc.scale_[None, :]

    if config['data']['reduced']:
        # Project from dictionary (component) space back to voxel space.
        mask = fetch_mask()
        masker = NiftiMasker(mask_img=mask).fit()
        modl_atlas = fetch_atlas_modl()
        dictionary = modl_atlas['components_453_gm']
        dictionary = masker.transform(dictionary)
        classifs = {study: classif.dot(dictionary)
                    for study, classif in classifs.items()}
    if return_type == 'img':
        # NOTE(review): `masker` is only bound in the reduced branch above;
        # return_type='img' with config['data']['reduced'] False would raise
        # NameError -- confirm callers always pair 'img' with reduced data.
        classifs_img = masker.inverse_transform(
            np.concatenate(list(classifs.values()), axis=0))
        return classifs_img
    elif return_type == 'arrays':
        return classifs
    else:
        raise ValueError
def SVM(self, X, y):
    """Fit an ANOVA-feature-selected linear SVM on resampled images,
    report training performance, and plot the SVM weight map.

    Returns the computed stats (classification stats dict entry or MSE).
    """
    # (-10.0, -26.0, 28.0) COORDINATES WERE FOUND
    print("Resampling..")
    X_resampled = self.nilearn_resample(X)
    mean_img = nilearn.image.mean_img(X_resampled)
    masker = NiftiMasker(smoothing_fwhm=1, standardize=True,
                         memory="nilearn_cache", memory_level=1)
    X = masker.fit_transform(X_resampled)

    # Model
    print("Training...")
    # Keep only the top 0.5% of voxels by F-score before the SVM.
    feature_selection = SelectPercentile(f_classif, percentile=0.5)
    svc = SVC(kernel='linear')
    anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])
    anova_svc.fit(X, y)
    # NOTE(review): predictions are made on the training data, so the
    # reported stats measure training fit, not generalization.
    y_pred = anova_svc.predict(X)

    # Compute the prediction accuracy for the different folds (i.e. session)
    # NOTE(review): `scoring` is only consumed by the commented-out
    # cross-validation below.
    if self.task == "classification":
        scoring = "accuracy"
    else:
        scoring = "neg_mean_absolute_error"
    # For cross validation
    # cv_scores = cross_val_score(anova_svc, X, y, cv = 3, scoring=scoring)
    # print(cv_scores)
    # Return the corresponding mean prediction accuracy
    # score = cv_scores.mean()
    # print("Score at task: ", score)
    from sklearn.metrics import mean_squared_error
    if self.task == "classification":
        output, stats = self.binary_classification(y, y_pred)
        print(stats)
    else:
        stats = mean_squared_error(y, y_pred)
        print(stats)
    coef = svc.coef_  # was svc
    print("COEF\n", coef)
    # reverse feature selection: back to the full (masked) voxel set
    coef = feature_selection.inverse_transform(coef)
    print("COEF_INVERSE\n", coef)
    # reverse masking: back to a 3D brain image
    weight_img = masker.inverse_transform(coef)

    # Use the mean image as a background to avoid relying on anatomical data
    # Create the figure
    plot_stat_map(weight_img, mean_img, title='SVM weights',
                  cut_coords=())  # -50,-33, 7
    show()
    return stats
def glass_brain(r2_voxels, subject, current_ROI, ROI_name, name): """ Input : Masked results of r2score Take masked data and project it again in a 3D space Ouput : 3D glassbrain of r2score """ # Get one mask and fit it to the corresponding ROI if current_ROI != -1 and current_ROI <= 5: masks_ROIs_filenames = sorted( glob.glob(os.path.join(paths.path2Data, "en/ROIs_masks/", "*.nii"))) ROI_mask = masks_ROIs_filenames[current_ROI] ROI_mask = NiftiMasker(ROI_mask, detrend=True, standardize=True) ROI_mask.fit() unmasked_data = ROI_mask.inverse_transform(r2_voxels) # Get masks and fit a global mask else: masks = [] masks_filenames = sorted( glob.glob( os.path.join(paths.path2Data, "en/fmri_data/masks", "sub_{}".format(subject), "resample*.pkl"))) for file in masks_filenames: with open(file, 'rb') as f: mask = pickle.load(f) masks.append(mask) global_mask = math_img('img>0.5', img=mean_img(masks)) masker = MultiNiftiMasker(global_mask, detrend=True, standardize=True) masker.fit() unmasked_data = masker.inverse_transform(r2_voxels) display = plot_glass_brain(unmasked_data, display_mode='lzry', threshold='auto', colorbar=True, title='Sub_{}'.format(subject)) if not os.path.exists( os.path.join(paths.path2Figures, 'glass_brain', 'Sub_{}'.format(subject), ROI_name)): os.makedirs( os.path.join(paths.path2Figures, 'glass_brain', 'Sub_{}'.format(subject), ROI_name)) display.savefig( os.path.join(paths.path2Figures, 'glass_brain', 'Sub_{}'.format(subject), ROI_name, 'R_squared_test_{}.png'.format(name))) print( 'Figure Path : ', os.path.join(paths.path2Figures, 'glass_brain', 'Sub_{}'.format(subject), ROI_name, 'R_squared_test_{}.png'.format(name))) display.close()
def _run_interface(self, runtime):
    """Standardize the in-mask time series and save 'normalized.nii.gz'."""
    # Remember the original spatial/temporal units so they can be restored
    # on the output header.
    orig_units = nib.load(self.inputs.in_file).header.get_xyzt_units()
    # The interface's `method` input is forwarded as NiftiMasker's
    # `standardize` argument.
    masker = NiftiMasker(mask_img=self.inputs.mask_file,
                         standardize=self.inputs.method)
    normalized_mat = masker.fit_transform(self.inputs.in_file)
    normalized_img = masker.inverse_transform(normalized_mat)
    normalized_img.header.set_xyzt_units(orig_units[0], orig_units[1])
    nib.save(normalized_img, 'normalized.nii.gz')
    return runtime
def coords_to_img(coords, fwhm=9.0):
    """Build a smoothed peak-density image from MNI coordinates.

    Each coordinate adds 1 at its voxel; the resulting image is smoothed
    with a Gaussian of width `fwhm` and restricted to the MNI brain mask.
    """
    brain_mask = load_mni152_brain_mask()
    masker = NiftiMasker(brain_mask).fit()
    # World (MNI) coordinates -> voxel indices via the inverse affine.
    voxel_idx = np.asarray(
        image.coord_transform(*coords.T, np.linalg.pinv(brain_mask.affine)),
        dtype=int,
    ).T
    density = np.zeros(brain_mask.shape)
    # np.add.at accumulates duplicate peaks instead of overwriting them.
    np.add.at(density, tuple(voxel_idx.T), 1.0)
    peaks_img = image.new_img_like(brain_mask, density)
    smoothed = image.smooth_img(peaks_img, fwhm=fwhm)
    # Restrict to the brain mask via a transform/inverse-transform round trip.
    return masker.inverse_transform(masker.transform(smoothed).squeeze())
def test_dict_learning():
    """DictLearning should recover the synthetic components: for the
    dict_init variant, at least 2 of the 4 maps must match ground truth
    with |cosine| > 0.9."""
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=8)
    masker = NiftiMasker(mask_img=mask_img).fit()
    mask = mask_img.get_data() != 0
    flat_mask = mask.ravel()
    # Build the initial dictionary image from the in-mask component values.
    dict_init = masker.inverse_transform(components[:, flat_mask])
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init, mask=mask_img,
                                 smoothing_fwhm=0., alpha=1)
    dict_learning_auto_init = DictLearning(n_components=4, random_state=0,
                                           mask=mask_img, smoothing_fwhm=0.,
                                           n_epochs=10, alpha=1)
    maps = {}
    for estimator in [dict_learning, dict_learning_auto_init]:
        estimator.fit(data)
        # Flatten each fitted component image to (4, n_in_mask_voxels).
        maps[estimator] = estimator.masker_. \
            inverse_transform(estimator.components_).get_data()
        maps[estimator] = np.reshape(
            np.rollaxis(maps[estimator], 3, 0)[:, mask],
            (4, flat_mask.sum()))

    masked_components = components[:, flat_mask]
    # Only the dict_init variant is checked against the ground truth.
    for this_dict_learning in [dict_learning]:
        these_maps = maps[this_dict_learning]
        # L2-normalize rows (guarding zero norms) so the dot product below
        # is a cosine similarity.
        S = np.sqrt(np.sum(masked_components**2, axis=1))
        S[S == 0] = 1
        masked_components /= S[:, np.newaxis]

        S = np.sqrt(np.sum(these_maps**2, axis=1))
        S[S == 0] = 1
        these_maps /= S[:, np.newaxis]

        K = np.abs(masked_components.dot(these_maps.T))
        recovered_maps = np.sum(K > 0.9)
        assert (recovered_maps >= 2)

    # Smoke test n_epochs > 1
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init, mask=mask_img,
                                 smoothing_fwhm=0., n_epochs=2, alpha=1)
    dict_learning.fit(data)
def prior_classifier(meta_file, meta_category):
    """Decompose VBM data within a binarized meta-analytic prior mask and
    save/visualize the resulting components.

    Relies on module-level `method`, `k`, `vbm_path`, `data_dir`,
    `project_dir`, `modality` and `prior_group`.

    Returns (meta_category, loadings) where loadings are the decomposition
    scores per subject.

    Python-2-only print statements were converted to print() calls, which
    behave identically on Python 2 and 3 for a single argument.
    """
    np.random.seed(42)  # reproducible parallelisation
    print('-' * 80)
    print('Current meta prior is %s' % meta_category)

    # Binarize the meta map: any strictly positive voxel joins the mask.
    img = load_img(meta_file)
    img_data = img.get_data()
    img_data[img.get_data() < 0] = 0
    img_data[img.get_data() > 0] = 1
    prior_mask = new_img_like(img, img_data, affine=img.get_affine())
    print('Created NiftiMasker with %i voxels for %s' % (
        (prior_mask.get_data() > 0).sum(), meta_category))
    masker = NiftiMasker(prior_mask)
    masker.fit()
    masked_data = masker.transform(vbm_path)
    z_vbm_data_array = StandardScaler().fit_transform(masked_data)

    # matrix decomposition method (module-level `method` selects it)
    if method == 'pca':
        matdecomp = PCA(n_components=k, random_state=42)
    elif method == 'ica':
        matdecomp = FastICA(n_components=k, random_state=42)
    elif method == 'sparse_pca':
        matdecomp = SparsePCA(n_components=k, tol=0.1, random_state=42)
    matdecomp_loadings = matdecomp.fit_transform(z_vbm_data_array)

    # visualisation of components
    matdecomp_weights = matdecomp.components_
    nifti_4d_components = masker.inverse_transform(matdecomp_weights)
    for ii in range(0, k, 9):
        print('visualisation of component %i of %i' % (ii + 1, k))
        cur_nifti = index_img(nifti_4d_components, ii)
        fname = '%s_%s_%i_comp_%i' % (meta_category, method, k, ii + 1)
        vis_dir1 = data_dir + '/data/processed/%s_%s_%s_%i/%s' % (
            modality, method, prior_group, k, meta_category)
        if not os.path.exists(vis_dir1):
            os.makedirs(vis_dir1)
        plot_stat_map(cur_nifti, cut_coords=(0, 0, 0), display_mode='ortho',
                      colorbar=True, black_bg=True, draw_cross=True,
                      bg_img=project_dir + '/data/raw/colin.nii',
                      output_file='%s/%s.png' % (vis_dir1, fname))
        cur_nifti.to_filename('%s/%s.nii.gz' % (vis_dir1, fname))

    FS_prior = matdecomp_loadings
    return (meta_category, FS_prior)
def find_cluster(label,masker,data_path,nbc): masked_label = np.zeros_like(label) #nbc = 4 masked_label[label == nbc] = 1 print('\nData loaded successfully') filename_mask = os.path.join(data_path,'mask_inter.nii.gz') masker = NiftiMasker(mask_img=filename_mask, detrend=True,standardize=True) masker.fit() #now that we have the scores we create the masks score_map_img = masker.inverse_transform(masked_label) plotting.plot_roi(score_map_img, bg_img=meanepi, title="Results of the clustering", cut_coords = 5, display_mode='z', aspect=1.25) plt.close() unique, counts = np.unique(masked_label, return_counts=True) print(dict(zip(unique,counts))) score_map_img = masker.inverse_transform(masked_label) masker = NiftiMasker(mask_img=score_map_img, detrend=True,standardize=True) masker.fit() return masker
def preprocess_varpar(num, subj, subj_dir, **kwargs):
    """Fit an FIR GLM (OLS) to one run and save the standardized residuals
    as 'run00<num>.nii.gz' in subj_dir. Extra kwargs are forwarded to
    make_design_matrix."""
    from nistats.design_matrix import make_design_matrix
    from nistats.first_level_model import run_glm
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    mask = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1',
                        'brain_mask.nii.gz')
    bold = load(bold_path)
    masker = NiftiMasker(mask)
    data = masker.fit_transform(bold)
    # FIR design sampled at TR with polynomial drift regressors.
    dmat = make_design_matrix(np.arange(data.shape[0])*TR, hrf_model='fir',
                              drift_order=5, **kwargs)
    labels, results = run_glm(data, dmat, noise_model='ols', verbose=1)
    # Standardize residuals voxel-wise, then unmask back to image space.
    img = masker.inverse_transform(
        StandardScaler().fit_transform(results[0.0].resid))
#    return StandardScaler().fit_transform(results[0.0].resid)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
def test_nifti_labels_masker_with_mask():
    """Signals must be identical whether the labels image is used as-is or
    first masked with the same mask image."""
    shape = (13, 11, 12)
    affine = np.eye(4)
    fmri_img, mask_img = generate_random_img(shape, affine=affine, length=3)
    labels_img = data_gen.generate_labeled_regions(shape, affine=affine,
                                                   n_regions=7)

    reference = NiftiLabelsMasker(
        labels_img, resampling_target=None,
        mask_img=mask_img).fit().transform(fmri_img)

    # Mask the labels image itself via a transform/inverse round trip.
    bg_masker = NiftiMasker(mask_img).fit()
    masked_labels_img = bg_masker.inverse_transform(
        bg_masker.transform(labels_img))
    candidate = NiftiLabelsMasker(
        masked_labels_img, resampling_target=None,
        mask_img=mask_img).fit().transform(fmri_img)

    assert np.allclose(reference, candidate)
def get_fc(func_mni_filename, atlas_filename, mask_filename,
           confounds_filename, output_filename, TR, TYPE, FWHM):
    """
    Extract connectivity matrix given atlas and processed fMRI data.
    """
    if FWHM == 0:
        # 0 means "no smoothing" for nilearn maskers.
        FWHM = None
    confounds = pd.read_csv(confounds_filename, sep='\t')
    # NOTE(review): global_signal is loaded but only used by the
    # commented-out confound regression below.
    global_signal = confounds['GlobalSignal'].values
    FD = confounds['FramewiseDisplacement'].values

    # get average signal value (used below to flag low-signal voxels)
    masker = NiftiMasker(mask_img=mask_filename, memory_level=1,
                         memory='nilearn_cache')
    mymean = mean_img(func_mni_filename)
    signal = masker.fit_transform(mymean)
    meansignal = np.mean(signal)

    labelsmasker = NiftiLabelsMasker(
        labels_img=atlas_filename,
        # mask_img = mask_filename,
        # smoothing_fwhm = FWHM, already smoothed
        t_r=TR,
        memory_level=1,
        memory='nilearn_cache')

    # X = labelsmasker.fit_transform(func_mni_filename, confounds = global_signal)
    X = labelsmasker.fit_transform(func_mni_filename)

    # Per-region count of voxels whose mean signal exceeds the SIGNAL_FR
    # fraction of the global mean.
    valid_voxels = labelsmasker.fit_transform(
        masker.inverse_transform(1 * (signal > meansignal * SIGNAL_FR)))

    nvols = X.shape[0]

    # Regions with too few valid voxels are excluded (set to NaN).
    invalid_regions = valid_voxels < VOXELS_THR
    X[:, np.where(invalid_regions)] = np.nan

    # Connectivity, then Fisher z-transform.
    fc = compute_fc(X, TYPE)
    fc = np.arctanh(fc)
    np.savetxt(output_filename, fc, delimiter=" ")

    # print average and max FD
    print("{};{};{};{};{}".format(np.mean(FD[1:]), np.max(FD[1:]),
                                  np.sum(FD[1:] > 0.2) / FD.size,
                                  np.sum(FD[1:] > 0.3) / FD.size,
                                  np.sum(invalid_regions)))
def filtered(PATH_GZ, Time_R):
    """High-pass filter a 4D NIfTI at 0.01 Hz and write 'f<basename>'
    into the current working directory.

    Parameters
    ----------
    PATH_GZ : str, path of the input image.
    Time_R : float, repetition time (TR) in seconds; sampling rate is 1/TR.

    Returns
    -------
    str, absolute path of the filtered image.
    """
    from nilearn.input_data import NiftiMasker
    from nilearn.signal import butterworth
    import os

    masker = NiftiMasker()
    signal = masker.fit_transform(PATH_GZ)
    X_filtered = butterworth(signals=signal, sampling_rate=1. / Time_R,
                             high_pass=0.01, copy=True)
    fmri_filtered = masker.inverse_transform(X_filtered)
    # Prefix the input's basename with 'f'. os.path.basename replaces the
    # previous manual rfind('/') slicing (same result, clearer intent).
    OutFile = 'f' + os.path.basename(PATH_GZ)
    fmri_filtered.to_filename(OutFile)
    out_file = os.path.abspath(OutFile)
    return out_file
def residualize_imgs(in_file, mask_file, confounds_file):
    ''' * takes 4d file, mask file & confounds as np.array
        * regresses out confounds (only within mask)
        * writes residualized nii
    '''
    from nilearn.input_data import NiftiMasker
    import os
    import numpy as np

    confound_mat = np.loadtxt(confounds_file)
    masker = NiftiMasker(mask_img=mask_file)
    # Passing confounds to fit_transform regresses them out of the
    # in-mask time series.
    residual_2d = masker.fit_transform(in_file, confounds=confound_mat)
    residual_img = masker.inverse_transform(residual_2d)
    out_file = os.path.join(os.getcwd(), 'residualized_data.nii.gz')
    residual_img.to_filename(out_file)
    return out_file
def correlate_roi_voxels(functional_run, mask, mean_ts, fwhm):
    """Correlate each ROI's mean time series with every in-mask voxel,
    Fisher-transform, and save one correlation image per ROI.

    Returns the list of saved image paths.
    """
    import nibabel as nib
    import numpy as np
    import os
    func_img = nib.load(functional_run)
    # NOTE(review): func_data and affine are loaded but never used below --
    # confirm they can be dropped.
    func_data = func_img.get_fdata()
    affine = func_img.affine

    # flatten 4D data to 2D using NiftiMasker
    from nilearn.input_data import NiftiMasker
    subcort_masker = NiftiMasker(mask_img=mask, smoothing_fwhm=fwhm,
                                 mask_strategy='epi')
    func_data_2D = subcort_masker.fit_transform(func_img)

    all_roi_corr_files = []
    for mx, mean_ts_file in enumerate(mean_ts):
        print('roi %d mean ts file: %s'%(mx, mean_ts_file))
        single_roi_mean_ts = np.loadtxt(mean_ts_file)
        print(single_roi_mean_ts.shape)
        # turn list into 2D array:
        single_ts = np.asarray(single_roi_mean_ts).reshape(-1,1)
        # correlate ROI timeseries with 2D functional data
        # must have single timeseries first! that way can just take
        # output[0,1:] later for all the correlation coefficients
        corr_data_mat = np.corrcoef(single_ts, func_data_2D, rowvar=False)
        # Fisher transform correlation values
        corr_data_2D = np.arctanh(corr_data_mat)[0,1:]
        # turn 2D correlation data back into 4D
        corr_data = subcort_masker.inverse_transform(corr_data_2D)
        # save image
        roi_corr_file = os.path.abspath('roi-%02d_correlations.nii.gz'%mx)
        nib.save(corr_data, roi_corr_file)
        # add filename to output list
        all_roi_corr_files.append(roi_corr_file)
    return all_roi_corr_files
def compute_components(estimator, config, return_type='img'):
    """Compute components from a FactoredClassifier estimator"""
    module = curate_module(estimator)
    components = module.embedder.linear.weight.detach().numpy()

    if config['data']['reduced']:
        # Project from dictionary (component) space back to voxel space.
        mask = fetch_mask()
        masker = NiftiMasker(mask_img=mask).fit()
        modl_atlas = fetch_atlas_modl()
        dictionary = modl_atlas['components_453_gm']
        dictionary = masker.transform(dictionary)
        components = components.dot(dictionary)
    if return_type == 'img':
        # NOTE(review): `masker` is only bound in the reduced branch above;
        # return_type='img' with non-reduced data would raise NameError --
        # confirm callers always pair 'img' with reduced data.
        components_img = masker.inverse_transform(components)
        return components_img
    elif return_type == 'arrays':
        return components
    else:
        raise ValueError
def test_dict_learning():
    # Recovery/smoke test for DictLearning on synthetic CanICA-style data.
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=8)
    mask = NiftiMasker(mask_img=mask_img).fit()
    dict_init = mask.inverse_transform(components)
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init,
                                 mask=mask_img,
                                 smoothing_fwhm=0., alpha=1)
    dict_learning_auto_init = DictLearning(n_components=4, random_state=0,
                                           mask=mask_img,
                                           smoothing_fwhm=0., n_epochs=10,
                                           alpha=1)
    maps = {}
    for estimator in [dict_learning, dict_learning_auto_init]:
        estimator.fit(data)
        # Back-project the learned components to image space, then flatten
        # to (n_components, n_voxels) for comparison against ground truth.
        maps[estimator] = estimator.masker_. \
            inverse_transform(estimator.components_).get_data()
        maps[estimator] = np.reshape(np.rollaxis(maps[estimator], 3, 0),
                                     (4, 400))

    for this_dict_learning in [dict_learning]:
        these_maps = maps[this_dict_learning]
        # L2-normalize rows of both the true components and the recovered
        # maps; zero rows are left untouched to avoid division by zero.
        # NOTE: this mutates `components` in place.
        S = np.sqrt(np.sum(components ** 2, axis=1))
        S[S == 0] = 1
        components /= S[:, np.newaxis]
        S = np.sqrt(np.sum(these_maps ** 2, axis=1))
        S[S == 0] = 1
        these_maps /= S[:, np.newaxis]
        # Cosine-similarity matrix; require at least 2 of the 4 components
        # to be recovered with |cos| > 0.9.
        K = np.abs(components.dot(these_maps.T))
        recovered_maps = np.sum(K > 0.9)
        assert(recovered_maps >= 2)

    # Smoke test n_epochs > 1
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init,
                                 mask=mask_img,
                                 smoothing_fwhm=0., n_epochs=2, alpha=1)
    dict_learning.fit(data)
def maps_from_model(estimator, dictionary, target_encoder, standard_scaler,
                    lstsq=False):
    """Project per-study model coefficients back into voxel space.

    Parameters
    ----------
    estimator, target_encoder, standard_scaler :
        Passed through to ``coefs_from_model`` to obtain the coefficients.
    dictionary : Niimg-like
        Dictionary maps, masked to shape (n_components, n_voxels).
    lstsq : bool, optional
        If True, replace the dictionary by its Gram-inverse projection.

    Returns
    -------
    (dict, names)
        Mapping study -> Nifti image of coefficients, plus the names
        returned by ``coefs_from_model``.
    """
    coef_per_study, names = coefs_from_model(estimator, target_encoder,
                                             standard_scaler)
    masker = NiftiMasker(mask_img=fetch_mask()).fit()
    # components.shape = (n_components, n_voxels)
    dict_voxels = masker.transform(dictionary)
    if lstsq:
        gram = dict_voxels.dot(dict_voxels.T)
        dict_voxels = np.linalg.inv(gram).dot(dict_voxels)
    # NOTE(review): dict_voxels is computed but not used below — the
    # coefficients are unmasked directly; confirm this is intended.
    maps = {}
    for study, coef in coef_per_study.items():
        # coef.shape = (n_components, n_classes)
        maps[study] = masker.inverse_transform(coef)
    return maps, names
def GetOTM(path, name, spos, M):
    """Accumulate optimal-transport plans between consecutive volumes.

    Parameters
    ----------
    path : str
        Path to a 4D image (last axis = time).
    name : str
        Tag used in progress messages and in the output file name.
    spos : object
        Positions handed to ``datamask`` to extract the distributions.
    M : 2D np.ndarray
        Ground-cost matrix for ``ot.emd``.

    Side effect: saves the summed transport plan to ``stasMap<name>.npy``.
    NOTE(review): relies on module-level names ``mask_img``, ``datamask``
    and ``ot`` — confirm they are defined where this runs.
    """
    data = image.load_img(path)
    shape = data.shape
    l = shape[3]  # number of time points
    # fwhm=None: no extra smoothing here; the masker below smooths at 8mm
    dataS = smooth_img(data, fwhm=None)
    masker = NiftiMasker(mask_img=mask_img, smoothing_fwhm=8.0,
                         memory='nilearn_cache', memory_level=1)
    masker = masker.fit()
    # Mask + smooth, then reconstruct a dense 4D array.
    data2d = masker.transform(dataS)
    data3d = masker.inverse_transform(data2d)
    odata = data3d.get_fdata()
    odata = odata[:, :, :, :]
    h, j = M.shape
    stasMap = np.zeros(h*j)
    stasMap = stasMap.reshape(h, j)
    for i in range(l-1):
        # Transport plan between volume i and volume i+1, accumulated.
        d1 = datamask(odata[:, :, :, i], spos)
        d2 = datamask(odata[:, :, :, i + 1], spos)
        T = ot.emd(d1, d2, M)
        stasMap = stasMap + T
        print("finish%s" % (name), (i/(l-1))*100, "%")
    np.save("stasMap%s.npy" % (name,), stasMap)
def invert_preprocessor_scaling(brain_data, preprocessor):
    """Map preprocessed (rescaled) voxel data back onto the original grid.

    Parameters
    ----------
    brain_data : np.ndarray
        Voxel data on the preprocessor's downscaled grid.
    preprocessor : object
        Exposes ``brain_data_mask()`` (the original-grid mask image) and
        ``scale`` (the shape scaling factor that was applied).

    Returns
    -------
    Nifti1Image
        Data resampled to the mask's grid, with voxels outside the mask
        zeroed out.
    """
    mask_img = preprocessor.brain_data_mask()

    # Affine of the downscaled grid: scale the voxel-to-world block by the
    # inverse of the shape scaling the preprocessor applied.
    scaled_shape = tuple(int(float(dim) * preprocessor.scale)
                         for dim in mask_img.shape)
    multiplier = float(mask_img.shape[0]) / float(scaled_shape[0])
    scaled_affine = mask_img.get_affine().copy()
    scaled_affine[:3, :3] *= multiplier

    # Resample the data back onto the mask's grid.
    data_img = nibabel.Nifti1Image(brain_data, scaled_affine)
    resampled_img = resample_img(data_img,
                                 target_affine=mask_img.get_affine(),
                                 target_shape=mask_img.shape,
                                 interpolation='continuous')

    # Round-trip through the masker to zero out non-grey-matter voxels.
    masker = NiftiMasker(mask_img=mask_img, standardize=False)
    masker.fit()
    return masker.inverse_transform(masker.transform(resampled_img))
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False, group_mode=False):
    # Warp (group mode) or mask a single BOLD run, standardize/detrend it
    # through a NiftiMasker round-trip, and save it as run00<num>.nii.gz.
    # Python 2 script; relies on module-level DATA_DIR, subprocess, os,
    # NiftiMasker and save.
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')
    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)
    if group_mode:
        # Warp into group template space with FSL applywarp; the result is
        # cached on disk unless force_warp is set.
        if force_warp or not os.path.exists(output_path):
            print 'Warping image #%i...' % num
            subprocess.call(['fsl5.0-applywarp', '-i', bold_path, '-o', output_path, '-r', template_path, '-w', warp_path, '-d', 'float'])
        else:
            print 'Reusing cached warp image #%i' % num
        mask = None  # let the masker compute its own mask
        bold = output_path
    else:
        mask = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'brain_mask.nii.gz')
        bold = bold_path
    # Standardize + detrend, then reconstruct a full image.
    masker = NiftiMasker(mask, standardize=True, detrend=True)
    img = masker.inverse_transform(masker.fit_transform(bold))
    print 'Saving image #%i...' % num
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print 'Finished with image #%i' % num
def calc_epi_image_from_power(eeg, mri, D=3, dt=0.2):
    """Create source image from power data

    Parameters
    ----------
    eeg : dict
        EEG data with 'baseline' and 'seizure' entries; each receives an
        'img' entry computed by ``map_seeg_data``.
    mri : string
        path to file containing MRI
    D : float, optional
        presumably the duration (seconds) covered per condition; together
        with `dt` it fixes the number of samples — TODO confirm units
    dt : float, optional
        time step used to split `D` into ``int(D / dt)`` samples

    Returns
    -------
    Nifti1Image
        Image where voxel values represent corresponding t-values
    """
    eeg['baseline']['img'], eeg['seizure']['img'] = \
        map_seeg_data(eeg, mri)
    base_img = eeg['baseline']['img']
    seiz_img = eeg['seizure']['img']
    nifti_masker = NiftiMasker()
    base_masked = nifti_masker.fit_transform(base_img)
    seiz_masked = nifti_masker.transform(seiz_img)
    data = np.concatenate((base_masked, seiz_masked))
    steps = int(D / dt)
    # Labels: first `steps` rows are baseline (0), next `steps` seizure (1).
    labels = np.zeros(2 * steps, dtype=(int))
    labels[steps:] = 1
    __, t_scores, _ = permuted_ols(
        labels, data,
        # + intercept as a covariate by default
        n_perm=10000, two_sided_test=True,
        n_jobs=2)  # can be changed to use more CPUs
    return nifti_masker.inverse_transform(t_scores)
n_subjects = 100 # more subjects requires more memory ### Load Oasis dataset ######################################################## dataset_files = datasets.fetch_oasis_vbm(n_subjects=n_subjects) age = dataset_files.ext_vars['age'].astype(float) ### Preprocess data ########################################################### nifti_masker = NiftiMasker( standardize=False, smoothing_fwhm=2, memory='nilearn_cache') # cache options # remove features with too low between-subject variance gm_maps_masked = nifti_masker.fit_transform(dataset_files.gray_matter_maps) gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0. # final masking new_images = nifti_masker.inverse_transform(gm_maps_masked) gm_maps_masked = nifti_masker.fit_transform(new_images) n_samples, n_features = gm_maps_masked.shape print n_samples, "subjects, ", n_features, "features" ### Prediction with SVR ####################################################### print "ANOVA + SVR" ### Define the prediction function to be used. # Here we use a Support Vector Classification, with a linear kernel from sklearn.svm import SVR svr = SVR(kernel='linear') ### Dimension reduction from sklearn.feature_selection import SelectKBest, f_regression # Here we use a classical univariate feature selection based on F-test,
# Script fragment (Python 2): fit a TV-kSP logistic model and plot the
# coefficient map. Relies on X_train/X_test/y_train/y_test, mask, masker,
# background_img and condition_mask_* defined earlier in the original file.
X_train, y_train, _, y_train_mean, _ = center_data(X_train, y_train,
                                                   fit_intercept=True,
                                                   normalize=False, copy=False)
# Standardize the test set using the training-set statistics.
X_test -= X_train.mean(axis=0)
X_test /= np.std(X_train, axis=0)
alpha = 1
ratio = 0.5
k = 200
solver_params = dict(tol=1e-6, max_iter=5000, prox_max_iter=100)
init = None
w, obj, init = tvksp_solver(X_train, y_train, alpha, ratio, k, mask=mask,
                            init=init, loss="logistic", verbose=1,
                            **solver_params)
# The last weight is the intercept.
coef = w[:-1]
intercept = w[-1]
coef_img = masker.inverse_transform(coef)
y_pred = np.sign(X_test.dot(coef) + intercept)
accuracie = (y_pred == y_test).mean()
print
print "Results"
print "=" * 80
plot_stat_map(coef_img, background_img,
              title="accuracy %g%% , k: %d, alpha: %d, ratio: %.2f"
              % (accuracie, k, alpha, ratio),
              cut_coords=(37, -48, -15), threshold=1e-7)
print "Number of train samples : %i" % condition_mask_train.sum()
print "Number of test samples : %i" % condition_mask_test.sum()
# Script fragment (Python 2): Ward parcellation of masked PET data.
mask = masker.mask_img_.get_data().astype(np.bool)
shape = mask.shape
# Voxel-adjacency graph restricted to the mask.
connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
                                   n_z=shape[2], mask=mask)

# Computing the ward for the first time, this is long...
start = time.time()
ward = WardAgglomeration(n_clusters=1000, connectivity=connectivity,
                         memory='nilearn_cache')
ward.fit(pet_masked[0])
print "Ward agglomeration 1000 clusters: %.2fs" % (time.time() - start)

# Shift labels by 1 so that 0 remains background in the output image.
labels = ward.labels_ + 1
labels_img = masker.inverse_transform(labels)
first_plot = plot_roi(labels_img, pet_img[0], title="Ward parcellation",
                      display_mode='xz')
# labels_img is a Nifti1Image object, it can be saved to file with the
# following code:
labels_img.to_filename('parcellation.nii')

"""
##################################################################
# Compute the ward with more clusters, should be faster as we are using
# the caching mechanism
start = time.time()
ward = WardAgglomeration(n_clusters=2000, connectivity=connectivity,
                         memory='nilearn_cache')
ward.fit(fmri_masked)
# Script fragment: cross-validate an SVC, then back-project and plot the
# weights. Relies on cv, svc, fmri_masked, target, cv_scores, nifti_masker
# and func_filename defined earlier in the original file.
for train, test in cv:
    svc.fit(fmri_masked[train], target[train])
    prediction = svc.predict(fmri_masked[test])
    # fraction of correctly classified test samples for this fold
    cv_scores.append(np.sum(prediction == target[test])
                     / float(np.size(target[test])))
print(cv_scores)

### Unmasking #################################################################
# Retrieve the SVC discriminating weights
coef_ = svc.coef_
# Reverse masking thanks to the Nifti Masker
coef_img = nifti_masker.inverse_transform(coef_)
# Save the coefficients as a Nifti image
coef_img.to_filename('haxby_svc_weights.nii')

### Visualization #############################################################
import matplotlib.pyplot as plt
from nilearn.image.image import mean_img
from nilearn.plotting import plot_roi, plot_stat_map

mean_epi = mean_img(func_filename)
plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx")
plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx")
plt.show()
class FirstLevelGLM(BaseEstimator, TransformerMixin, CacheMixin):
    """ Implementation of the General Linear Model for
    Single-session fMRI data.

    Parameters
    ----------
    mask: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional,
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters.

    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to nilearn.image.resample_img. Please see the
        related documentation for details.

    target_shape: 3-tuple of integers, optional
        This parameter is passed to nilearn.image.resample_img. Please see the
        related documentation for details.

    low_pass: False or float, optional
        This parameter is passed to nilearn.signal.clean. Please see the
        related documentation for details.

    high_pass: False or float, optional
        This parameter is passed to nilearn.signal.clean. Please see the
        related documentation for details.

    t_r: float, optional
        This parameter is passed to nilearn.signal.clean. Please see the
        related documentation for details.

    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal.

    memory: instance of joblib.Memory or string
        Used to cache the masking process. By default, no caching is done.
        If a string is given, it is the path to the caching directory.

    memory_level: integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.

    standardize : boolean, optional
        If standardize is True, the time-series are centered and normed:
        their variance is put to 1 in the time dimension.

    percent_signal_change: bool, optional,
        If True, fMRI signals are scaled to percent of the mean value.
        Incompatible with standardize (standardize=False is enforced when
        percent_signal_change is True).

    n_jobs : integer, optional
        The number of CPUs to use to do the computation.
        -1 means 'all CPUs', -2 'all CPUs but one', and so on.

    verbose : integer, optional
        Indicate the level of verbosity. By default, nothing is printed.

    noise_model : {'ar1', 'ols'}, optional
        the temporal variance model. Defaults to 'ar1'.

    Attributes
    ----------
    labels : array of shape (n_voxels,),
        a map of values on voxels used to identify the corresponding model

    results : dict,
        with keys corresponding to the different labels values
        values are RegressionResults instances corresponding to the voxels
    """
    def __init__(self, mask=None, target_affine=None, target_shape=None,
                 low_pass=None, high_pass=None, t_r=None, smoothing_fwhm=None,
                 memory=Memory(None), memory_level=1, standardize=False,
                 percent_signal_change=True, verbose=1, n_jobs=1,
                 noise_model='ar1'):
        self.mask = mask
        self.memory = memory
        self.memory_level = memory_level
        self.verbose = verbose
        self.standardize = standardize
        self.n_jobs = n_jobs
        self.low_pass = low_pass
        self.high_pass = high_pass
        self.t_r = t_r
        self.target_affine = target_affine
        self.target_shape = target_shape
        self.smoothing_fwhm = smoothing_fwhm
        self.noise_model = noise_model
        self.percent_signal_change = percent_signal_change
        # percent_signal_change and standardize are mutually exclusive;
        # percent_signal_change wins.
        if self.percent_signal_change:
            self.standardize = False

    def fit(self, imgs, design_matrices):
        """ Fit the GLM

        1. does a masker job: fMRI_data -> Y
        2. fit an ols regression to (Y, X)
        3. fit an AR(1) regression of require
        This results in an internal (labels_, regression_results_) parameters

        Parameters
        ----------
        imgs: Niimg-like object or list of Niimg-like objects,
            See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
            Data on which the GLM will be fitted. If this is a list,
            the affine is considered the same for all.

        design_matrices: pandas DataFrame or list of pandas DataFrames,
            fMRI design matrices
        """
        # First, learn the mask
        if not isinstance(self.mask, NiftiMasker):
            self.masker_ = NiftiMasker(
                mask_img=self.mask, smoothing_fwhm=self.smoothing_fwhm,
                target_affine=self.target_affine,
                standardize=self.standardize, low_pass=self.low_pass,
                high_pass=self.high_pass, mask_strategy='epi',
                t_r=self.t_r, memory=self.memory,
                verbose=max(0, self.verbose - 1),
                target_shape=self.target_shape,
                memory_level=self.memory_level)
        else:
            # Reuse the provided masker, but let our own non-None
            # parameters override its settings (with a warning).
            self.masker_ = clone(self.mask)
            for param_name in ['target_affine', 'target_shape',
                               'smoothing_fwhm', 'low_pass', 'high_pass',
                               't_r', 'memory', 'memory_level']:
                our_param = getattr(self, param_name)
                if our_param is None:
                    continue
                if getattr(self.masker_, param_name) is not None:
                    warn('Parameter %s of the masker overriden' % param_name)
                setattr(self.masker_, param_name, our_param)

        # make design_matrices a list of arrays
        if isinstance(design_matrices, (str, pd.DataFrame)):
            design_matrices_ = [design_matrices]
        else:
            design_matrices_ = [X for X in design_matrices]

        design_matrices = []
        for design_matrix in design_matrices_:
            if isinstance(design_matrix, str):
                loaded = pd.read_csv(design_matrix, index_col=0)
                design_matrices.append(loaded.values)
            elif isinstance(design_matrix, pd.DataFrame):
                design_matrices.append(design_matrix.values)
            else:
                raise TypeError(
                    'Design matrix can only be a pandas DataFrames or a '
                    'string. A %s was provided' % type(design_matrix))

        # make imgs a list of Nifti1Images
        if isinstance(imgs, (Nifti1Image, str)):
            imgs = [imgs]
        if len(imgs) != len(design_matrices):
            raise ValueError(
                'len(imgs) %d does not match len(design_matrices) %d'
                % (len(imgs), len(design_matrices)))

        # Loop on imgs and design matrices
        self.labels_, self.results_ = [], []
        self.masker_.fit(imgs)
        for X, img in zip(design_matrices, imgs):
            Y = self.masker_.transform(img)
            if self.percent_signal_change:
                Y, _ = percent_mean_scaling(Y)
            labels_, results_ = session_glm(
                Y, X, noise_model=self.noise_model, bins=100)
            self.labels_.append(labels_)
            self.results_.append(results_)
        return self

    def transform(self, con_vals, contrast_type=None, contrast_name='',
                  output_z=True, output_stat=False, output_effects=False,
                  output_variance=False):
        """Generate different outputs corresponding to the contrasts
        provided e.g. z_map, t_map, effects and variance.
        In multi-session case, outputs the fixed effects map.

        Parameters
        ----------
        con_vals : array or list of arrays of shape (n_col) or (n_dim, n_col)
            where ``n_col`` is the number of columns of the design matrix,
            numerical definition of the contrast (one array per run)

        contrast_type : {'t', 'F'}, optional
            type of the contrast

        contrast_name : str, optional
            name of the contrast

        output_z : bool, optional
            Return or not the corresponding z-stat image

        output_stat : bool, optional
            Return or not the base (t/F) stat image

        output_effects : bool, optional
            Return or not the corresponding effect image

        output_variance : bool, optional
            Return or not the corresponding variance image

        Returns
        -------
        output_images : list of Nifti1Images
            The desired output images
        """
        # Robust not-fitted check: before fit(), labels_/results_ do not
        # exist at all, so the original attribute access raised
        # AttributeError instead of the intended ValueError.
        if getattr(self, 'labels_', None) is None or \
                getattr(self, 'results_', None) is None:
            raise ValueError('The model has not been fit yet')

        if isinstance(con_vals, np.ndarray):
            con_vals = [con_vals]
        if len(con_vals) != len(self.results_):
            raise ValueError(
                'contrasts must be a sequence of %d session contrasts'
                % len(self.results_))

        # Accumulate the per-session contrasts into a fixed-effects one.
        contrast = None
        for i, (labels_, results_, con_val) in enumerate(zip(
                self.labels_, self.results_, con_vals)):
            if np.all(con_val == 0):
                warn('Contrast for session %d is null' % i)
            contrast_ = compute_contrast(labels_, results_, con_val,
                                         contrast_type)
            if contrast is None:
                contrast = contrast_
            else:
                contrast = contrast + contrast_

        if output_z or output_stat:
            # compute the contrast and stat
            contrast.z_score()

        # Prepare the returned images
        do_outputs = [output_z, output_stat, output_effects, output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value', 'Estimated effect',
                    'Estimated variance']
        output_images = []
        for do_output, estimate, descrip in zip(
                do_outputs, estimates, descrips):
            if not do_output:
                continue
            estimate_ = getattr(contrast, estimate)
            if estimate_.ndim == 3:
                shape_ = estimate_.shape
                estimate_ = np.reshape(estimate_,
                                       (shape_[0] * shape_[1], shape_[2]))
            output = self.masker_.inverse_transform(estimate_)
            output.get_header()['descrip'] = (
                '%s of contrast %s' % (descrip, contrast_name))
            output_images.append(output)
        return output_images

    def fit_transform(self, design_matrices, fmri_images, con_vals,
                      contrast_type=None, contrast_name='', output_z=True,
                      output_stat=False, output_effects=False,
                      output_variance=False):
        """ Fit then transform. For more details,
        see FirstLevelGLM.fit and FirstLevelGLM.transform documentation"""
        # BUGFIX: fit() expects (imgs, design_matrices) — the two arguments
        # were previously passed in the wrong order. Also forward the
        # output flags instead of hard-coding the defaults.
        return self.fit(fmri_images, design_matrices).transform(
            con_vals, contrast_type, contrast_name,
            output_z=output_z, output_stat=output_stat,
            output_effects=output_effects, output_variance=output_variance)
grouped_conditions_encoded[2 * s + 1] = conditions_encoded[ session_face_mask][0] ############################################################################## # Perform massively univariate analysis with permuted OLS # # We use a two-sided t-test to compute p-values, but we keep trace of the # effect sign to add it back at the end and thus observe the signed effect from nilearn.mass_univariate import permuted_ols neg_log_pvals, t_scores_original_data, _ = permuted_ols( grouped_conditions_encoded, grouped_fmri_masked, # + intercept as a covariate by default n_perm=10000, two_sided_test=True, n_jobs=1) # can be changed to use more CPUs signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data) signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform( signed_neg_log_pvals) ############################################################################## # scikit-learn F-scores for comparison # # F-test does not allow to observe the effect sign (pure two-sided test) from nilearn._utils.fixes import f_regression _, pvals_bonferroni = f_regression( grouped_fmri_masked, grouped_conditions_encoded) # f_regression implicitly adds intercept pvals_bonferroni *= fmri_masked.shape[1] pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1 pvals_bonferroni[pvals_bonferroni > 1] = 1 neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni) neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform( neg_log_pvals_bonferroni)
print("Actual number of subjects after quality check: %d" % n_samples) ### Mask data ################################################################# nifti_masker = NiftiMasker( smoothing_fwhm=5, memory='nilearn_cache', memory_level=1) # cache options fmri_masked = nifti_masker.fit_transform(contrast_map_filenames) ### Anova (parametric F-scores) ############################################### from nilearn._utils.fixes import f_regression _, pvals_anova = f_regression(fmri_masked, tested_var, center=True) pvals_anova *= fmri_masked.shape[1] pvals_anova[np.isnan(pvals_anova)] = 1 pvals_anova[pvals_anova > 1] = 1 neg_log_pvals_anova = - np.log10(pvals_anova) neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( neg_log_pvals_anova) ### Perform massively univariate analysis with permuted OLS ################### neg_log_pvals_permuted_ols, _, _ = permuted_ols( tested_var, fmri_masked, model_intercept=True, n_perm=5000, # 5,000 for the sake of time. Idealy, this should be 10,000 n_jobs=1) # can be changed to use more CPUs neg_log_pvals_permuted_ols_unmasked = nifti_masker.inverse_transform( np.ravel(neg_log_pvals_permuted_ols)) ### Visualization ############################################################# from nilearn.plotting import plot_stat_map # Various plotting parameters z_slice = 12 # plotted slice
# ....................................... # # We retrieve the SVC discriminating weights coef_ = svc.coef_ print(coef_) ########################################################################### # It's a numpy array print(coef_.shape) ########################################################################### # We need to turn it back into a Nifti image, in essence, "inverting" # what the NiftiMasker has done. # # For this, we can call inverse_transform on the NiftiMasker: coef_img = masker.inverse_transform(coef_) print(coef_img) ########################################################################### # coef_img is now a NiftiImage. # # We can save the coefficients as a nii.gz file: coef_img.to_filename('haxby_svc_weights.nii.gz') ########################################################################### # Plotting the SVM weights # ......................... # # We can plot the weights, using the subject's anatomical as a background from nilearn.plotting import plot_stat_map, show
### F-scores computation ###################################################### from nilearn.input_data import NiftiMasker # For decoding, standardizing is often very important nifti_masker = NiftiMasker(mask=mask_img, sessions=session, standardize=True, memory='nilearn_cache', memory_level=1) fmri_masked = nifti_masker.fit_transform(fmri_img) from sklearn.feature_selection import f_classif f_values, p_values = f_classif(fmri_masked, y) p_values = -np.log10(p_values) p_values[np.isnan(p_values)] = 0 p_values[p_values > 10] = 10 p_unmasked = nifti_masker.inverse_transform(p_values).get_data() ### Visualization ############################################################# import matplotlib.pyplot as plt # Use the fmri mean image as a surrogate of anatomical data from nilearn import image mean_fmri = image.mean_img(fmri_img).get_data() ### Searchlight results plt.figure(1) # searchlight.scores_ contains per voxel cross validation scores s_scores = np.ma.array(searchlight.scores_, mask=np.logical_not(process_mask)) plt.imshow(np.rot90(mean_fmri[..., picked_slice]), interpolation='nearest', cmap=plt.cm.gray) plt.imshow(np.rot90(s_scores[..., picked_slice]), interpolation='nearest',
### Fit and predict anova_svr.fit(gm_maps_masked, age) age_pred = anova_svr.predict(gm_maps_masked) ############################################################################# # Visualization # -------------- # Look at the SVR's discriminating weights coef = svr.coef_ # reverse feature selection coef = feature_selection.inverse_transform(coef) # reverse variance threshold coef = variance_threshold.inverse_transform(coef) # reverse masking weight_img = nifti_masker.inverse_transform(coef) # Create the figure from nilearn.plotting import plot_stat_map, show bg_filename = gray_matter_map_filenames[0] z_slice = 0 fig = plt.figure(figsize=(5.5, 7.5), facecolor='k') # Hard setting vmax to highlight weights more display = plot_stat_map(weight_img, bg_img=bg_filename, display_mode='z', cut_coords=[z_slice], figure=fig, vmax=1) display.title('SVM weights', y=1.2) # Measure accuracy with cross validation
def make_functional_derivatives(population, workspace_dir, freesurfer_dir, derivatives_dir):
    # Build functional derivatives for every subject and denoising variant:
    # (1) seed-based correlation maps + FreeSurfer surface projections,
    # (2) Power-264 connectomes.
    # Python 2 script; relies on module-level globals: seeds, mkdir_path,
    # nb, np, os, NiftiLabelsMasker, NiftiMasker, datasets, input_data,
    # connectome.
    print '========================================================================================'
    print ''
    print ' Tourettome - 008. CREATING FUNCTIONAL FEATURES '
    print ''
    print '========================================================================================'

    # global IO
    sca_dir = mkdir_path(os.path.join(derivatives_dir, 'func_seed_correlation'))
    gm_group_mask = os.path.join(derivatives_dir, 'func_centrality/GROUP_GM_FUNC_3mm.nii')

    count = 0
    for subject in population:
        count += 1
        print '###################################################################'
        print 'Extracting functional derivatives for subject %s' % subject

        # subject I/0
        subject_dir = os.path.join(workspace_dir, subject)

        for denoise_type in ['compcor', 'gsr', 'censor', 'gsr_censor']:
            print 'Calculating Derivatives for denoise type:', denoise_type
            sca_dir = mkdir_path(os.path.join(derivatives_dir, 'func_seed_correlation', denoise_type))

            func_denoised = os.path.join(subject_dir, 'DENOISE', 'residuals_%s' % denoise_type, 'residual.nii.gz')

            if os.path.isfile(func_denoised):

                ################################################################################################################
                ### 1- Seed-Based Correlation
                ################################################################################################################

                print '1. Calculating SCA'

                for seed_name in seeds:
                    if not os.path.isfile(os.path.join(sca_dir, seed_name, '%s_sca_z.nii.gz' % subject)):
                        print seed_name
                        seed_dir = mkdir_path(os.path.join(sca_dir, seed_name))
                        # repetition time read from the NIfTI header
                        TR = nb.load(func_denoised).header['pixdim'][4]

                        # Extract seed timeseries
                        seed = seeds[seed_name]
                        masker_seed = NiftiLabelsMasker(labels_img=seed,
                                                        smoothing_fwhm=6, detrend=False, standardize=True,
                                                        low_pass=0.1, high_pass=0.01, t_r=TR,
                                                        memory='nilearn_cache', verbose=0,
                                                        )
                        timeseries_seed = masker_seed.fit_transform(func_denoised)
                        print 'seed_timeseries_shape', timeseries_seed.shape

                        # Extract brain timeseries
                        masker_brain = NiftiMasker(smoothing_fwhm=6, detrend=None, standardize=True,
                                                   low_pass=0.1, high_pass=0.01, t_r=TR,
                                                   memory='nilearn_cache', memory_level=1, verbose=0)
                        timeseries_brain = masker_brain.fit_transform(func_denoised)
                        print 'brain_timeseries_shape', timeseries_brain.shape

                        # Seed Based Correlation
                        # see Nilearn http://nilearn.github.io/auto_examples/03_connectivity/plot_seed_to_voxel_correlation.html#sphx-glr-auto-examples-03-connectivity-plot-seed-to-voxel-correlation-py
                        sca = np.dot(timeseries_brain.T, timeseries_seed) / timeseries_seed.shape[0]
                        sca_rz = np.arctanh(sca)  # Fisher r-to-z
                        print("seed-based correlation R: min = %.3f; max = %.3f" % (sca.min(), sca.max()))
                        print("seed-based correlation R-to-Z : min = %.3f; max = %.3f" % (sca_rz.min(), sca_rz.max()))

                        # Save seed-to-brain correlation as a Nifti image
                        # NOTE(review): the file is named *_sca_z but stores
                        # the untransformed correlations (sca), not sca_rz —
                        # confirm which map is intended.
                        sca_img = masker_brain.inverse_transform(sca.T)
                        sca_img.to_filename(os.path.join(seed_dir, '%s_sca_z.nii.gz' % subject))

                        ######### SKIP since already smoothed with nilearn
                        # smooth
                        # Smoothing kernel
                        # FWHM = 6
                        # sigma = FWHM / 2.35482004503
                        # os.chdir(seed_dir)
                        # os.system('fslmaths %s_sca_z -s %s %s_sca_z_fwhm6.nii.gz'%(subject, sigma, subject))

                        # skip the nilearn approach..... do it with freesurfer...
                        # Map seed-to-voxel onto surface
                        # sca_lh = surface.vol_to_surf(sca_img, fsaverage5['pial_left']).ravel()
                        # sca_rh = surface.vol_to_surf(sca_img, fsaverage5['pial_right']).ravel()
                        #
                        # # Save seed-to-vertex correlation as a txt file
                        # np.save(os.path.join(seed_dir, '%s_sca_z_fwhm6_lh.npy'%subject), sca_lh)
                        # np.save(os.path.join(seed_dir, '%s_sca_z_fwhm6_rh.npy'%subject), sca_rh)

                    ####################
                    seed_dir = os.path.join(sca_dir, seed_name)

                    if not os.path.isfile(os.path.join(seed_dir, '%s_sca_z_fsaverage5_fwhm10_rh.mgh' % subject)):
                        os.chdir(seed_dir)
                        for hemi in ['lh', 'rh']:
                            # vol2surf
                            os.system('mri_vol2surf '
                                      '--mov %s_sca_z.nii.gz '
                                      '--regheader %s '
                                      '--projfrac-avg 0.2 0.8 0.1 '
                                      '--interp nearest '
                                      '--hemi %s '
                                      '--out %s_sca_z_%s.mgh'
                                      % (subject, subject, hemi, subject, hemi))
                            # surf2surf (unsmoothed, then fwhm-10-smoothed)
                            os.system('mri_surf2surf '
                                      '--s %s '
                                      '--sval %s_sca_z_%s.mgh '
                                      '--hemi %s '
                                      '--trgsubject fsaverage5 '
                                      '--tval %s_sca_z_fsaverage5_fwhm00_%s.mgh'
                                      % (subject, subject, hemi, hemi, subject, hemi))
                            os.system('mri_surf2surf '
                                      '--s %s '
                                      '--sval %s_sca_z_%s.mgh '
                                      '--hemi %s '
                                      '--trgsubject fsaverage5 '
                                      '--fwhm-src 10 '
                                      '--tval %s_sca_z_fsaverage5_fwhm10_%s.mgh'
                                      % (subject, subject, hemi, hemi, subject, hemi))
                        # remove the intermediate native-surface files
                        os.system('rm -rf %s_sca_z_lh.mgh %s_sca_z_rh.mgh' % (subject, subject))

                ################################################################################################################
                ### 1- connectome
                ################################################################################################################

                print '2. Calculating Power-264 connectome'

                connectome_dir = mkdir_path(os.path.join(derivatives_dir, 'func_connectome', denoise_type))
                if not os.path.isfile(os.path.join(connectome_dir, '%s_power264_tangent.npy' % subject)):
                    # get power-264 coordinates
                    atlas = datasets.fetch_coords_power_2011()
                    coords = np.vstack((atlas.rois['x'], atlas.rois['y'], atlas.rois['z'])).T

                    # Extract signals
                    spheres_masker = input_data.NiftiSpheresMasker(seeds=coords, smoothing_fwhm=6, radius=5.,
                                                                   detrend=False, standardize=True,
                                                                   low_pass=0.1, high_pass=0.01, t_r=2.)
                    timeseries = spheres_masker.fit_transform(func_denoised)
                    print 'timsereies shape ', timeseries.shape

                    for cor_type in ['correlation', 'tangent']:
                        if not os.path.isfile(os.path.join(connectome_dir, '%s_power264_%s.npy' % (subject, cor_type))):
                            correlation_measure = connectome.ConnectivityMeasure(kind=cor_type)
                            cmat = correlation_measure.fit_transform([timeseries])[0]
                            np.save(os.path.join(connectome_dir, '%s_power264_%s.npy' % (subject, cor_type)), cmat)

            else:
                print 'Need denoising first'
### Mask data ################################################################# nifti_masker = NiftiMasker( smoothing_fwhm=5, memory='nilearn_cache', memory_level=1) # cache options cmap_filenames = localizer_dataset.cmaps fmri_masked = nifti_masker.fit_transform(cmap_filenames) ### Anova (parametric F-scores) ############################################### from nilearn._utils.fixes import f_regression _, pvals_anova = f_regression(fmri_masked, tested_var, center=False) # do not remove intercept pvals_anova *= fmri_masked.shape[1] pvals_anova[np.isnan(pvals_anova)] = 1 pvals_anova[pvals_anova > 1] = 1 neg_log_pvals_anova = - np.log10(pvals_anova) neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform( neg_log_pvals_anova) ### Visualization ############################################################# from nilearn.plotting import plot_stat_map # Various plotting parameters z_slice = 45 # plotted slice from nilearn.image.resampling import coord_transform affine = neg_log_pvals_anova_unmasked.get_affine() _, _, k_slice = coord_transform(0, 0, z_slice, linalg.inv(affine)) k_slice = np.round(k_slice) threshold = - np.log10(0.1) # 10% corrected # Plot Anova p-values
# Script fragment: permuted OLS vs RPBI group comparison (ADNI baseline).
neg_log_pvals, t_scores, _ = permuted_ols(gr_labels, gr_f,
                                          n_perm=1000, n_jobs=6,
                                          model_intercept=True)
print('RPBI')
# NOTE(review): permuted_ols above tests `gr_labels` while RPBI below tests
# `test_var` — confirm this asymmetry is intended.
neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
    test_var, gr_f,  # + intercept as a covariate by default
    np.asarray(masker.mask_img_.get_data()).astype(bool),
    n_parcellations=30,  # 30 for the sake of time, 100 is recommended
    n_parcels=1000,
    threshold='auto',
    n_perm=1000,  # 1,000 for the sake of time. 10,000 is recommended
    n_jobs=6, verbose=100)
neg_log_pvals_rpbi_unmasked = masker.inverse_transform(
    np.ravel(neg_log_pvals_rpbi))

# Unmask only the first row of the t/p arrays for plotting.
tscore = masker.inverse_transform(t_scores[0])
pscore = masker.inverse_transform(neg_log_pvals[0])

t_path = os.path.join('figures',
                      'pmap_perm_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')
p_path = os.path.join('figures',
                      'pmap_rpbi_voxel_norm_'+gr[0]+'_'+gr[1]+'_baseline_adni')

# NOTE(review): the permutation p-map (pscore) is written to t_path while
# tscore is never plotted — looks like a mix-up; confirm before reuse.
plot_stat_map(pscore, img, output_file=t_path, black_bg=True,
              title='/'.join(gr))
plot_stat_map(neg_log_pvals_rpbi_unmasked, img, output_file=p_path,
              black_bg=True, title='/'.join(gr))
neg_log_pvals_rpbi_unmasked.to_filename(p_path+'.nii')
### Apply ICA ################################################################# from sklearn.decomposition import FastICA n_components = 20 ica = FastICA(n_components=n_components, random_state=42) components_masked = ica.fit_transform(data_masked.T).T # Normalize estimated components, for thresholding to make sense components_masked -= components_masked.mean(axis=0) components_masked /= components_masked.std(axis=0) # Threshold components_masked[components_masked < .8] = 0 # Now invert the masking operation, going back to a full 3D # representation component_img = masker.inverse_transform(components_masked) ### Visualize the results ##################################################### # Show some interesting components import matplotlib.pyplot as plt from nilearn import image from nilearn.plotting import plot_stat_map # Use the mean as a background mean_img = image.mean_img(func_filename) plot_stat_map(image.index_img(component_img, 5), mean_img) plot_stat_map(image.index_img(component_img, 12), mean_img) plt.show()
class Brain_Data(object):
    """Represent neuroimaging data as a 2D (images x voxels) array.

    Brain_Data is a class to represent neuroimaging data in python as a
    vector rather than a 3-dimensional matrix.  This makes it easier to
    perform data manipulation and analyses.  Masking/unmasking is delegated
    to a nilearn NiftiMasker stored on the instance.

    Args:
        data: nibabel data instance, file name, or list of files
        Y: Pandas DataFrame of training labels
        X: Pandas DataFrame Design Matrix for running univariate models
        mask: binary nifti file to mask brain data
            (defaults to MNI152_T1_2mm_brain_mask.nii.gz from the package
            resources)
        output_file: Name to write out to nifti file
        **kwargs: Additional keyword arguments to pass to the prediction
            algorithm

    NOTE: this implementation targets Python 2 (print statements, xrange,
    dict.iterkeys, cPickle).
    """

    def __init__(self, data=None, Y=None, X=None, mask=None, output_file=None, **kwargs):
        # --- Mask handling: accept Nifti1Image, file path, or default mask.
        if mask is not None:
            if not isinstance(mask, nib.Nifti1Image):
                if type(mask) is str:
                    if os.path.isfile(mask):
                        mask = nib.load(mask)
                else:
                    raise ValueError("mask is not a nibabel instance")
            self.mask = mask
        else:
            self.mask = nib.load(os.path.join(get_resource_path(), 'MNI152_T1_2mm_brain_mask.nii.gz'))
        self.nifti_masker = NiftiMasker(mask_img=self.mask)

        # --- Data handling: file name, list of files/images, or Nifti1Image.
        if data is not None:
            if type(data) is str:
                data = nib.load(data)
                self.data = self.nifti_masker.fit_transform(data)
            elif type(data) is list:
                # Load and transform each image in list separately
                # (nib.concat_images(data) can't handle images of different sizes)
                self.data = []
                for i in data:
                    if isinstance(i, six.string_types):
                        self.data.append(self.nifti_masker.fit_transform(nib.load(i)))
                    elif isinstance(i, nib.Nifti1Image):
                        self.data.append(self.nifti_masker.fit_transform(i))
                self.data = np.array(self.data)
            elif not isinstance(data, nib.Nifti1Image):
                raise ValueError("data is not a nibabel instance")
            # NOTE(review): if data IS a Nifti1Image, no branch above assigns
            # self.data, so the next line raises AttributeError — confirm and fix.

            # Collapse any extra dimension (e.g. a singleton image axis)
            if any([x == 1 for x in self.data.shape]):
                self.data = self.data.squeeze()
        else:
            self.data = np.array([])

        # --- Labels (Y): must be a DataFrame with one row per image.
        if Y is not None:
            if type(Y) is str:
                if os.path.isfile(Y):
                    Y = pd.read_csv(Y, header=None, index_col=None)
            if isinstance(Y, pd.DataFrame):
                if self.data.shape[0] != len(Y):
                    raise ValueError("Y does not match the correct size of data")
                self.Y = Y
            else:
                raise ValueError("Make sure Y is a pandas data frame.")
        else:
            self.Y = pd.DataFrame()

        # --- Design matrix (X): one row per image.
        if X is not None:
            if self.data.shape[0] != X.shape[0]:
                raise ValueError("X does not match the correct size of data")
            self.X = X
        else:
            self.X = pd.DataFrame()

        # NOTE(review): file_name defaults to [] rather than None/'' — callers
        # appear to rely only on truthiness; confirm before changing.
        if output_file is not None:
            self.file_name = output_file
        else:
            self.file_name = []

    def __repr__(self):
        return '%s.%s(data=%s, Y=%s, X=%s, mask=%s, output_file=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.shape(),
            len(self.Y),
            self.X.shape,
            os.path.basename(self.mask.get_filename()),
            self.file_name
        )

    def __getitem__(self, index):
        """Return a new Brain_Data with the selected image(s); Y and X are
        sliced to match.  An int index yields a flattened 1D data vector."""
        new = deepcopy(self)
        if isinstance(index, int):
            new.data = np.array(self.data[index, :]).flatten()
        else:
            new.data = np.array(self.data[index, :])
        if not self.Y.empty:
            new.Y = self.Y.iloc[index]
        if self.X.size:
            if isinstance(self.X, pd.DataFrame):
                new.X = self.X.iloc[index]
            else:
                new.X = self.X[index, :]
        return new

    def __setitem__(self, index, value):
        """Overwrite image(s) at index with another Brain_Data's data/Y/X."""
        if not isinstance(value, Brain_Data):
            raise ValueError('Make sure the value you are trying to set is a Brain_Data() instance.')
        self.data[index, :] = value.data
        if not value.Y.empty:
            self.Y.values[index] = value.Y
        if not value.X.empty:
            if self.X.shape[1] != value.X.shape[1]:
                raise ValueError('Make sure self.X is the same size as value.X.')
            self.X.values[index] = value.X

    def __len__(self):
        return self.shape()[0]

    def shape(self):
        """ Get images by voxels shape.

        Args:
            self: Brain_Data instance
        """
        return self.data.shape

    def mean(self):
        """ Get mean of each voxel across images.

        Args:
            self: Brain_Data instance

        Returns:
            out: Brain_Data instance
        """
        out = deepcopy(self)
        out.data = np.mean(out.data, axis=0)
        return out

    def std(self):
        """ Get standard deviation of each voxel across images.

        Args:
            self: Brain_Data instance

        Returns:
            out: Brain_Data instance
        """
        out = deepcopy(self)
        out.data = np.std(out.data, axis=0)
        return out

    def to_nifti(self):
        """ Convert Brain_Data Instance into Nifti Object

        Args:
            self: Brain_Data instance

        Returns:
            out: nibabel instance
        """
        return self.nifti_masker.inverse_transform(self.data)

    def write(self, file_name=None):
        """ Write out Brain_Data object to Nifti File.

        Args:
            self: Brain_Data instance
            file_name: name of nifti file
        """
        self.to_nifti().to_filename(file_name)

    def plot(self, limit=5, anatomical=None):
        """ Create a quick plot of self.data.  Will plot each image separately

        Args:
            limit: max number of images to return
            anatomical: nifti image or file name to overlay
        """
        if anatomical is not None:
            if not isinstance(anatomical, nib.Nifti1Image):
                if type(anatomical) is str:
                    anatomical = nib.load(anatomical)
                else:
                    raise ValueError("anatomical is not a nibabel instance")
        else:
            anatomical = get_anatomical()

        if self.data.ndim == 1:
            # Single image: one stat map.
            plot_stat_map(self.to_nifti(), anatomical, cut_coords=range(-40, 50, 10), display_mode='z',
                          black_bg=True, colorbar=True, draw_cross=False)
        else:
            # Multiple images: plot up to `limit` of them.
            for i in xrange(self.data.shape[0]):
                if i < limit:
                    # plot_roi(self.nifti_masker.inverse_transform(self.data[i,:]), self.anatomical)
                    # plot_stat_map(self.nifti_masker.inverse_transform(self.data[i,:]),
                    plot_stat_map(self[i].to_nifti(), anatomical, cut_coords=range(-40, 50, 10), display_mode='z',
                                  black_bg=True, colorbar=True, draw_cross=False)

    def regress(self):
        """ run vectorized OLS regression across voxels.

        Args:
            self: Brain_Data instance

        Returns:
            out: dictionary of regression statistics in Brain_Data instances
                {'beta','t','p','df','sigma','residual'}
        """
        if not isinstance(self.X, pd.DataFrame):
            raise ValueError('Make sure self.X is a pandas DataFrame.')
        if self.X.empty:
            raise ValueError('Make sure self.X is not empty.')
        if self.data.shape[0] != self.X.shape[0]:
            raise ValueError("self.X does not match the correct size of self.data")

        # OLS via pseudo-inverse: b = pinv(X) @ data, residuals, and
        # per-voxel standard errors from diag(inv(X'X))^.5 * sigma.
        b = np.dot(np.linalg.pinv(self.X), self.data)
        res = self.data - np.dot(self.X, b)
        sigma = np.std(res, axis=0)
        stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(self.X.T, self.X)))**.5).T, np.matrix(sigma))
        b_out = deepcopy(self)
        b_out.data = b
        t_out = deepcopy(self)
        t_out.data = b / stderr
        df = np.array([self.X.shape[0] - self.X.shape[1]] * t_out.data.shape[1])
        p_out = deepcopy(self)
        # Two-sided p-values from the t distribution.
        p_out.data = 2 * (1 - t.cdf(np.abs(t_out.data), df))

        # Might want to not output this info
        df_out = deepcopy(self)
        df_out.data = df
        sigma_out = deepcopy(self)
        sigma_out.data = sigma
        res_out = deepcopy(self)
        res_out.data = res

        return {'beta': b_out, 't': t_out, 'p': p_out, 'df': df_out, 'sigma': sigma_out, 'residual': res_out}

    def ttest(self, threshold_dict=None):
        """ Calculate one sample t-test across each voxel (two-sided)

        Args:
            self: Brain_Data instance
            threshold_dict: a dictionary of threshold parameters
                {'unc':.001} or {'fdr':.05}

        Returns:
            out: dictionary of regression statistics in Brain_Data
                instances {'t','p'}
        """
        # Notes:  Need to add FDR Option
        t = deepcopy(self)
        p = deepcopy(self)
        t.data, p.data = ttest_1samp(self.data, 0, 0)
        if threshold_dict is not None:
            if type(threshold_dict) is dict:
                if 'unc' in threshold_dict:
                    # Uncorrected Thresholding: NaN out voxels above the p cutoff
                    t.data[np.where(p.data > threshold_dict['unc'])] = np.nan
                elif 'fdr' in threshold_dict:
                    # NOTE(review): FDR branch is a silent no-op — confirm intended.
                    pass
            else:
                raise ValueError("threshold_dict is not a dictionary. Make sure it is in the form of {'unc':.001} or {'fdr':.05}")
        out = {'t': t, 'p': p}
        return out

    def append(self, data):
        """ Append data to Brain_Data instance

        Args:
            data: Brain_Data instance to append

        Returns:
            out: new appended Brain_Data instance
        """
        if not isinstance(data, Brain_Data):
            raise ValueError('Make sure data is a Brain_Data instance')
        if self.isempty():
            out = deepcopy(data)
        else:
            out = deepcopy(self)
            # NOTE(review): '&' binds tighter than '==' / '>' in Python, so
            # e.g. `len(self.shape())==1 & len(data.shape())==1` parses as a
            # chained comparison against (1 & len(...)) rather than the
            # intended `and` of two tests — these guards likely misfire.
            if len(self.shape()) == 1 & len(data.shape()) == 1:
                if self.shape()[0] != data.shape()[0]:
                    raise ValueError('Data is a different number of voxels then the weight_map.')
            elif len(self.shape()) == 1 & len(data.shape()) > 1:
                if self.shape()[0] != data.shape()[1]:
                    raise ValueError('Data is a different number of voxels then the weight_map.')
            elif len(self.shape()) > 1 & len(data.shape()) == 1:
                if self.shape()[1] != data.shape()[0]:
                    raise ValueError('Data is a different number of voxels then the weight_map.')
            elif self.shape()[1] != data.shape()[1]:
                raise ValueError('Data is a different number of voxels then the weight_map.')
            out.data = np.vstack([self.data, data.data])
            if out.Y.size:
                out.Y = self.Y.append(data.Y)
            if self.X.size:
                if isinstance(self.X, pd.DataFrame):
                    out.X = self.X.append(data.X)
                else:
                    out.X = np.vstack([self.X, data.X])
        return out

    def empty(self, data=True, Y=True, X=True):
        """ Initialize Brain_Data.data as empty (selectively clearing
        data, Y, and/or X on a deep copy of self). """
        tmp = deepcopy(self)
        if data:
            tmp.data = np.array([])
        if Y:
            tmp.Y = pd.DataFrame()
        if X:
            # NOTE(review): X is cleared to an ndarray here but initialized as
            # a DataFrame in __init__ — downstream isinstance checks handle both.
            tmp.X = np.array([])
            # tmp.data = np.array([]).reshape(0,n_voxels)
        return tmp

    def isempty(self):
        """ Check if Brain_Data.data is empty

        Returns: bool
        """
        # NOTE(review): if self.data is neither ndarray nor list, `boolean`
        # is unbound and this raises NameError — confirm acceptable.
        if isinstance(self.data, np.ndarray):
            if self.data.size:
                boolean = False
            else:
                boolean = True
        if isinstance(self.data, list):
            if not self.data:
                boolean = True
            else:
                boolean = False
        return boolean

    def similarity(self, image, method='correlation'):
        """ Calculate similarity of Brain_Data() instance with single
        Brain_Data or Nibabel image

        Args:
            self: Brain_Data instance of data to be applied
            image: Brain_Data or Nibabel instance of weight map
            method: 'correlation' (default) or 'dot_product'

        Returns:
            pexp: Outputs a vector of pattern expression values
        """
        if not isinstance(image, Brain_Data):
            if isinstance(image, nib.Nifti1Image):
                image = Brain_Data(image)
            else:
                raise ValueError("Image is not a Brain_Data or nibabel instance")
        dim = image.shape()

        # Check to make sure masks are the same for each dataset and if not
        # create a union mask.
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data() == 1) != np.sum(image.nifti_masker.mask_img.get_data() == 1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, image.nifti_masker.mask_img],
                                       threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(image.to_nifti())
        else:
            data2 = self.data
            image2 = image.data

        # Calculate pattern expression
        # NOTE(review): `method is 'dot_product'` compares string identity,
        # which relies on CPython interning — should be `==`.
        if method is 'dot_product':
            if len(image2.shape) > 1:
                if image2.shape[0] > 1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(np.dot(data2, image2[i, :]))
                    pexp = np.array(pexp)
                else:
                    pexp = np.dot(data2, image2)
            else:
                pexp = np.dot(data2, image2)
        elif method is 'correlation':
            if len(image2.shape) > 1:
                if image2.shape[0] > 1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(pearson(image2[i, :], data2))
                    pexp = np.array(pexp)
                else:
                    pexp = pearson(image2, data2)
            else:
                pexp = pearson(image2, data2)
        return pexp

    def distance(self, method='euclidean', **kwargs):
        """ Calculate distance between images within a Brain_Data() instance.

        Args:
            self: Brain_Data instance of data to be applied
            method: type of distance metric (can use any scikit learn or
                scipy metric)

        Returns:
            dist: Outputs a 2D distance matrix.
        """
        return pairwise_distances(self.data, metric=method, n_jobs=1)

    def multivariate_similarity(self, images, method='ols'):
        """ Predict spatial distribution of Brain_Data() instance from linear
        combination of other Brain_Data() instances or Nibabel images

        Args:
            self: Brain_Data instance of data to be applied
            images: Brain_Data instance of weight map

        Returns:
            out: dictionary of regression statistics in Brain_Data instances
                {'beta','t','p','df','sigma','residual'}
        """
        ## Notes:  Should add ridge, and lasso, elastic net options options
        if len(self.shape()) > 1:
            raise ValueError("This method can only decompose a single brain image.")
        if not isinstance(images, Brain_Data):
            raise ValueError("Images are not a Brain_Data instance")
        dim = images.shape()

        # Check to make sure masks are the same for each dataset and if not
        # create a union mask.
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data() == 1) != np.sum(images.nifti_masker.mask_img.get_data() == 1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, images.nifti_masker.mask_img],
                                       threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(images.to_nifti())
        else:
            data2 = self.data
            image2 = images.data

        # Add intercept and transpose (design is images x [1 + n_maps])
        image2 = np.vstack((np.ones(image2.shape[1]), image2)).T

        # Calculate pattern expression (OLS, same math as regress())
        if method is 'ols':
            b = np.dot(np.linalg.pinv(image2), data2)
            res = data2 - np.dot(image2, b)
            sigma = np.std(res, axis=0)
            stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(image2.T, image2)))**.5).T, np.matrix(sigma))
            t_out = b / stderr
            df = image2.shape[0] - image2.shape[1]
            p = 2 * (1 - t.cdf(np.abs(t_out), df))

        # NOTE(review): if method != 'ols' the return below raises NameError.
        return {'beta': b, 't': t_out, 'p': p, 'df': df, 'sigma': sigma, 'residual': res}

    def predict(self, algorithm=None, cv_dict=None, plot=True, **kwargs):
        """ Run prediction

        Args:
            algorithm: Algorithm to use for prediction.  Must be one of 'svm',
                'svr', 'linear', 'logistic', 'lasso', 'ridge',
                'ridgeClassifier','randomforest', or 'randomforestClassifier'
            cv_dict: Type of cross_validation to use. A dictionary of
                {'type': 'kfolds', 'n_folds': n},
                {'type': 'kfolds', 'n_folds': n, 'subject_id': holdout}, or
                {'type': 'loso', 'subject_id': holdout},
                where n = number of folds, and subject = vector of subject ids
                that corresponds to self.Y
            plot: Boolean indicating whether or not to create plots.
            **kwargs: Additional keyword arguments to pass to the prediction
                algorithm

        Returns:
            output: a dictionary of prediction parameters
        """
        # Set algorithm
        if algorithm is not None:
            predictor_settings = set_algorithm(algorithm, **kwargs)
        else:
            # Use SVR as a default
            predictor_settings = set_algorithm('svr', **{'kernel': "linear"})

        # Initialize output dictionary
        output = {}
        output['Y'] = np.array(self.Y).flatten()

        # Overall Fit for weight map (fit on ALL data — in-sample)
        predictor = predictor_settings['predictor']
        predictor.fit(self.data, output['Y'])
        output['yfit_all'] = predictor.predict(self.data)
        if predictor_settings['prediction_type'] == 'classification':
            if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
                output['prob_all'] = predictor.predict_proba(self.data)[:, 1]
            else:
                output['dist_from_hyperplane_all'] = predictor.decision_function(self.data)
                if predictor_settings['algorithm'] == 'svm' and predictor.probability:
                    output['prob_all'] = predictor.predict_proba(self.data)[:, 1]
        output['intercept'] = predictor.intercept_

        # Weight map: project coefficients back to voxel space for PCA-based
        # algorithms, otherwise take the raw coefficients.
        output['weight_map'] = self.empty()
        if predictor_settings['algorithm'] == 'lassopcr':
            output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_lasso'].coef_)
        elif predictor_settings['algorithm'] == 'pcr':
            output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_regress'].coef_)
        else:
            output['weight_map'].data = predictor.coef_.squeeze()

        # Cross-Validation Fit
        if cv_dict is not None:
            output['cv'] = set_cv(cv_dict)

            # NOTE(review): predictor_cv aliases the SAME estimator object as
            # `predictor` above, so CV refits clobber the all-data fit.
            predictor_cv = predictor_settings['predictor']
            output['yfit_xval'] = output['yfit_all'].copy()
            output['intercept_xval'] = []
            output['weight_map_xval'] = deepcopy(output['weight_map'])
            wt_map_xval = [];
            if predictor_settings['prediction_type'] == 'classification':
                if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
                    output['prob_xval'] = np.zeros(len(self.Y))
                else:
                    output['dist_from_hyperplane_xval'] = np.zeros(len(self.Y))
                    if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                        output['prob_xval'] = np.zeros(len(self.Y))

            for train, test in output['cv']:
                predictor_cv.fit(self.data[train], self.Y.loc[train])
                output['yfit_xval'][test] = predictor_cv.predict(self.data[test])
                if predictor_settings['prediction_type'] == 'classification':
                    if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
                        output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:, 1]
                    else:
                        output['dist_from_hyperplane_xval'][test] = predictor_cv.decision_function(self.data[test])
                        if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                            output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:, 1]
                output['intercept_xval'].append(predictor_cv.intercept_)

                # Weight map (per fold)
                if predictor_settings['algorithm'] == 'lassopcr':
                    wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_lasso'].coef_))
                elif predictor_settings['algorithm'] == 'pcr':
                    wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T, predictor_settings['_regress'].coef_))
                else:
                    wt_map_xval.append(predictor_cv.coef_.squeeze())
                output['weight_map_xval'].data = np.array(wt_map_xval)

        # Print Results
        if predictor_settings['prediction_type'] == 'classification':
            output['mcr_all'] = np.mean(output['yfit_all'] == np.array(self.Y).flatten())
            print 'overall accuracy: %.2f' % output['mcr_all']
            if cv_dict is not None:
                output['mcr_xval'] = np.mean(output['yfit_xval'] == np.array(self.Y).flatten())
                print 'overall CV accuracy: %.2f' % output['mcr_xval']
        elif predictor_settings['prediction_type'] == 'prediction':
            output['rmse_all'] = np.sqrt(np.mean((output['yfit_all'] - output['Y'])**2))
            output['r_all'] = np.corrcoef(output['Y'], output['yfit_all'])[0, 1]
            print 'overall Root Mean Squared Error: %.2f' % output['rmse_all']
            print 'overall Correlation: %.2f' % output['r_all']
            if cv_dict is not None:
                output['rmse_xval'] = np.sqrt(np.mean((output['yfit_xval'] - output['Y'])**2))
                output['r_xval'] = np.corrcoef(output['Y'], output['yfit_xval'])[0, 1]
                print 'overall CV Root Mean Squared Error: %.2f' % output['rmse_xval']
                print 'overall CV Correlation: %.2f' % output['r_xval']

        # Plot
        if plot:
            if cv_dict is not None:
                if predictor_settings['prediction_type'] == 'prediction':
                    fig2 = scatterplot(pd.DataFrame({'Y': output['Y'], 'yfit_xval': output['yfit_xval']}))
                elif predictor_settings['prediction_type'] == 'classification':
                    if predictor_settings['algorithm'] not in ['svm', 'ridgeClassifier', 'ridgeClassifierCV']:
                        output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
                    else:
                        output['roc'] = Roc(input_values=output['dist_from_hyperplane_xval'], binary_outcome=output['Y'].astype('bool'))
                        if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                            output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
                    fig2 = output['roc'].plot()
                    # output['roc'].summary()
            fig1 = output['weight_map'].plot()

        return output

    def bootstrap(self, analysis_type=None, n_samples=10, save_weights=False, **kwargs):
        """ Bootstrap various Brain_Data analysis methods
        (e.g., mean, std, regress, predict).

        Args:
            analysis_type: Type of analysis to bootstrap
                (mean,std,regress,predict)
            n_samples: Number of samples to boostrap
            save_weights: if True, include the raw bootstrap samples in output
            **kwargs: Additional keyword arguments to pass to the analysis
                method

        Returns:
            output: a dictionary of summary images
                {'Z','p','mean'[,'samples']}
        """
        # Notes:
        # might want to add options for [studentized, percentile,
        # bias corrected, bias corrected accelerated] methods.
        # Regress method is pretty convoluted and slow, this should be
        # optimized better.

        def summarize_bootstrap(sample):
            """ Calculate summary of bootstrap samples

            Args:
                sample: Brain_Data instance of samples

            Returns:
                output: dictionary of Brain_Data summary images
            """
            output = {}

            # Calculate SE of bootstraps (z = mean / std across samples)
            wstd = sample.std()
            wmean = sample.mean()
            wz = deepcopy(wmean)
            wz.data = wmean.data / wstd.data
            wp = deepcopy(wmean)
            wp.data = 2 * (1 - norm.cdf(np.abs(wz.data)))
            # Create outputs
            output['Z'] = wz
            output['p'] = wp
            output['mean'] = wmean
            if save_weights:
                output['samples'] = sample
            return output

        analysis_list = ['mean', 'std', 'regress', 'predict']

        # NOTE(review): `is` string comparisons below rely on CPython string
        # interning — should be `==`.
        if analysis_type in analysis_list:
            data_row_id = range(self.shape()[0])
            sample = self.empty()
            if analysis_type is 'regress':
                # initialize dictionary of empty betas
                beta = {}
                for i in range(self.X.shape[1]):
                    beta['b' + str(i)] = self.empty()

            for i in range(n_samples):
                # Resample rows with replacement
                this_sample = np.random.choice(data_row_id, size=len(data_row_id), replace=True)  # gives sampled row numbers
                if analysis_type is 'mean':
                    sample = sample.append(self[this_sample].mean())
                elif analysis_type is 'std':
                    sample = sample.append(self[this_sample].std())
                elif analysis_type is 'regress':
                    out = self[this_sample].regress()
                    # Aggegate bootstraps for each beta separately
                    # NOTE(review): inner `i` shadows the outer sample-loop `i`.
                    for i, b in enumerate(beta.iterkeys()):
                        beta[b] = beta[b].append(out['beta'][i])
                elif analysis_type is 'predict':
                    if 'algorithm' in kwargs:
                        algorithm = kwargs['algorithm']
                        del kwargs['algorithm']
                    else:
                        algorithm = 'ridge'
                    if 'cv_dict' in kwargs:
                        cv_dict = kwargs['cv_dict']
                        del kwargs['cv_dict']
                    else:
                        cv_dict = None
                    # NOTE(review): `'plot' in ['kwargs']` is always False —
                    # almost certainly meant `'plot' in kwargs`.
                    if 'plot' in ['kwargs']:
                        plot = kwargs['plot']
                        del kwargs['plot']
                    else:
                        plot = False
                    out = self[this_sample].predict(algorithm=algorithm, cv_dict=cv_dict, plot=plot, **kwargs)
                    sample = sample.append(out['weight_map'])
        else:
            raise ValueError('The analysis_type you specified (%s) is not yet implemented.' % (analysis_type))

        # Save outputs
        if analysis_type is 'regress':
            reg_out = {}
            for i, b in enumerate(beta.iterkeys()):
                reg_out[b] = summarize_bootstrap(beta[b])
            output = {}
            # Merge per-beta summaries into one dict of appended Brain_Data
            for b in reg_out.iteritems():
                for o in b[1].iteritems():
                    if o[0] in output:
                        output[o[0]] = output[o[0]].append(o[1])
                    else:
                        output[o[0]] = o[1]
        else:
            output = summarize_bootstrap(sample)
        return output

    def apply_mask(self, mask):
        """ Mask Brain_Data instance

        Args:
            mask: mask (Brain_Data or nifti object)

        Returns:
            masked: new masked Brain_Data instance
        """
        if isinstance(mask, Brain_Data):
            mask = mask.to_nifti()  # convert to nibabel
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str:
                if os.path.isfile(mask):
                    mask = nib.load(mask)
                # Check if mask need to be resampled into Brain_Data mask space
                if not ((self.mask.get_affine() == mask.get_affine()).all()) & (self.mask.shape[0:3] == mask.shape[0:3]):
                    mask = resample_img(mask, target_affine=self.mask.get_affine(), target_shape=self.mask.shape)
            else:
                raise ValueError("Mask is not a nibabel instance, Brain_Data instance, or a valid file name.")

        masked = deepcopy(self)
        nifti_masker = NiftiMasker(mask_img=mask)
        masked.data = nifti_masker.fit_transform(self.to_nifti())
        if len(self.data.shape) > 2:
            masked.data = masked.data.squeeze()
        masked.nifti_masker = nifti_masker
        return masked

    def resample(self, target):
        """ Resample data into target space

        Args:
            self: Brain_Data instance
            target: Brain_Data instance of target space
        """
        raise NotImplementedError()

    def searchlight(self, ncores, process_mask=None, parallel_out=None, radius=3, walltime='24:00:00',
                    email=None, algorithm='svr', cv_dict=None, kwargs={}):
        """Submit a parallel searchlight analysis to a PBS cluster.

        Pickles a PBS_Job describing the analysis, generates per-core PBS
        scripts, and qsub's them.  Side effects only (files + job submission).

        NOTE(review): `kwargs={}` is a mutable default argument — the default
        dict is shared across calls and is mutated below.
        """
        if len(kwargs) is 0:
            kwargs['kernel'] = 'linear'

        # new parallel job
        pbs_kwargs = {'algorithm': algorithm,
                      'cv_dict': cv_dict,
                      'predict_kwargs': kwargs}
        #cv_dict={'type': 'kfolds','n_folds': 5,'stratified':dat.Y}

        parallel_job = PBS_Job(self, parallel_out=parallel_out, process_mask=process_mask, radius=radius, kwargs=pbs_kwargs)

        # make and store data we will need to access on the worker core level
        parallel_job.make_searchlight_masks()
        cPickle.dump(parallel_job, open(os.path.join(parallel_out, "pbs_searchlight.pkl"), "w"))

        #make core startup script (python)
        parallel_job.make_startup_script("core_startup.py")

        # make email notification script (pbs)
        if type(email) is str:
            parallel_job.make_pbs_email_alert(email)

        # make pbs job submission scripts (pbs)
        for core_i in range(ncores):
            script_name = "core_pbs_script_" + str(core_i) + ".pbs"
            parallel_job.make_pbs_scripts(script_name, core_i, ncores, walltime)  # create a script
            print "python " + os.path.join(parallel_out, script_name)
            os.system("qsub " + os.path.join(parallel_out, script_name))  # run it on a core

    def extract_roi(self, mask, method='mean'):
        """ Extract activity from mask

        Args:
            mask: nibabel mask can be binary or numbered for different rois
            method: type of extraction method (default=mean)

        Returns:
            out: mean within each ROI across images
        """
        if not isinstance(mask, nib.Nifti1Image):
            raise ValueError('Make sure mask is a nibabel instance')

        # Binary mask (two unique values): single ROI.
        if len(np.unique(mask.get_data())) == 2:
            all_mask = Brain_Data(mask)
            if method is 'mean':
                out = np.mean(self.data[:, np.where(all_mask.data)].squeeze(), axis=1)
        # Labelled mask: one ROI per label value.
        elif len(np.unique(mask.get_data())) > 2:
            all_mask = expand_mask(mask)
            out = []
            for i in range(all_mask.shape()[0]):
                if method is 'mean':
                    out.append(np.mean(self.data[:, np.where(all_mask[i].data)].squeeze(), axis=1))
            out = np.array(out)
        # NOTE(review): a constant mask (1 unique value) or method != 'mean'
        # leaves `out` unbound and raises NameError at return.
        return out

    def icc(self, icc_type='icc2'):
        ''' Calculate intraclass correlation coefficient for data within
            Brain_Data class

        ICC Formulas are based on:
        Shrout, P. E., & Fleiss, J. L. (1979). Intraclass correlations: uses
        in assessing rater reliability. Psychological bulletin, 86(2), 420.

        icc1:  x_ij = mu + beta_j + w_ij
        icc2/3:  x_ij = mu + alpha_i + beta_j + (ab)_ij + epsilon_ij

        Code modifed from nipype algorithms.icc
        https://github.com/nipy/nipype/blob/master/nipype/algorithms/icc.py

        Args:
            icc_type: type of icc to calculate (icc: voxel random effect,
                    icc2: voxel and column random effect, icc3: voxel and
                    column fixed effect)

        Returns:
            ICC: intraclass correlation coefficient
        '''
        Y = self.data.T
        [n, k] = Y.shape

        # Degrees of Freedom
        dfc = k - 1
        dfe = (n - 1) * (k - 1)
        dfr = n - 1

        # Sum Square Total
        mean_Y = np.mean(Y)
        SST = ((Y - mean_Y) ** 2).sum()

        # create the design matrix for the different levels
        x = np.kron(np.eye(k), np.ones((n, 1)))  # sessions
        x0 = np.tile(np.eye(n), (k, 1))  # subjects
        X = np.hstack([x, x0])

        # Sum Square Error
        predicted_Y = np.dot(np.dot(np.dot(X, np.linalg.pinv(np.dot(X.T, X))), X.T), Y.flatten('F'))
        residuals = Y.flatten('F') - predicted_Y
        SSE = (residuals ** 2).sum()
        MSE = SSE / dfe

        # Sum square column effect - between colums
        SSC = ((np.mean(Y, 0) - mean_Y) ** 2).sum() * n
        MSC = SSC / dfc / n

        # Sum Square subject effect - between rows/subjects
        SSR = SST - SSC - SSE
        MSR = SSR / dfr

        if icc_type == 'icc1':
            # ICC(2,1) = (mean square subject - mean square error) /
            # (mean square subject + (k-1)*mean square error +
            # k*(mean square columns - mean square error)/n)
            # ICC = (MSR - MSRW) / (MSR + (k-1) * MSRW)
            # NOTE(review): the exception is constructed but never raised, so
            # this branch falls through and `return ICC` raises NameError —
            # should be `raise NotImplementedError(...)`.
            NotImplementedError("This method isn't implemented yet.")

        elif icc_type == 'icc2':
            # ICC(2,1) = (mean square subject - mean square error) /
            # (mean square subject + (k-1)*mean square error +
            # k*(mean square columns - mean square error)/n)
            ICC = (MSR - MSE) / (MSR + (k - 1) * MSE + k * (MSC - MSE) / n)

        elif icc_type == 'icc3':
            # ICC(3,1) = (mean square subject - mean square error) /
            # (mean square subject + (k-1)*mean square error)
            ICC = (MSR - MSE) / (MSR + (k - 1) * MSE)

        return ICC
# NOTE(review): this chunk begins MID-STATEMENT — the keyword arguments on the
# next line are the tail of a GridSearchCV(...) call whose opening lines
# precede this excerpt; do not treat this fragment as standalone.
param_grid={'anova__k': k_range}, verbose=1, n_jobs=1, cv=5)
# Nested cross-validation: outer CV around the grid search, grouped by session.
nested_cv_scores = cross_val_score(grid, X, y, cv=5, groups=session)
print("Nested CV score: %.4f" % np.mean(nested_cv_scores))

# In[ ]:

# Here is the image: recover a voxel-space weight map from the fitted SVM.
coef = svc.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
# reverse masking
weight_img = masker.inverse_transform(coef)

# Use the mean image as a background to avoid relying on anatomical data
from nilearn import image
mean_img = image.mean_img(dataset)
mean_img.to_filename(
    '/projects/niblab/bids_projects/Experiments/ChocoData/derivatives/images/4w_partA_LF_LS_milk_mean_nimask.nii'
)

# Create the figure
from nilearn.plotting import plot_stat_map, show
display = plot_stat_map(weight_img, mean_img, title='SVM weights LF_LS vs h2O for 4 waves, partA')
from nilearn.plotting import plot_roi
from nilearn.image.image import mean_img

# calculate mean image for the background
mean_func_img = mean_img(func_filename)

# Show the mask (nifti_masker and mask_img are defined earlier in the file).
plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")

### Preprocess data ###########################################################
# Fit the masker and flatten the 4D functional image to (time x voxels).
nifti_masker.fit(func_filename)
fmri_masked = nifti_masker.transform(func_filename)

### Run an algorithm ##########################################################
from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
# Transpose so ICA unmixes over voxels; transpose back to components x voxels.
components_masked = ica.fit_transform(fmri_masked.T).T

### Reverse masking ###########################################################
components = nifti_masker.inverse_transform(components_masked)

### Show results ##############################################################
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img

plot_stat_map(index_img(components, 0), mean_func_img, display_mode='y', cut_coords=4, title="Component 0")
plt.show()
class FirstLevelModel(BaseEstimator, TransformerMixin, CacheMixin):
    """Implementation of the General Linear Model for single session fMRI data.

    Parameters
    ----------
    t_r : float
        Repetition time of the experimental runs, in seconds. Necessary to
        correctly consider times in the design matrix. Also passed to
        nilearn.signal.clean; see the related documentation for details.

    slice_time_ref : float, optional (default 0.)
        Time of the reference slice used in the slice timing preprocessing
        step of the experimental runs, expressed as a fraction of the t_r
        (time repetition), so it can have values between 0. and 1.

    hrf_model : string, optional
        Hemodynamic response function (HRF) for the design matrices:
        'canonical', 'canonical with derivative' or 'fir'.

    drift_model : string, optional
        Desired drift model for the design matrices: 'polynomial', 'cosine'
        or 'blank'.

    period_cut : float, optional
        Cut period of the low-pass filter in seconds for the design matrices.

    drift_order : int, optional
        Order of the drift model (in case it is polynomial) for the design
        matrices.

    fir_delays : array of shape(n_onsets) or list, optional
        In case of FIR design, yields the array of delays used in the FIR
        model, in seconds.

    min_onset : float, optional
        Minimal onset relative to the design (in seconds). Events that start
        before (slice_time_ref * t_r + min_onset) are not considered.

    mask : Niimg-like, NiftiMasker or MultiNiftiMasker object, optional
        Mask to be used on data. If an instance of masker is passed, then its
        mask will be used. If no mask is given, it will be computed
        automatically by a MultiNiftiMasker with default parameters.

    target_affine : 3x3 or 4x4 matrix, optional
        Passed to nilearn.image.resample_img; see the related documentation
        for details.

    target_shape : 3-tuple of integers, optional
        Passed to nilearn.image.resample_img; see the related documentation
        for details.

    smoothing_fwhm : float, optional
        If not None, size in millimeters of the spatial smoothing to apply
        to the signal.

    memory : string, optional
        Path to the directory used to cache the masking process and the glm
        fit. By default, no caching is done. Creates instance of
        joblib.Memory.

    memory_level : integer, optional
        Rough estimator of the amount of memory used by caching. Higher
        value means more memory for caching.

    standardize : boolean, optional
        If True, the time-series are centered and normed: their variance is
        put to 1 in the time dimension.

    signal_scaling : False, int or (int, int), optional
        If not False, fMRI signals are scaled to the mean value of
        scaling_axis given, which can be 0, 1 or (0, 1). 0 refers to mean
        scaling each voxel with respect to time, 1 refers to mean scaling
        each time point with respect to all voxels and (0, 1) refers to
        scaling with respect to voxels and time, which is known as grand
        mean scaling. Incompatible with standardize (standardize=False is
        enforced when signal_scaling is not False).

    noise_model : {'ar1', 'ols'}, optional
        The temporal variance model. Defaults to 'ar1'.

    verbose : integer, optional
        Indicate the level of verbosity. By default, nothing is printed.
        NOTE(review): the __init__ default is verbose=1, which does print
        progress -- confirm which default is intended.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means 'all
        CPUs', -2 'all CPUs but one', and so on.

    minimize_memory : boolean, optional
        Gets rid of some variables on the model fit results that are not
        necessary for contrast computation and would only be useful for
        further inspection of model details. This has an important impact
        on memory consumption. True by default.

    Attributes
    ----------
    labels : array of shape (n_voxels,),
        a map of values on voxels used to identify the corresponding model

    results : dict,
        with keys corresponding to the different labels values; values are
        RegressionResults instances corresponding to the voxels
    """

    def __init__(self, t_r=None, slice_time_ref=0., hrf_model='glover',
                 drift_model='cosine', period_cut=128, drift_order=1,
                 fir_delays=[0], min_onset=-24, mask=None, target_affine=None,
                 target_shape=None, smoothing_fwhm=None, memory=Memory(None),
                 memory_level=1, standardize=False, signal_scaling=0,
                 noise_model='ar1', verbose=1, n_jobs=1,
                 minimize_memory=True):
        # design matrix parameters
        self.t_r = t_r
        self.slice_time_ref = slice_time_ref
        self.hrf_model = hrf_model
        self.drift_model = drift_model
        self.period_cut = period_cut
        self.drift_order = drift_order
        self.fir_delays = fir_delays
        self.min_onset = min_onset
        # glm parameters
        self.mask = mask
        self.target_affine = target_affine
        self.target_shape = target_shape
        self.smoothing_fwhm = smoothing_fwhm
        # A string is interpreted as a joblib cache directory.
        if isinstance(memory, _basestring):
            self.memory = Memory(memory)
        else:
            self.memory = memory
        self.memory_level = memory_level
        self.standardize = standardize
        # signal_scaling and standardize are mutually exclusive: enabling
        # scaling forces standardize off.
        if signal_scaling in [0, 1, (0, 1)]:
            self.scaling_axis = signal_scaling
            self.signal_scaling = True
            self.standardize = False
        elif signal_scaling is False:
            self.signal_scaling = signal_scaling
        else:
            raise ValueError('signal_scaling must be "False", "0", "1"'
                             ' or "(0, 1)"')
        self.noise_model = noise_model
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.minimize_memory = minimize_memory
        # attributes (populated by fit)
        self.labels_ = None
        self.results_ = None

    def fit(self, run_imgs, paradigms=None, confounds=None,
            design_matrices=None):
        """Fit the GLM.

        For each run:
        1. create design matrix X
        2. do a masker job: fMRI_data -> Y
        3. fit regression to (Y, X)

        Parameters
        ----------
        run_imgs : Niimg-like object or list of Niimg-like objects,
            Data on which the GLM will be fitted. If this is a list, the
            affine is considered the same for all.

        paradigms : pandas Dataframe or string or list of pandas DataFrames
            or strings,
            fMRI paradigms used to build design matrices. One paradigm
            expected per run_img. Ignored in case designs is not None.

        confounds : pandas Dataframe or string or list of pandas DataFrames
            or strings,
            Each column in a DataFrame corresponds to a confound variable
            to be included in the regression model of the respective
            run_img. The number of rows must match the number of volumes
            in the respective run_img. Ignored in case designs is not None.

        design_matrices : pandas DataFrame or list of pandas DataFrames,
            Design matrices that will be used to fit the GLM.
        """
        # Check arguments
        # Check imgs type
        if not isinstance(run_imgs, (list, tuple)):
            run_imgs = [run_imgs]
        for rimg in run_imgs:
            if not isinstance(rimg, (_basestring, Nifti1Image)):
                raise ValueError('run_imgs must be Niimg-like object or list'
                                 ' of Niimg-like objects')
        # check all information necessary to build design matrices is available
        if design_matrices is None:
            if paradigms is None:
                raise ValueError('paradigms or design matrices must be provided')
            if self.t_r is None:
                raise ValueError('t_r not given to FirstLevelModel object'
                                 ' to compute design from paradigm')
        else:
            design_matrices = _check_run_tables(run_imgs, design_matrices,
                                                'design_matrices')
        # check the number of paradigm and confound files match number of runs
        # Also check paradigm and confound files can be loaded as DataFrame
        if paradigms is not None:
            paradigms = _check_run_tables(run_imgs, paradigms, 'paradigms')
        if confounds is not None:
            confounds = _check_run_tables(run_imgs, confounds, 'confounds')

        # Learn the mask
        if not isinstance(self.mask, NiftiMasker):
            self.masker_ = NiftiMasker(
                mask_img=self.mask, smoothing_fwhm=self.smoothing_fwhm,
                target_affine=self.target_affine,
                standardize=self.standardize, mask_strategy='epi',
                t_r=self.t_r, memory=self.memory,
                verbose=max(0, self.verbose - 1),
                target_shape=self.target_shape,
                memory_level=self.memory_level)
        else:
            # A user-supplied masker is cloned; our own non-None parameters
            # take precedence over the masker's (with a warning).
            self.masker_ = clone(self.mask)
            for param_name in ['target_affine', 'target_shape',
                               'smoothing_fwhm', 'low_pass', 'high_pass',
                               't_r', 'memory', 'memory_level']:
                our_param = getattr(self, param_name)
                if our_param is None:
                    continue
                if getattr(self.masker_, param_name) is not None:
                    warn('Parameter %s of the masker overriden' % param_name)
                setattr(self.masker_, param_name, our_param)
        # The mask is learned from the first run only.
        self.masker_.fit(run_imgs[0])

        # For each run fit the model and keep only the regression results.
        self.labels_, self.results_, self.design_matrices_ = [], [], []
        n_runs = len(run_imgs)
        t0 = time.time()
        for run_idx, run_img in enumerate(run_imgs):
            # Report progress
            if self.verbose > 0:
                percent = float(run_idx) / n_runs
                percent = round(percent * 100, 2)
                dt = time.time() - t0
                # We use a max to avoid a division by zero
                if run_idx == 0:
                    remaining = 'go take a coffee, a big one'
                else:
                    remaining = (100. - percent) / max(0.01, percent) * dt
                    remaining = '%i seconds remaining' % remaining
                sys.stderr.write(" " * 100 + "\r")
                sys.stderr.write(
                    "Computing run %d out of %d runs (%s)\r"
                    % (run_idx, n_runs, remaining))

            # Build the experimental design for the glm
            run_img = check_niimg(run_img, ensure_ndim=4)
            if design_matrices is None:
                n_scans = run_img.get_data().shape[3]
                if confounds is not None:
                    confounds_matrix = confounds[run_idx].values
                    if confounds_matrix.shape[0] != n_scans:
                        raise ValueError('Rows in confounds does not match'
                                         'n_scans in run_img at index %d'
                                         % (run_idx,))
                    confounds_names = confounds[run_idx].columns
                else:
                    confounds_matrix = None
                    confounds_names = None
                # Frame times are shifted by the slice-timing reference.
                start_time = self.slice_time_ref * self.t_r
                end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r
                frame_times = np.linspace(start_time, end_time, n_scans)
                design = make_design_matrix(frame_times, paradigms[run_idx],
                                            self.hrf_model, self.drift_model,
                                            self.period_cut, self.drift_order,
                                            self.fir_delays, confounds_matrix,
                                            confounds_names, self.min_onset)
            else:
                design = design_matrices[run_idx]
            self.design_matrices_.append(design)

            # Compute GLM
            Y = self.masker_.transform(run_img)
            if self.signal_scaling:
                Y, _ = mean_scaling(Y, self.scaling_axis)
            if self.memory is not None:
                mem_glm = self.memory.cache(run_glm)
            else:
                mem_glm = run_glm
            labels, results = mem_glm(Y, design, noise_model=self.noise_model,
                                      bins=100, n_jobs=self.n_jobs)
            self.labels_.append(labels)
            # We save memory if inspecting model details is not necessary
            if self.minimize_memory:
                for key in results:
                    results[key] = SimpleRegressionResults(results[key])
            self.results_.append(results)
            del Y

        # Report progress
        if self.verbose > 0:
            sys.stderr.write("\nComputation of %d runs done in %i seconds\n"
                             % (n_runs, time.time() - t0))

        return self

    def compute_contrast(self, contrast_def, contrast_name=None,
                         stat_type=None, output_type='z_score'):
        """Generate different outputs corresponding to the contrasts provided
        e.g. z_map, t_map, effects and variance. In multi-session case,
        outputs the fixed effects map.

        Parameters
        ----------
        contrast_def : array or list of arrays of shape (n_col) or
            (n_run, n_col)
            where ``n_col`` is the number of columns of the design matrix,
            (one array per run). If only one array is provided when there
            are several runs, it will be assumed that the same contrast is
            desired for all runs.

        contrast_name : str, optional
            name of the contrast

        stat_type : {'t', 'F'}, optional
            type of the contrast

        output_type : str, optional
            Type of the output map. Can be 'z_score', 'stat', 'p_value',
            'effect_size' or 'effect_variance'.

        Returns
        -------
        output_image : Nifti1Image
            The desired output image
        """
        if self.labels_ is None or self.results_ is None:
            raise ValueError('The model has not been fit yet')

        if isinstance(contrast_def, np.ndarray):
            con_vals = [contrast_def]
        elif isinstance(contrast_def, (list, tuple)):
            con_vals = contrast_def
            for cidx, con in enumerate(contrast_def):
                if not isinstance(con, np.ndarray):
                    raise ValueError('contrast_def at index %i is not an'
                                     ' array' % cidx)
        else:
            raise ValueError('contrast_def must be an array or list of arrays')
        n_runs = len(self.labels_)
        # NOTE(review): list replication is only correct when exactly one
        # contrast was given; if len(con_vals) is neither 1 nor n_runs this
        # silently produces a wrong-length list -- confirm intended behavior.
        if len(con_vals) != n_runs:
            warn('One contrast given, assuming it for all %d runs' % n_runs)
            con_vals = con_vals * n_runs
        if isinstance(output_type, _basestring):
            if output_type not in ['z_score', 'stat', 'p_value', 'effect_size',
                                   'effect_variance']:
                raise ValueError('output_type must be one of "z_score", "stat",'
                                 ' "p_value","effect_size" or "effect_variance"')
        else:
            raise ValueError('output_type must be one of "z_score", "stat",'
                             ' "p_value","effect_size" or "effect_variance"')

        # labels/results are large and deterministic given con_vals, so they
        # are ignored by the cache key.
        if self.memory is not None:
            arg_ignore = ['labels', 'results']
            mem_contrast = self.memory.cache(_fixed_effect_contrast,
                                             ignore=arg_ignore)
        else:
            mem_contrast = _fixed_effect_contrast
        contrast = mem_contrast(self.labels_, self.results_, con_vals,
                                stat_type)

        # Pick the requested statistic from the contrast object.
        estimate_ = getattr(contrast, output_type)()
        # Prepare the returned images
        output = self.masker_.inverse_transform(estimate_)
        if contrast_name is None:
            contrast_name = str(con_vals)
        # NOTE(review): get_header() is deprecated in recent nibabel in
        # favor of the .header property -- confirm the pinned version.
        output.get_header()['descrip'] = (
            '%s of contrast %s' % (output_type, contrast_name))
        return output
# NOTE(review): fragment opens mid-call -- these are the trailing keyword
# arguments of a NiftiMasker(...) construction started outside this chunk.
# Python 2 print statements throughout.
    smoothing_fwhm=FWHM,
    memory='nilearn_cache', memory_level=1)  # cache options

# remove NaNs from images
ref_affine = np.asarray(nibabel.load(images[0]).get_affine())
images_ = [np.asarray(nibabel.load(img).get_data()) for img in images]
nonnan_images = []
for img in images_:
    img[np.isnan(img)] = 0.
    nonnan_images.append(nibabel.Nifti1Image(img, ref_affine))

print "Nifti masker"
# remove features with zero between-subject variance
images_masked = nifti_masker.fit_transform(images)
images_masked[:, images_masked.var(0) < 0.01] = 0.
# final masking: round-trip through image space, then re-mask so the
# variance-thresholded data defines the final feature set
new_images = nifti_masker.inverse_transform(images_masked)
images_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = images_masked.shape
print n_samples, "subjects, ", n_features, "features"

### Perform massively univariate analysis with permuted OLS ###################
print "Massively univariate model"
# Tests the effect of `cdr` on the images, with `age` as a confound.
neg_log_pvals, all_scores, _ = permuted_ols(
    cdr, images_masked, age,  # + intercept as a covariate by default
    n_perm=1000,
    n_jobs=-1)  # can be changed to use more CPUs
# Keep only the first contrast's map of -log10(p) values.
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]

### Show results
print "Plotting results"
# Compare several classifiers on face-vs-house decoding and plot each
# classifier's weight map. `func_filename`, `stimuli`, `masked_timecourses`,
# `resting_state`, `classifiers`, `masker` and `plt` come from earlier code.
from nilearn import image
mean_epi_img = image.mean_img(func_filename)

# Restrict the decoding to face vs house
condition_mask = np.logical_or(stimuli == b'face', stimuli == b'house')
# Keep only non-rest time points that match the condition mask.
masked_timecourses = masked_timecourses[
    condition_mask[np.logical_not(resting_state)]]
stimuli = stimuli[condition_mask]

# Transform the stimuli to binary values: 1 = face, 0 = house
stimuli = (stimuli == b'face').astype(np.int)

from nilearn.plotting import plot_stat_map

for classifier_name, classifier in sorted(classifiers.items()):
    classifier.fit(masked_timecourses, stimuli)

    # Grid-search objects expose the weights through best_estimator_;
    # classifiers without linear weights are skipped.
    if hasattr(classifier, 'coef_'):
        weights = classifier.coef_[0]
    elif hasattr(classifier, 'best_estimator_'):
        weights = classifier.best_estimator_.coef_[0]
    else:
        continue
    weight_img = masker.inverse_transform(weights)
    weight_map = weight_img.get_data()
    # Display threshold: 0.1% of the maximum absolute weight.
    threshold = np.max(np.abs(weight_map)) * 1e-3
    plot_stat_map(weight_img, bg_img=mean_epi_img,
                  display_mode='z', cut_coords=[-17],
                  threshold=threshold,
                  title='%s: face vs house' % classifier_name)

plt.show()
# calculate mean image for the background mean_func_img = mean_img(func_filename) plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask") ### Preprocess data ########################################################### nifti_masker.fit(func_filename) fmri_masked = nifti_masker.transform(func_filename) ### Run an algorithm ########################################################## from sklearn.decomposition import FastICA n_components = 20 ica = FastICA(n_components=n_components, random_state=42) components_masked = ica.fit_transform(fmri_masked.T).T ### Reverse masking ########################################################### components = nifti_masker.inverse_transform(components_masked) ### Show results ############################################################## from nilearn.plotting import plot_stat_map from nilearn.image import index_img plot_stat_map(index_img(components, 0), mean_func_img, display_mode='y', cut_coords=4, title="Component 0") plt.show()
# ANOVA feature selection chained with an SVC, fit on the full data, then
# visualisation of the discriminating weights. `feature_selection`, `svc`,
# `X`, `y`, `nifti_masker` and `dataset_files` come from earlier code.
from sklearn.pipeline import Pipeline
anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])

### Fit and predict ###########################################################
# NOTE(review): prediction is on the training data -- this measures training
# fit, not generalization.
anova_svc.fit(X, y)
y_pred = anova_svc.predict(X)

### Visualization #############################################################

### Look at the SVC's discriminating weights
coef = svc.coef_
# reverse feature selection: back to the full voxel vector
coef = feature_selection.inverse_transform(coef)
# reverse masking: voxel vector -> Nifti image
weight_img = nifti_masker.inverse_transform(coef)

### Create the figure
from nilearn import image
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map

# Plot the mean image because we have no anatomic data
mean_img = image.mean_img(dataset_files.func)

plot_stat_map(weight_img, mean_img, title='SVM weights')

### Saving the results as a Nifti file may also be important
import nibabel
nibabel.save(weight_img, 'haxby_face_vs_house.nii')
# NOTE(review): fragment opens mid-call (presumably a glob for per-subject
# confound .mat files) and runs inside a loop over subjects -- `i`, `sub`,
# `rs_files`, `data`, `data_dir`, `masker` and `io` are bound outside this
# chunk. Python 2 print syntax.
    data.folder.iloc[i] + os.sep + sub + '/RS/Counfounds*.mat')
if len(confound_files) > 1:
    print 'too many confound files'
    stop  # intentional NameError to abort -- presumably a debug guard
else:
    confound_file = io.loadmat(confound_files[0])
    # Mirror each resting-state file path under the interim data directory
    # (keep the last 5 path components).
    new_filenames = []
    for rs_file in rs_files:
        last_part = os.sep.join(rs_file.split(os.sep)[-5:])
        new_filenames.append(data_dir + '/data/interim/rs_data_conags/'
                             + last_part)
    # Clean the signals with motion parameters (rp), their derivatives (rpp)
    # and the squares of both as confound regressors.
    masked_data = masker.transform(rs_files, confounds=[
        confound_file['rp'], confound_file['rpp'],
        confound_file['rp']**2, confound_file['rpp']**2
    ])
    # NOTE(review): this loop rebinds `i`, shadowing the outer subject loop
    # index used above -- confirm the enclosing loop does not read `i`
    # after this point.
    for i in range(len(masked_data)):
        img = masker.inverse_transform(masked_data[i])
        if not os.path.exists(os.sep.join(new_filenames[i].split(
                os.sep)[:-1])):
            os.makedirs(os.sep.join(new_filenames[i].split(os.sep)[:-1]))
        img.to_filename(new_filenames[i])
def extract_atlas_rad(db, rad_column, rad_dir, stat, pca_explained_var=None,
                      include_chim=False):
    """Replace the radiation-presence flag by per-ROI statistics + PCA scores.

    For every subject flagged in ``rad_column``, the radiation dose image is
    summarised by ``stat`` over each ROI of the LPBA40 atlas (plus a
    synthetic 'rest_brain' ROI covering unlabeled in-brain voxels). The
    per-ROI values are standardized, a PCA is fitted on them, and the
    component scores are written into new ``component_XX`` columns of ``db``.
    Assumes the brain mask and radiation nifti file are in ``rad_dir``.

    Parameters
    ----------
    db : pandas.DataFrame
        Subject table; must contain a 'patient' column and ``rad_column``.
    rad_column : str
        Column holding the radiation flag (1 = radiation data expected).
        The column is dropped from the returned frame.
    rad_dir : str
        Root directory containing one sub-directory per patient.
    stat : callable
        Reduction applied to the masked radiation values of one ROI (e.g.
        ``np.mean``); receives the 2D array from ``NiftiMasker.fit_transform``.
    pca_explained_var : float, optional
        If given, passed to ``PCA`` so enough components are kept to explain
        this fraction of the variance; otherwise all components are kept.
    include_chim : bool, optional
        If True, rows without radiation keep zero-valued component columns
        instead of being dropped.

    Returns
    -------
    (db, rois_name, pca, components_name, rad_data)
        The filtered frame with component columns, the ROI names, the fitted
        PCA and scaler-transformed training matrix, and the component
        column names.

    Raises
    ------
    ValueError
        If no subject with complete radiation data is found (PCA cannot
        be fitted).
    """
    atlas_mask_file = 'labels_to_rd.nii.gz'
    brain_mask_file = 'BrainMask_to_rd.nii.gz'
    xml_path = os.path.join(rad_dir, 'lpba40.label.xml')
    labels, rois_name = read_roiLabel(xml_path)
    # labels = labels[:2]  # for debugging
    # rois_name = rois_name[:2]  # for debugging
    # Voxels outside the atlas are relabeled 1000 below and treated as one
    # extra 'rest_brain' ROI.
    labels.append(1000)
    rois_name.append('rest_brain')

    def _subject_paths(sub_id):
        """Return (atlas, brain-mask, dose) paths and whether all exist."""
        atlas_path = os.path.join(rad_dir, sub_id, atlas_mask_file)
        brain_path = os.path.join(rad_dir, sub_id, brain_mask_file)
        rad_path = os.path.join(rad_dir, sub_id, sub_id + '.nii')
        all_present = (os.path.isfile(atlas_path)
                       and os.path.isfile(brain_path)
                       and os.path.isfile(rad_path))
        return atlas_path, brain_path, rad_path, all_present

    extracted_rad_stat = {}  # Memoization of radiation statistic per patient

    # First pass: extract the per-ROI statistic for every irradiated subject
    # with complete data on disk.
    for idx, row in db.iterrows():
        if row[rad_column] != 1:
            continue
        sub_id = row['patient']
        if sub_id in extracted_rad_stat:
            continue
        atlas_mask_path, brain_mask_path, rad_path, ok = _subject_paths(sub_id)
        if not ok:
            continue
        extracted_rad = []
        brain_masker = NiftiMasker(brain_mask_path)
        atlas_brain = brain_masker.fit_transform(atlas_mask_path)
        atlas_brain = atlas_brain.astype(np.int16)
        # Unlabeled in-brain voxels get the synthetic 'rest_brain' label.
        atlas_brain[atlas_brain == 0] = 1000
        atlas_brain = brain_masker.inverse_transform(atlas_brain)
        # Renamed from 'idx', which shadowed the db.iterrows() index above.
        for roi_idx, label in enumerate(labels):
            print('processing ROI ' + str(label) + ' ' + rois_name[roi_idx]
                  + ' for subject ' + sub_id)
            masker = NiftiMasker(get_roi_mask(atlas_brain, label))
            rad_stat = stat(masker.fit_transform(rad_path))
            extracted_rad.append(rad_stat)
        extracted_rad_stat[sub_id] = extracted_rad

    if not extracted_rad_stat:
        # Fail early with a clear message instead of an obscure vstack error.
        raise ValueError('No subject with complete radiation data found; '
                         'cannot fit PCA.')

    # PCA estimation on the standardized ROI statistics.
    # NOTE(review): row order of rad_data follows dict insertion order
    # (first-encounter order in db) -- confirm downstream code does not
    # assume db row order.
    rad_data = np.vstack(list(extracted_rad_stat.values()))
    scaler = StandardScaler()
    rad_data = scaler.fit_transform(rad_data)
    if pca_explained_var is not None:
        pca = PCA(pca_explained_var, whiten=True)
    else:
        pca = PCA(whiten=True)
    pca.fit(rad_data)
    components_name = ['component_{0:02d}'.format(x)
                       for x in range(1, pca.n_components_ + 1)]

    # Second pass: write the component scores back into db.
    for c in components_name:
        db[c] = ''
    for idx, row in db.iterrows():
        if row[rad_column] == 1:
            sub_id = row['patient']
            _, _, _, ok = _subject_paths(sub_id)
            if ok:
                rois_v = np.array(extracted_rad_stat[sub_id]).reshape(1, -1)
                components_value = np.ravel(
                    pca.transform(scaler.transform(rois_v)))
                for cidx, c in enumerate(components_name):
                    db.loc[idx, c] = components_value[cidx]
            else:
                # Incomplete data: mark the row for removal below.
                db.loc[idx, rad_column] = None
        elif not include_chim:
            db.loc[idx, rad_column] = None
        else:
            # Non-irradiated subjects kept with zero components.
            for c in components_name:
                db.loc[idx, c] = 0
    db = db[db[rad_column].notnull()]
    db = db.drop(columns=[rad_column])
    return db, rois_name, pca, components_name, rad_data
# NOTE(review): the first statements are the tail of a function whose
# signature lies outside this chunk; it appears to discard connected
# components smaller than `size_min` by relabeling them as background.
    print(counts)
    for count_, label_ in zip(counts, ulabels):
        if count_ < size_min:
            labels[labels == label_] = 0
    return labels


# Hyperalignment consistency analysis (module-level script).
Y_tf = hyperalign(path_train, path_test, algo, train_subject, test_subjects)
# Stack per-subject (voxels x time) matrices for train and test runs.
X_test = np.array(
    [masker.transform(path_train[subject]).T for subject in subjects])
Y_test = np.array(
    [masker.transform(path_test[subject]).T for subject in subjects])

# Voxel-wise consistency maps, written out as nifti files.
consistency_X = consistency_map(np.array(X_test))
filename = os.path.join(write_dir, 'consistency_X.nii.gz')
masker.inverse_transform(consistency_X).to_filename(filename)
consistency_Y = consistency_map(np.array(Y_test))
filename = os.path.join(write_dir, 'consistency_Y.nii.gz')
masker.inverse_transform(consistency_Y).to_filename(filename)

# NOTE(review): the first extract_blobs result is discarded -- confirm the
# call is needed (side effects?) or drop it.
extract_blobs(consistency_X, masker)
blobs = extract_blobs(consistency_Y, masker)
label_img = nib.Nifti1Image(blobs, masker.affine_)
label_img.to_filename(os.path.join(write_dir, 'blobs.nii.gz'))
# Restrict the blob labels to in-mask voxels.
blob_vector = blobs[(masker.mask_img_).get_data() > 0]

from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.cross_validation import cross_val_score, ShuffleSplit
from sklearn.ensemble import ExtraTreesClassifier
# Print the results print("Classification accuracy: %.4f / Chance level: %f" % (classification_accuracy, 1. / len(conditions.unique()))) # Classification accuracy: 0.70370 / Chance level: 0.5000 ############################################################################# # Visualize the results # ---------------------- # Look at the SVC's discriminating weights coef = svc.coef_ # reverse feature selection coef = feature_selection.inverse_transform(coef) # reverse masking weight_img = masker.inverse_transform(coef) # Use the mean image as a background to avoid relying on anatomical data from nilearn import image mean_img = image.mean_img(func_filename) # Create the figure from nilearn.plotting import plot_stat_map, show plot_stat_map(weight_img, mean_img, title='SVM weights') # Saving the results as a Nifti file may also be important weight_img.to_filename('haxby_face_vs_house.nii') show()