def fit(self, X, Y):
    """Fit data X and Y and learn transformation to map X to Y.

    Parameters
    ----------
    X: Niimg-like object
        Source data.
    Y: Niimg-like object
        Target data.

    Returns
    -------
    self
    """
    self.masker_ = check_embedded_nifti_masker(self)
    self.masker_.n_jobs = self.n_jobs

    if self.masker_.mask_img is None:
        self.masker_.fit([X])
    else:
        self.masker_.fit()

    if type(self.clustering) == nib.nifti1.Nifti1Image or os.path.isfile(
            self.clustering):
        # check that clustering provided fills the mask, if not, reduce the mask
        if 0 in self.masker_.transform(self.clustering):
            reduced_mask = _intersect_clustering_mask(self.clustering,
                                                      self.masker_.mask_img)
            self.mask = reduced_mask
            self.masker_ = check_embedded_nifti_masker(self)
            self.masker_.n_jobs = self.n_jobs
            self.masker_.fit()
            warnings.warn("Mask used was bigger than clustering provided. " +
                          "Its intersection with the clustering was used instead.")

    if isinstance(X, (list, np.ndarray)):
        X_ = concat_imgs(X)
    else:
        X_ = load_img(X)
    if isinstance(Y, (list, np.ndarray)):
        Y_ = concat_imgs(Y)
    else:
        Y_ = load_img(Y)

    self.fit_, self.labels_ = [], []
    rs = ShuffleSplit(n_splits=self.n_bags, test_size=.8, random_state=0)

    outputs = Parallel(n_jobs=self.n_jobs, prefer="threads",
                       verbose=self.verbose)(
        delayed(fit_one_parcellation)
        (X_, Y_, self.alignment_method, self.masker_, self.n_pieces,
         self.clustering, clustering_index, self.n_jobs, self.verbose)
        for clustering_index, _ in rs.split(range(X_.shape[-1])))  # change split
    self.labels_ = [output[0] for output in outputs]
    self.fit_ = [output[1] for output in outputs]
    return self
def average_image_pairs(image_pairs, image_paths, rotated_bvecs, bvals,
                        confounds_tsvs, raw_concatenated_files, verbose=False):
    """Create 4D series of averaged images, gradients, and confounds."""
    averaged_images = []
    new_bvecs = []
    confounds = pd.concat(
        [pd.read_csv(fname, delimiter='\t') for fname in confounds_tsvs])
    merged_confounds = []
    merged_bvals = []

    # Load the raw concatenated images for qc
    raw_concatenated_img = concat_imgs(raw_concatenated_files)
    raw_averaged_images = []

    confounds1_rename = {col: col + "_1" for col in confounds.columns}
    confounds2_rename = {col: col + "_2" for col in confounds.columns}
    for index1, index2 in image_pairs:
        confounds1 = confounds.iloc[index1].copy().rename(confounds1_rename)
        confounds2 = confounds.iloc[index2].copy().rename(confounds2_rename)
        confounds_both = confounds1.append(confounds2)
        averaged_images.append(
            math_img('(a+b)/2', a=image_paths[index1], b=image_paths[index2]))
        raw_averaged_images.append(
            math_img('(a[..., %d] + a[..., %d]) / 2' % (index1, index2),
                     a=raw_concatenated_img))

        new_bval = (bvals[index1] + bvals[index2]) / 2.
        merged_bvals.append(new_bval)
        rotated1 = rotated_bvecs[:, index1]
        rotated2 = rotated_bvecs[:, index2]
        new_bvec, bvec_error = average_bvec(rotated1, rotated2)
        new_bvecs.append(new_bvec)

        confounds_both['vec_averaging_error'] = bvec_error
        confounds_both['rotated_grad_x_1'] = rotated1[0]
        confounds_both['rotated_grad_y_1'] = rotated1[1]
        confounds_both['rotated_grad_z_1'] = rotated1[2]
        confounds_both['rotated_grad_x_2'] = rotated2[0]
        confounds_both['rotated_grad_y_2'] = rotated2[1]
        confounds_both['rotated_grad_z_2'] = rotated2[2]
        confounds_both['mean_grad_x'] = new_bvec[0]
        confounds_both['mean_grad_y'] = new_bvec[1]
        confounds_both['mean_grad_z'] = new_bvec[2]
        confounds_both['mean_b'] = new_bval
        merged_confounds.append(confounds_both)
        if verbose:
            print('%d: %d [%.4fdeg error]\n\t%d (%.4f %.4f %.4f)' % (
                index1, index2, bvec_error, new_bval,
                new_bvec[0], new_bvec[1], new_bvec[2]))

    averaged_confounds = pd.DataFrame(merged_confounds)
    return concat_imgs(averaged_images), concat_imgs(raw_averaged_images), \
        np.array(merged_bvals), np.array(new_bvecs), averaged_confounds
def main(args):
    # Set analysis directories
    root = '/net/synapse/nt/users/bmacintosh_lab/nluciw/'
    # Root directory of data.
    data_dir = root + 'data/EnF/sourcedata/'
    # Output directory.
    output_dir = root + 'outputs/perf_covar/' + args.output_dir
    # Create output if does not exist.
    if not os.path.exists(os.path.dirname(output_dir)):
        os.makedirs(os.path.dirname(output_dir))

    # Save command line
    with open(output_dir + 'commandline_args.txt', 'w') as f:
        f.write('\n'.join(sys.argv[1:]))

    # Load 4d nifti objects for both groups
    bd_data = fetch_data(data_dir, args.nifti_name,
                         metadata=args.metadata, subject_group=('BD',))
    hc_data = fetch_data(data_dir, args.nifti_name,
                         metadata=args.metadata, subject_group=('HC',))

    hc_vols = concat_imgs(hc_data.imgs)
    bd_vols = concat_imgs(bd_data.imgs)

    # Construct or load parcellator object using the atlas we specify on
    # the command line.
    parcellator = NiftiLabelsMasker(labels_img=args.atlas,
                                    mask_img=data_dir + 'masks/cbf_80p_aal_merge_mni.nii.gz',
                                    standardize=False,
                                    strategy='mean')
    parcellator.fit()

    # Do the parcellation and correlation for both groups.
    hc_covar, bd_covar = \
        covariance.parcellate_and_correlate([hc_vols, bd_vols],
                                            output_dir,
                                            parcellator,
                                            prefix=args.output_prefix,
                                            detrend=False
                                            # pve_gm_imgs=[concat_imgs(hc_data.struc_imgs),
                                            #              concat_imgs(bd_data.struc_imgs)]
                                            )

    print(len(bd_data.imgs), len(hc_data.imgs))
    difference = statistics.compute_difference(bd_covar[0], hc_covar[0],
                                               len(bd_data.imgs),
                                               len(hc_data.imgs))

    cors = np.stack((bd_covar[0], hc_covar[0], difference))
    np.save(output_dir + args.output_prefix + 'cors', cors)
def __init__(self, echo_times, MPRAGE_tr=None, invtimesAB=None,
             flipangleABdegree=None, nZslices=None, FLASH_tr=None,
             sequence='normal', inversion_efficiency=0.96, B0=7,
             inv1=None, inv1ph=None, inv2=None, inv2ph=None,
             B1_fieldmap=None):

    if type(inv2) is list:
        inv2 = image.concat_imgs(inv2)
    if type(inv2ph) is list:
        inv2ph = image.concat_imgs(inv2ph)

    self.t2starw_echoes = inv2
    self.inv2_echo_times = np.array(echo_times)
    self.n_echoes = len(echo_times)

    if inv2ph is not None:
        self.t2starw_echoes_phase = inv2ph

    if self.t2starw_echoes.shape[-1] != self.n_echoes:
        raise ValueError('Length of echo_times should correspond to the '
                         'number of echoes in INV2')

    inv2 = image.index_img(self.t2starw_echoes, 0)
    inv2ph = image.index_img(self.t2starw_echoes_phase, 0)

    self._s0 = None
    self._t2starmap = None
    self._t2starw = None

    super(MEMP2RAGE, self).__init__(MPRAGE_tr=MPRAGE_tr,
                                    invtimesAB=invtimesAB,
                                    flipangleABdegree=flipangleABdegree,
                                    nZslices=nZslices,
                                    FLASH_tr=FLASH_tr,
                                    sequence=sequence,
                                    inversion_efficiency=inversion_efficiency,
                                    B0=B0,
                                    inv1=inv1,
                                    inv1ph=inv1ph,
                                    inv2=inv2,
                                    inv2ph=inv2ph,
                                    B1_fieldmap=B1_fieldmap)
def transform(self, param_type='z_score'):
    if not self._fit_status:
        raise NotImplementedError('LSS not yet fit. Please run .fit() first')

    param_maps = []
    list_ = []
    for model in self.models:
        if not isinstance(model.img, str):
            raise Exception('{} not a string'.format(model.img))

        # ensure that we get event of LSS using a lookup
        ev = model.design.columns.get_loc(model._event_of_interest)
        param_maps.append(model.extract_params(param_type, contrast_ix=ev))

        # get trial info for the map
        event = model.events.loc[model.event_index]
        list_.append({'src_img': model.img,
                      'trial_type': event['trial_type'],
                      'onset': event['onset']})

    param_index = pd.DataFrame(list_)
    return concat_imgs(param_maps), param_index
def create_parcel_atlas(parcel_list):
    """
    Create a 3D Nifti1Image atlas parcellation of consecutive integer
    intensities from an input list of ROI's.

    Parameters
    ----------
    parcel_list : list
        List of 3D boolean numpy arrays or binarized Nifti1Images
        corresponding to ROI masks.

    Returns
    -------
    net_parcels_map_nifti : Nifti1Image
        A nibabel-based nifti image consisting of a 3D array with integer
        voxel intensities corresponding to ROI membership.
    parcel_list_exp : list
        List of 3D boolean numpy arrays or binarized Nifti1Images
        corresponding to ROI masks, prepended with a background image of
        zeros.
    """
    from nilearn.image import new_img_like, concat_imgs

    parcel_background = new_img_like(
        parcel_list[0], np.zeros(parcel_list[0].shape, dtype=bool))
    parcel_list_exp = [parcel_background] + parcel_list
    parcellation = concat_imgs(parcel_list_exp).get_fdata()
    index_vec = np.array(range(len(parcel_list_exp))) + 1
    net_parcels_sum = np.sum(index_vec * parcellation, axis=3)
    net_parcels_map_nifti = nib.Nifti1Image(net_parcels_sum,
                                            affine=parcel_list[0].affine)
    return net_parcels_map_nifti, parcel_list_exp
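
# A minimal usage sketch for create_parcel_atlas above; the toy ROI arrays
# and identity affine are made up for illustration. Because a background
# volume is prepended before the weighted sum, the first ROI receives label 2
# and the second label 3.
import numpy as np
import nibabel as nib

roi_a = np.zeros((10, 10, 10), dtype=bool)
roi_a[2:4, 2:4, 2:4] = True
roi_b = np.zeros((10, 10, 10), dtype=bool)
roi_b[6:8, 6:8, 6:8] = True
toy_parcels = [nib.Nifti1Image(roi_a.astype(np.uint8), np.eye(4)),
               nib.Nifti1Image(roi_b.astype(np.uint8), np.eye(4))]

atlas_img, parcels_exp = create_parcel_atlas(toy_parcels)
print(np.unique(atlas_img.get_fdata()))  # expected: [0. 2. 3.]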
def transform(self, param_type='z_score'):
    if not self._fit_status:
        raise NotImplementedError('LSA not yet fit. Please run .fit() first')

    param_maps = []
    list_ = []
    for model in self.models:
        # iterate only through trial_types
        trial_reg_names = model.events['trial_type'].tolist()
        if len(np.unique(trial_reg_names)) != len(trial_reg_names):
            raise Exception('Trial regressor names are not unique')

        for ev in trial_reg_names:
            reg = model.design.columns.get_loc(ev)
            param_maps.append(
                model.extract_params(param_type, contrast_ix=reg))
            trial_type, onset = model.design.columns[reg].split('___')
            list_.append({'src_img': model.img,
                          'trial_type': trial_type,
                          'onset': onset})

    param_index = pd.DataFrame(list_)
    return concat_imgs(param_maps), param_index
def _check_inputs(self, X, y):
    from nibabel.nifti1 import Nifti1Image
    from nilearn.image import load_img, concat_imgs

    if type(X) == list:
        # First, load images
        X = [load_img(img) for img in X]
        # check that all elements of the list have dimension 3
        # (they should be 3D images)
        dims_X = [img.ndim != 3 for img in X]
        if np.any(dims_X):
            raise ValueError(
                "List of images provided, so they all should be 3D")
        X = concat_imgs(X)
    elif type(X) == Nifti1Image:
        if X.ndim != 4:
            raise ValueError("One Nifti image provided, so it should be 4D")

    n_obs = X.shape[3]
    if n_obs < 3:
        print("check the results; with fewer than 3 observations the "
              "correlations will be NaN or always 1")

    y = np.asarray(y)
    if y.ndim > 1:
        raise ValueError("Dependent variable y should be unidimensional")

    return X, y
def process_img(stat_img, cluster_extent, voxel_thresh=1.96):
    """
    Parameters
    ----------
    stat_img : Niimg_like object
        Thresholded statistical map image
    cluster_extent : int
        Minimum number of voxels required to consider a cluster
    voxel_thresh : float, optional
        Threshold to apply to `stat_img`. If a negative number is provided a
        percentile threshold is used instead, where the percentile is
        `100 + voxel_thresh` (e.g., -5 thresholds at the 95th percentile).
        Default: 1.96

    Returns
    -------
    cluster_img : Nifti1Image
        4D image of brain regions, where each volume is a distinct cluster
    """
    # get input data image
    stat_img = image.index_img(check_niimg(stat_img, atleast_4d=True), 0)

    # threshold image
    if voxel_thresh < 0:
        voxel_thresh = '{}%'.format(100 + voxel_thresh)
    else:
        # ensure that threshold is not greater than most extreme value in image
        if voxel_thresh > np.abs(stat_img.get_data()).max():
            empty = np.zeros(stat_img.shape + (1,))
            return image.new_img_like(stat_img, empty)
    thresh_img = image.threshold_img(stat_img, threshold=voxel_thresh)

    # extract clusters
    min_region_size = cluster_extent * np.prod(thresh_img.header.get_zooms())
    clusters = []
    for sign in ['pos', 'neg']:
        # keep only data of given sign
        data = thresh_img.get_data().copy()
        data[(data < 0) if sign == 'pos' else (data > 0)] = 0

        # Do nothing if data array contains only zeros
        if np.any(data):
            try:
                clusters += [connected_regions(
                    image.new_img_like(thresh_img, data),
                    min_region_size=min_region_size,
                    extract_type='connected_components')[0]]
            except TypeError:  # for no clusters
                pass

    # Return empty image if no clusters were found
    if len(clusters) == 0:
        return image.new_img_like(thresh_img, np.zeros(data.shape + (1,)))

    # Reorder clusters by their size
    clust_img = image.concat_imgs(clusters)
    cluster_size = (clust_img.get_data() != 0).sum(axis=(0, 1, 2))
    new_order = np.argsort(cluster_size)[::-1]
    clust_img_ordered = image.index_img(clust_img, new_order)

    return clust_img_ordered
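
# Hedged usage sketch for process_img: threshold a synthetic "stat map" at
# |z| > 1.96 and keep clusters of at least 20 voxels. The random image is a
# stand-in for real data; with pure noise the result is usually the empty
# single-volume placeholder.
import numpy as np
import nibabel as nib

rng = np.random.RandomState(0)
toy_stat = nib.Nifti1Image(rng.standard_normal((12, 12, 12)),
                           np.diag([3., 3., 3., 1.]))
cluster_img = process_img(toy_stat, cluster_extent=20, voxel_thresh=1.96)
print(cluster_img.shape)  # (12, 12, 12, n_clusters)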
def gen_network_parcels(parlistfile, network, labels, dir_path):
    from nilearn.image import new_img_like, concat_imgs

    bna_img = nib.load(parlistfile)
    bna_data = np.round(bna_img.get_data(), 1)
    ##Get an array of unique parcels
    bna_data_for_coords_uniq = np.unique(bna_data)
    ##Number of parcels:
    par_max = len(bna_data_for_coords_uniq) - 1
    bna_data = bna_data.astype('int16')

    img_stack = []
    for idx in range(1, par_max + 1):
        roi_img = bna_data == bna_data_for_coords_uniq[idx].astype('int16')
        roi_img = roi_img.astype('int16')
        img_stack.append(roi_img)
    img_stack = np.array(img_stack).astype('int16')

    img_list = []
    for idy in range(par_max):
        roi_img_nifti = new_img_like(bna_img, img_stack[idy])
        img_list.append(roi_img_nifti)

    print('\nExtracting parcels associated with ' + network +
          ' network locations...\n')
    net_parcels = [i for j, i in enumerate(img_list) if j in labels]
    bna_4D = concat_imgs(net_parcels).get_data()
    index_vec = np.array(range(len(net_parcels))) + 1
    net_parcels_sum = np.sum(index_vec * bna_4D, axis=3)
    net_parcels_map_nifti = nib.Nifti1Image(net_parcels_sum, affine=np.eye(4))
    out_path = dir_path + '/' + network + '_parcels.nii.gz'
    nib.save(net_parcels_map_nifti, out_path)
    return out_path
def spread_labels(labels_img, roi_values=None, background_label=0):
    """
    Spread each ROI in labels_img into a 4D image.

    Parameters
    ----------
    labels_img: 3D Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Region definitions, as one image of labels.

    roi_values: List of int or the values in the labels_img, optional
        The values of `labels_img` that you want to use.
        If None, will extract all the values except from `background_label`.

    background_label: number, optional
        Label used in labels_img to represent background.

    Returns
    -------
    4d_labels_img: 4D Niimg-like object
    """
    lbls_img = niimg.load_img(labels_img)
    lbls_vol = lbls_img.get_data()
    if roi_values is None:
        roi_values = np.unique(lbls_vol)
        roi_values = roi_values[roi_values != background_label]

    atlas_roi_vols = [(lbls_vol == val) * val for val in roi_values]
    atlas_roi_imgs = [niimg.new_img_like(lbls_img, vol)
                      for vol in atlas_roi_vols]

    return niimg.concat_imgs(atlas_roi_imgs)
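
# A small usage sketch for spread_labels (the toy label volume is made up):
# a 3D image with labels {1, 2} becomes a 4D image with one volume per ROI,
# each volume containing only that ROI's original value.
import numpy as np
import nibabel as nib

labels_vol = np.zeros((8, 8, 8), dtype=np.int16)
labels_vol[1:3, 1:3, 1:3] = 1
labels_vol[5:7, 5:7, 5:7] = 2
toy_labels_img = nib.Nifti1Image(labels_vol, np.eye(4))

four_d = spread_labels(toy_labels_img)
print(four_d.shape)  # (8, 8, 8, 2)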
def concat_3D_imgs(in_files, out_file=None):
    """
    Use nilearn.image.concat_imgs to concat 3D volumes into one 4D volume.

    If `in_files` is a list of 3D volumes the return value is one 4D volume.
    Else if `in_files` contains any 4D volume the return value is `in_files`,
    unchanged.

    Returns
    -------
    out_file: Nifti1Image or list
        The concatenated 4D volume, or the input list when not all inputs
        are 3D.
    """
    import nilearn.image as niimg
    from nilearn._utils import check_niimg_3d

    all_3D = True
    for idx, img in enumerate(in_files):
        try:
            _ = check_niimg_3d(img)
        except Exception:
            all_3D = False
            break

    if not all_3D:
        # raise AttributeError('Expected all input images to be 3D volumes, but '
        #                      ' at least the {}th is not.'.format(idx))
        return in_files
    else:
        return niimg.concat_imgs(in_files)
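
# Usage sketch for concat_3D_imgs (toy in-memory images assumed): a list of
# 3D volumes is concatenated to 4D, while a list containing any 4D volume is
# returned unchanged.
import numpy as np
import nibabel as nib

vol = nib.Nifti1Image(np.zeros((4, 4, 4)), np.eye(4))
merged = concat_3D_imgs([vol, vol, vol])
print(merged.shape)  # (4, 4, 4, 3)

vol4d = nib.Nifti1Image(np.zeros((4, 4, 4, 2)), np.eye(4))
passthrough = concat_3D_imgs([vol, vol4d])
print(isinstance(passthrough, list))  # True -- input returned as-is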
def create_parcel_atlas(parcel_list):
    """
    Create a 3D Nifti1Image atlas parcellation of consecutive integer
    intensities from an input list of ROI's.

    Parameters
    ----------
    parcel_list : list
        List of 3D boolean numpy arrays or binarized Nifti1Images
        corresponding to ROI masks.

    Returns
    -------
    net_parcels_map_nifti : Nifti1Image
        A nibabel-based nifti image consisting of a 3D array with integer
        voxel intensities corresponding to ROI membership.
    parcel_list_exp : list
        List of 3D boolean numpy arrays or binarized Nifti1Images
        corresponding to ROI masks, prepended with a background image of
        zeros.
    """
    import gc
    from nilearn.image import new_img_like, concat_imgs

    parcel_list_exp = [new_img_like(parcel_list[0],
                                    np.zeros(parcel_list[0].shape,
                                             dtype=bool))] + parcel_list
    concatted_parcels = concat_imgs(parcel_list_exp, dtype=np.float32)
    # Weight each volume by its index (background stays 0) and collapse to 3D.
    index_vec = np.array(range(len(parcel_list_exp))).astype('float32')
    parcel_sum = np.sum(index_vec * np.asarray(concatted_parcels.dataobj),
                        axis=3, dtype=np.uint16)
    net_parcels_map_nifti = nib.Nifti1Image(parcel_sum,
                                            affine=parcel_list[0].affine)
    del concatted_parcels, parcel_sum, parcel_list
    gc.collect()
    return net_parcels_map_nifti, parcel_list_exp
def transform(self, seeds_img=None):
    seeds_img = seeds_img or self.vt_mask_img

    X = self.masker.transform(self.func_img)
    self.func_img = self.masker.inverse_transform(X)

    # Reordering data
    print("Sorting data...")
    self.class_labels, self.stim_labels, sorted_idx \
        = reorder_haxby_labels(self.stim_labels, self.sessions)
    imgs = []
    for sess_idx, sess_id in zip(sorted_idx, np.unique(self.sessions)):
        for img_idx in sess_idx:
            # Convert the relative img_idx (to the session start) into
            # absolute stim_idx (over all sessions)
            stim_idx = np.nonzero(self.sessions == sess_id)[0][img_idx]
            imgs += nibabel.four_to_three(
                index_img(self.func_img, stim_idx))
    self.func_img = concat_imgs(imgs)

    # Average across sessions
    print("Averaging data...")
    self.func_img, self.img_labels = self.my_cache(average_data)(
        grouping=self.grouping,
        func_img=self.func_img,
        ordered_class_labels=self.class_labels,
        stim_labels=self.stim_labels,
        sessions=self.sessions)

    self.searchlight = RsaSearchlight(mask_img=self.mask_img,
                                      seeds_img=seeds_img,
                                      memory_params=self.memory_params,
                                      radius=self.radius)
    self.searchlight.fit()
    self.similarity_comparisons, self.similarity_std, self.n_voxels = \
        self.searchlight.transform(func_img=self.func_img)
def _check_vol_to_surf_results(img, mesh):
    mni_mask = datasets.load_mni152_brain_mask()
    for kind, interpolation, mask_img in itertools.product(
            ['ball', 'line'], ['linear', 'nearest'], [mni_mask, None]):
        proj_1 = vol_to_surf(img, mesh, kind=kind,
                             interpolation=interpolation, mask_img=mask_img)
        assert_true(proj_1.ndim == 1)
        img_rot = image.resample_img(
            img, target_affine=rotation(np.pi / 3., np.pi / 4.))
        proj_2 = vol_to_surf(img_rot, mesh, kind=kind,
                             interpolation=interpolation, mask_img=mask_img)
        # The projection values for the rotated image should be close
        # to the projection for the original image
        diff = np.abs(proj_1 - proj_2) / np.abs(proj_1)
        assert_true(np.mean(diff[diff < np.inf]) < .03)
        img_4d = image.concat_imgs([img, img])
        proj_4d = vol_to_surf(img_4d, mesh, kind=kind,
                              interpolation=interpolation, mask_img=mask_img)
        nodes, _ = surface.load_surf_mesh(mesh)
        assert_array_equal(proj_4d.shape, [nodes.shape[0], 2])
        assert_array_almost_equal(proj_4d[:, 0], proj_1, 3)
def transform(self, X):
    """Predict data from X.

    Parameters
    ----------
    X: Niimg-like object
        Source data

    Returns
    -------
    X_transform: Niimg-like object
        Predicted data
    """
    if isinstance(X, (list, np.ndarray)):
        X = concat_imgs(X)
    X_ = self.masker_.transform(X)
    X_transform = np.zeros_like(X_)
    # Average the piecewise transforms learned on each bag.
    for i in range(self.n_bags):
        X_transform += piecewise_transform(self.labels_[i], self.fit_[i], X_)
    X_transform /= self.n_bags
    return self.masker_.inverse_transform(X_transform)
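
# Hedged end-to-end sketch for the fit/transform pair above. It assumes these
# methods belong to an fmralign-style PairwiseAlignment estimator (suggested
# by the fit_one_parcellation / piecewise_transform helpers); the class name,
# parameter values, and image lists below are placeholders, not verified
# against any particular release.
#
#     aligner = PairwiseAlignment(alignment_method='scaled_orthogonal',
#                                 n_pieces=64, n_bags=3, n_jobs=2)
#     aligner.fit(source_imgs, target_imgs)     # Niimg-like sources/targets
#     predicted = aligner.transform(new_source_imgs)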
def get_coords_ball(r2):
    # Voxel coordinates of the peak value, converted to world space.
    vox_coords = np.unravel_index(r2.get_fdata().argmax(), r2.shape)
    coords = image.coord_transform(*vox_coords, r2.affine)
    _, ball = _apply_mask_and_get_affinity(
        [list(coords)], image.concat_imgs([r2]), 3., True)
    ball = ball.toarray().reshape(r2.shape) * 100.
    ball = image.new_img_like(r2, ball.astype(int))
    return coords, ball
def concat_imgs(imgs):
    """
    Simple wrapper for nilearn.image.concat_imgs.

    :param imgs: list of nifti images or filenames
    :return: Nifti1Image
    """
    return image.concat_imgs(imgs, dtype=np.float32)
def correlation_searchlight(A, B, mask=None, radius=2, interpolate=False,
                            n_jobs=1, data_dir=None):
    """
    Parameters
    ----------
    A : list
        list of (even_trials, odd_trials) tuples for the first condition (A);
        each tuple represents a subject's mean fMRI data for both trials, and
        even/odd_trials should be a 3D niimg or path (string) to a NIfTI file
    B : list
        list of (even_trials, odd_trials) tuples for the second condition (B);
        formatted the same as A
    mask : Niimg-like object
        boolean image giving location of voxels containing usable signals
    radius : int
        radius of the searchlight sphere
    interpolate : bool
        whether or not to skip every other sphere and interpolate the results;
        used to speed up the analysis
    n_jobs : int
        number of CPUs to split the work up between (-1 means "all CPUs")
    data_dir : string
        path to directory where MVPA results should be stored

    Returns
    -------
    score_map_fpaths : list
        list of paths to NIfTI files representing the MVPA scores for each
        subject
    """
    if not mask:
        niimgs = [niimg for trials in A + B for niimg in trials]
        mask = compute_epi_mask(mean_img(concat_imgs(niimgs)))
    spheres = extract_spheres(get_data(mask), radius, interpolate)
    _analyze_subject = partial(analyze_subject,
                               spheres=spheres,
                               interpolate=interpolate,
                               mask=mask,
                               data_dir=data_dir)

    if n_jobs > 1 or n_jobs == -1:
        score_map_fpaths = concurrent_exec(
            _analyze_subject,
            [(i, _A, _B) for i, (_A, _B) in enumerate(zip(A, B))],
            n_jobs)
    else:
        score_map_fpaths = []
        for subject_id, (_A, _B) in enumerate(zip(A, B)):
            score_map_fpath = _analyze_subject(subject_id, _A, _B)
            score_map_fpaths.append(score_map_fpath)

    return score_map_fpaths
def add_epi_fmaps_to_dwi_b0s(epi_fmaps, b0_threshold, max_per_spec,
                             dwi_spec_lines, dwi_imain):
    """Add additional images from EPI fieldmaps for distortion correction.

    In order to fill out the maximum number of images per distortion group,
    images from files in the fmap/ directory can be added to those already
    extracted from the DWI series.

    Examples
    --------
    >>> epi_fmaps = ["/data/sub-1/fmap/sub-1_dir-AP_epi.nii.gz",
    ...              "/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz"]
    """
    # Extract b=0 images as if we were only pulling images from epi fmaps.
    fmaps_4d, fmap_b0_indices, fmap_original_files = load_epi_dwi_fieldmaps(
        epi_fmaps, b0_threshold)
    fmap_spec_lines, fmap_imain, fmap_report, fmap_spec_map = \
        topup_inputs_from_4d_file(fmaps_4d, fmap_b0_indices,
                                  fmap_original_files,
                                  image_source="EPI fieldmap",
                                  max_per_spec=max_per_spec)

    # Check how many are present in each group from just the dwi files
    spec_counts = defaultdict(int)
    for dwi_spec in dwi_spec_lines:
        spec_counts[dwi_spec] += 1

    # Only add as many as you need to fill out max_per_spec
    fmap_indices_to_add = []
    for image_num, epi_spec in enumerate(fmap_spec_lines):
        if spec_counts[epi_spec] + 1 > max_per_spec:
            continue
        fmap_indices_to_add.append(image_num)
        spec_counts[epi_spec] += 1

    # No additional epi fmaps to add
    if not fmap_indices_to_add:
        return dwi_imain, dwi_spec_lines, \
            ' No Additional images from EPI fieldmaps were added because the ' \
            'maximum number of images per distortion group was reached.'

    # Add the epi b=0's to the dwi b=0's
    topup_imain = concat_imgs(
        [dwi_imain, index_img(fmap_imain, fmap_indices_to_add)],
        auto_resample=True)
    topup_spec_lines = dwi_spec_lines + [fmap_spec_lines[idx]
                                         for idx in fmap_indices_to_add]

    new_report = topup_selection_to_report(fmap_indices_to_add,
                                           fmap_original_files,
                                           fmap_spec_map,
                                           image_source='EPI fieldmap')

    return topup_imain, topup_spec_lines, new_report
def test_fmri_inputs_for_non_parametric_inference():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)

        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1),)
        _, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE],
                  ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # test missing second-level contrast
        # niimgs as input
        assert_raises(ValueError, non_parametric_inference, niimgs, None,
                      sdes)
        assert_raises(ValueError, non_parametric_inference, niimgs,
                      confounds, sdes)
        # 4d niimg as input
        assert_raises(ValueError, non_parametric_inference, niimg_4d, None,
                      sdes)

        # test wrong input errors
        # test first level model
        assert_raises(ValueError, non_parametric_inference, flm)
        # test list of less than two niimgs
        assert_raises(ValueError, non_parametric_inference, [FUNCFILE])
        # test dataframe
        assert_raises(ValueError, non_parametric_inference, niidf)
        # test niimgs requirements
        assert_raises(ValueError, non_parametric_inference, niimgs)
        assert_raises(ValueError, non_parametric_inference, niimgs + [[]],
                      confounds)
        assert_raises(ValueError, non_parametric_inference, [FUNCFILE])
        # test other objects
        assert_raises(ValueError, non_parametric_inference,
                      'random string object')
        del X, FUNCFILE, func_img
def sbc_group(in_dir, out_dir):
    """
    for each session: load sbc (z transformed) of all subjects and
    calculate mean & sd
    """
    os.makedirs(out_dir, exist_ok=True)
    niis = glob(os.path.join(in_dir, "*.nii.gz"))
    sessions = list(
        set(map(lambda f: os.path.basename(f).split("_")[-1].split(".")[0],
                niis)))
    sessions.sort()
    print("calc mean sbc for {}".format(sessions))

    for ses in sessions:
        niis = glob(os.path.join(in_dir, "*{}.nii.gz".format(ses)))
        niis.sort()
        print(niis)
        from nilearn import image, plotting

        out_filename_mean_nii = os.path.join(
            out_dir, "sbc_1_mean_ses-{}.nii.gz".format(ses))
        out_filename_sd_nii = os.path.join(
            out_dir, "sbc_1_sd_ses-{}.nii.gz".format(ses))
        out_filename_mean_png = os.path.join(
            out_dir, "sbc_2_mean_ses-{}.png".format(ses))
        out_filename_sd_png = os.path.join(
            out_dir, "sbc_2_sd_ses-{}.png".format(ses))
        out_filename_list = os.path.join(
            out_dir, "sbc_ses-{}_scans.txt".format(ses))

        mean_sbc = image.mean_img(niis)
        mean_sbc.to_filename(out_filename_mean_nii)

        concat_img = image.concat_imgs(niis)
        sd_sbc = image.math_img("np.std(img, axis=-1)", img=concat_img)
        sd_sbc.to_filename(out_filename_sd_nii)

        with open(out_filename_list, "w") as fi:
            fi.write("\n".join(niis))

        display = plotting.plot_stat_map(
            mean_sbc, threshold=0.5, cut_coords=(0, -52, 18),
            title="mean sbc {} (fisher z)".format(ses))
        display.add_markers(marker_coords=[(0, -52, 18)],
                            marker_color='g', marker_size=300)
        display.savefig(out_filename_mean_png)
        display.close()

        display = plotting.plot_stat_map(
            sd_sbc, cut_coords=(0, -52, 18),
            title="sd sbc {} (fisher z)".format(ses))
        display.add_markers(marker_coords=[(0, -52, 18)],
                            marker_color='g', marker_size=300)
        display.savefig(out_filename_sd_png)
        display.close()
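
# The session discovery above keys on the last "_"-separated token of the
# file name, before the extension. A quick runnable check of that parsing
# rule (the file name is made up):
import os
fname = "sub-01_task-rest_ses-2.nii.gz"
ses = os.path.basename(fname).split("_")[-1].split(".")[0]
print(ses)  # 'ses-2'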
def concat_imgs(in_files, out_file=None):
    """
    Use nilearn.image.concat_imgs to concat images of up to 4 dimensions.

    Returns
    -------
    out_img: Nifti1Image
        The concatenated image (`out_file` is accepted for interface
        compatibility but is not used).
    """
    import nilearn.image as niimg
    return niimg.concat_imgs(in_files)
def create_parcel_atlas(parcel_list):
    from nilearn.image import new_img_like, concat_imgs

    parcel_background = new_img_like(
        parcel_list[0], np.zeros(parcel_list[0].shape, dtype=bool))
    parcel_list_exp = [parcel_background] + parcel_list
    parcellation = concat_imgs(parcel_list_exp).get_data()
    index_vec = np.array(range(len(parcel_list_exp))) + 1
    net_parcels_sum = np.sum(index_vec * parcellation, axis=3)
    net_parcels_map_nifti = nib.Nifti1Image(net_parcels_sum,
                                            affine=parcel_list[0].affine)
    return net_parcels_map_nifti, parcel_list_exp
def _process_inputs(self):
    """validate and process inputs into useful form.
    Returns a list of nilearn maskers and the list of corresponding label
    names."""
    import nilearn.input_data as nl
    import nilearn.image as nli

    label_data = nli.concat_imgs(self.inputs.label_files)
    maskers = []

    # determine form of label files, choose appropriate nilearn masker
    if np.amax(label_data.dataobj) > 1:  # 3d label file
        n_labels = np.amax(label_data.dataobj)
        maskers.append(nl.NiftiLabelsMasker(label_data))
    else:  # 4d labels
        n_labels = label_data.shape[3]
        if self.inputs.incl_shared_variance:  # independent computation
            for img in nli.iter_img(label_data):
                maskers.append(
                    nl.NiftiMapsMasker(self._4d(img.dataobj, img.affine)))
        else:  # one computation fitting all
            maskers.append(nl.NiftiMapsMasker(label_data))

    # check label list size
    if not np.isclose(int(n_labels), n_labels):
        raise ValueError(
            "The label files {} contain invalid value {}. Check input.".format(
                self.inputs.label_files, n_labels))

    if len(self.inputs.class_labels) != n_labels:
        raise ValueError(
            "The length of class_labels {} does not "
            "match the number of regions {} found in "
            "label_files {}".format(self.inputs.class_labels, n_labels,
                                    self.inputs.label_files))

    if self.inputs.include_global:
        global_label_data = label_data.dataobj.sum(axis=3)  # sum across all regions
        global_label_data = (
            np.rint(global_label_data).astype(int).clip(0, 1))  # binarize
        global_label_data = self._4d(global_label_data, label_data.affine)
        global_masker = nl.NiftiLabelsMasker(global_label_data,
                                             detrend=self.inputs.detrend)
        maskers.insert(0, global_masker)
        self.inputs.class_labels.insert(0, "GlobalSignal")

    for masker in maskers:
        masker.set_params(detrend=self.inputs.detrend)

    return maskers
def _concat_copes(cope_file, output_dir):
    """Concatenate COPE images and save as a file."""
    from nilearn.image import concat_imgs

    copes = []
    for i in cope_file:
        copes.append(i)

    copes_concat = concat_imgs(copes, auto_resample=True)
    copes_concat.to_filename(output_dir)

    return output_dir
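
# Usage sketch for _concat_copes, with in-memory toy images in place of the
# COPE file paths a pipeline would normally pass; auto_resample=True means
# images on different grids are resampled to a common one before stacking.
import numpy as np
import nibabel as nib

toy_copes = [nib.Nifti1Image(np.random.rand(4, 4, 4), np.eye(4))
             for _ in range(3)]
out = _concat_copes(toy_copes, 'group_copes.nii.gz')  # writes (4, 4, 4, 3)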
def join_files(in_files):
    import os.path as op
    from nilearn.image import concat_imgs
    from nipype.utils.filemanip import split_filename

    _, base, _ = split_filename(in_files[0])
    base = '_'.join(base.split('_')[:-1])
    out_file = op.abspath(base + '.nii.gz')

    img = concat_imgs(in_files)
    img.to_filename(out_file)

    return out_file
def subject_data(sub):
    # Label each of the 360 concatenated volumes with its run (4 runs of 90).
    sessions = np.zeros(360)
    sessions[:90] = 1
    sessions[90:180] = 2
    sessions[180:270] = 3
    sessions[270:] = 4
    return image.smooth_img(
        image.clean_img(
            image.concat_imgs(src.format(sub=sub) for src in NIFTI_SRC),
            sessions=sessions),
        SMOOTHING)
def load_epi_dwi_fieldmaps(fmap_list, b0_threshold):
    """Creates a 4D image of b=0s from a list of input images.

    Parameters
    ----------
    fmap_list: list
        List of paths to epi fieldmap images
    b0_threshold: int
        Maximum b value for an image to be considered a b=0

    Returns
    -------
    concatenated_images: spatial image
        The b=0 volumes concatenated into a 4D image
    b0_indices: list
        List of the indices in the concatenated images that contain usable
        images
    original_files: list
        List of the original files where each b=0 image came from.
    """
    # Add in the rpe data, if it exists
    b0_indices = []
    original_files = []
    image_series = []

    for fmap_file in fmap_list:
        pth, fname, _ = split_filename(fmap_file)
        potential_bval_file = op.join(pth, fname) + ".bval"
        starting_index = len(original_files)
        fmap_img = load_img(fmap_file)
        image_series.append(fmap_img)
        num_images = 1 if fmap_img.ndim == 3 else fmap_img.shape[3]
        original_files += [fmap_file] * num_images

        # Which images are b=0 images?
        if op.exists(potential_bval_file):
            bvals = np.loadtxt(potential_bval_file)
            too_large = np.flatnonzero(bvals > b0_threshold)
            too_large_values = bvals[too_large]
            if too_large.size:
                LOGGER.warning(
                    "Excluding volumes %s from the %s because b=%s is "
                    "greater than %d",
                    str(too_large), fmap_file, str(too_large_values),
                    b0_threshold)
            _b0_indices = np.flatnonzero(bvals < b0_threshold) + starting_index
        else:
            _b0_indices = np.arange(num_images) + starting_index
        b0_indices += _b0_indices.tolist()

    concatenated_images = concat_imgs(image_series, auto_resample=True)
    return concatenated_images, b0_indices, original_files
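
# A runnable toy check of the b=0 selection rule used above (the bvals are
# made up): volumes with b-value below b0_threshold are kept as usable b=0
# images, and the rest are reported as excluded.
import numpy as np

bvals = np.array([0, 5, 1000, 0, 2000])
b0_threshold = 100
print(np.flatnonzero(bvals < b0_threshold))  # [0 1 3] -> usable b=0 volumes
print(np.flatnonzero(bvals > b0_threshold))  # [2 4]   -> excluded volumes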
def test_generate_report_from_4d(self):
    ''' if the in_file was 4d, it should be able to produce the same report
    anyway (using arbitrary volume) '''
    # makeshift 4d in_file
    mni_file = MNI_2MM
    mni_4d = image.concat_imgs([mni_file, mni_file, mni_file])
    mni_4d_file = os.path.join(os.getcwd(), 'mni_4d.nii.gz')
    nb.save(mni_4d, mni_4d_file)

    _smoke_test_report(
        BETRPT(in_file=mni_4d_file, generate_report=True, mask=True),
        'testBET4d.html')
def _process_inputs(self):
    ''' validate and process inputs into useful form.
    Returns a list of nilearn maskers and the list of corresponding label
    names.'''
    import nilearn.input_data as nl
    import nilearn.image as nli

    label_data = nli.concat_imgs(self.inputs.label_files)
    maskers = []

    # determine form of label files, choose appropriate nilearn masker
    if np.amax(label_data.get_data()) > 1:  # 3d label file
        n_labels = np.amax(label_data.get_data())
        maskers.append(nl.NiftiLabelsMasker(label_data))
    else:  # 4d labels
        n_labels = label_data.get_data().shape[3]
        if self.inputs.incl_shared_variance:  # independent computation
            for img in nli.iter_img(label_data):
                maskers.append(
                    nl.NiftiMapsMasker(self._4d(img.get_data(), img.affine)))
        else:  # one computation fitting all
            maskers.append(nl.NiftiMapsMasker(label_data))

    # check label list size
    if not np.isclose(int(n_labels), n_labels):
        raise ValueError(
            'The label files {} contain invalid value {}. Check input.'
            .format(self.inputs.label_files, n_labels))

    if len(self.inputs.class_labels) != n_labels:
        raise ValueError('The length of class_labels {} does not '
                         'match the number of regions {} found in '
                         'label_files {}'.format(self.inputs.class_labels,
                                                 n_labels,
                                                 self.inputs.label_files))

    if self.inputs.include_global:
        global_label_data = label_data.get_data().sum(
            axis=3)  # sum across all regions
        global_label_data = np.rint(global_label_data).astype(int).clip(
            0, 1)  # binarize
        global_label_data = self._4d(global_label_data, label_data.affine)
        global_masker = nl.NiftiLabelsMasker(global_label_data,
                                             detrend=self.inputs.detrend)
        maskers.insert(0, global_masker)
        self.inputs.class_labels.insert(0, 'GlobalSignal')

    for masker in maskers:
        masker.set_params(detrend=self.inputs.detrend)

    return maskers
def _run_interface(self, runtime):
    self._results['out_file'] = genfname(self.inputs.in_files[0],
                                         suffix='merged')
    new_nii = concat_imgs(self.inputs.in_files, dtype=self.inputs.dtype)

    if isdefined(self.inputs.header_source):
        src_hdr = nb.load(self.inputs.header_source).header
        new_nii.header.set_xyzt_units(t=src_hdr.get_xyzt_units()[-1])
        new_nii.header.set_zooms(list(new_nii.header.get_zooms()[:3]) +
                                 [src_hdr.get_zooms()[3]])

    new_nii.to_filename(self._results['out_file'])
    return runtime
def _run_interface(self, runtime):
    ext = '.nii.gz' if self.inputs.compress else '.nii'
    self._results['out_file'] = fname_presuffix(
        self.inputs.in_files[0], suffix='_merged' + ext,
        newpath=runtime.cwd, use_ext=False)
    new_nii = concat_imgs(self.inputs.in_files, dtype=self.inputs.dtype)

    if isdefined(self.inputs.header_source):
        src_hdr = nb.load(self.inputs.header_source).header
        new_nii.header.set_xyzt_units(t=src_hdr.get_xyzt_units()[-1])
        new_nii.header.set_zooms(list(new_nii.header.get_zooms()[:3]) +
                                 [src_hdr.get_zooms()[3]])

    new_nii.to_filename(self._results['out_file'])
    return runtime
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Whether to threshold `img` before clustering
    index : array_like, optional
        Whether to extract volumes from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """
    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`);
        # subtract one voxel to ensure we aren't hitting this thresholding
        # issue
        try:
            clsts = connected_regions(
                subbrick,
                min_region_size=int(min_cluster_size) - 1,
                smoothing_fwhm=None,
                extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1,)))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered
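
# A quick demonstration of the voxel-size trick used in spatclust (the toy
# affine is made up): forcing unit voxel sizes makes `min_region_size` count
# voxels instead of mm^3.
import numpy as np

affine = np.diag([2.0, 2.0, 3.0, 1.0])   # 2 x 2 x 3 mm voxels
unit_affine = np.sign(affine)            # what img.set_sform receives above
print(np.abs(np.diag(unit_affine))[:3])  # [1. 1. 1.] -> 1 mm isotropic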
    zmap_filenames.append('/home/jmuraskin/Projects/CCD/working_v1/seed-to-voxel/%s/%s/%s_%s.nii.gz'
                          % (fc, secondlevel_folder_names[fb], fc, subj))

mask_filename = '/home/jmuraskin/Projects/CCD/working_v1/seg_probabilities/grey_matter_mask-20-percent.nii.gz'

from scipy.stats import zscore

# load phenotypic data
phenoFile = '/home/jmuraskin/Projects/CCD/Pheno/narsad+vt_new.csv'
pheno = read_csv(phenoFile)
pheno = pheno.set_index('participant')

ages = zscore(pheno.loc[goodsubj]['V1_DEM_001'])
mf = zscore(pheno.loc[goodsubj]['V1_DEM_002'])

motionTest = read_csv('/home/jmuraskin/Projects/CCD/CCD-scripts/analysis/CCD_meanFD.csv')
meanFD = zscore(motionTest[motionTest.FB == 'FEEDBACK'][motionTest.Subject_ID.isin(goodsubj)]['train_meanFD'])

imgs = image.concat_imgs(zmap_filenames)
clean_imgs = image.clean_img(imgs, confounds=[ages, mf, meanFD],
                             detrend=False, standardize=True)

from nilearn.decoding import SpaceNetRegressor

decoder = SpaceNetRegressor(mask=mask_filename, penalty="tv-l1",
                            eps=1e-1,  # prefer large alphas
                            memory="nilearn_cache", n_jobs=30)
decoder.fit(clean_imgs, behavioral_target)
# grab the LR and RL phase encoding rest images from one subject
rs_files1 = glob.glob('/Volumes/TRESOR/neurospin/Volumes/DANILO2/neurospin/population/HCP/S500-1/*/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST1_LR.nii.gz')

for i_file, rs_file in enumerate(rs_files1):
    sub_id = int(rs_file.split('/')[10])
    print('Doing %i/%i: %s...' % (i_file + 1, len(rs_files1), rs_file))

    if len(glob.glob('%i_regs_prec*' % sub_id)) > 0:
        print('Skipped...')
        continue

    # may take a while! -> unpacks 2 1TB gz archives
    # timeit on MBP: 1 loops, best of 1: 2min 13s
    if isinstance(rs_file, list):
        all_sub_rs_maps = concat_imgs(rs_file)
    else:
        all_sub_rs_maps = nib.load(rs_file)
    cur_shape = all_sub_rs_maps.get_data().shape
    size_in_GB = all_sub_rs_maps.get_data().nbytes / 1e9

    print('%i rs images: %.2f GB' % (cur_shape[-1], size_in_GB))

    ###########################################################################
    # dump network projections
    ###########################################################################

    # retrieve network projections
    from nilearn import datasets as ds
    smith_pkg = ds.fetch_atlas_smith_2009()
    icas_path = smith_pkg['rsn20']
def test_fmri_inputs():
    # Test processing of FMRI inputs
    with InTemporaryDirectory():
        # prepare fake data
        p, q = 80, 10
        X = np.random.randn(p, q)
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        des = pd.DataFrame(np.ones((T, 1)), columns=['a'])
        des_fname = 'design.csv'
        des.to_csv(des_fname)

        # prepare correct input first level models
        flm = FirstLevelModel(subject_label='01').fit(FUNCFILE,
                                                      design_matrices=des)
        flms = [flm, flm, flm]

        # prepare correct input dataframe and lists
        shapes = ((7, 8, 9, 1),)
        _, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]

        dfcols = ['subject_label', 'map_name', 'effects_map_path']
        dfrows = [['01', 'a', FUNCFILE],
                  ['02', 'a', FUNCFILE],
                  ['03', 'a', FUNCFILE]]
        niidf = pd.DataFrame(dfrows, columns=dfcols)
        niimgs = [FUNCFILE, FUNCFILE, FUNCFILE]
        niimg_4d = concat_imgs(niimgs)
        confounds = pd.DataFrame([['01', 1], ['02', 2], ['03', 3]],
                                 columns=['subject_label', 'conf1'])
        sdes = pd.DataFrame(X[:3, :3], columns=['intercept', 'b', 'c'])

        # smoke tests with correct input
        # First level models as input
        SecondLevelModel(mask_img=mask).fit(flms)
        SecondLevelModel().fit(flms)
        # Note : the following one creates a singular design matrix
        SecondLevelModel().fit(flms, confounds)
        SecondLevelModel().fit(flms, None, sdes)
        # dataframes as input
        SecondLevelModel().fit(niidf)
        SecondLevelModel().fit(niidf, confounds)
        SecondLevelModel().fit(niidf, confounds, sdes)
        SecondLevelModel().fit(niidf, None, sdes)
        # niimgs as input
        SecondLevelModel().fit(niimgs, None, sdes)
        # 4d niimg as input
        SecondLevelModel().fit(niimg_4d, None, sdes)

        # test wrong input errors
        # test first level model requirements
        assert_raises(ValueError, SecondLevelModel().fit, flm)
        assert_raises(ValueError, SecondLevelModel().fit, [flm])
        # test dataframe requirements
        assert_raises(ValueError, SecondLevelModel().fit,
                      niidf['subject_label'])
        # test niimgs requirements
        assert_raises(ValueError, SecondLevelModel().fit, niimgs)
        assert_raises(ValueError, SecondLevelModel().fit, niimgs + [[]],
                      confounds)
        # test first_level_conditions, confounds, and design
        assert_raises(ValueError, SecondLevelModel().fit, flms, ['', []])
        assert_raises(ValueError, SecondLevelModel().fit, flms, [])
        assert_raises(ValueError, SecondLevelModel().fit, flms,
                      confounds['conf1'])
        assert_raises(ValueError, SecondLevelModel().fit, flms, None, [])
subject_data = fetch_spm_multimodal_fmri()

#########################################################################
# Timing and design matrix parameter specification
tr = 2.  # repetition time, in seconds
slice_time_ref = 0.  # we will sample the design matrix at the beginning of each acquisition
drift_model = 'Cosine'  # We use a discrete cosine transform to model signal drifts.
period_cut = 128.  # The cutoff for the drift model is 1/128 Hz.
hrf_model = 'spm + derivative'  # The hemodynamic response function is the SPM canonical one.

#########################################################################
# Resample the images.
#
# This is achieved by the concat_imgs function of Nilearn.
from nilearn.image import concat_imgs, resample_img, mean_img
fmri_img = [concat_imgs(subject_data.func1, auto_resample=True),
            concat_imgs(subject_data.func2, auto_resample=True)]
affine, shape = fmri_img[0].affine, fmri_img[0].shape
print('Resampling the second image (this takes time)...')
fmri_img[1] = resample_img(fmri_img[1], affine, shape[:3])

#########################################################################
# Create mean image for display
mean_image = mean_img(fmri_img)

#########################################################################
# Make design matrices
import numpy as np
import pandas as pd
from nistats.design_matrix import make_first_level_design_matrix
design_matrices = []
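
#########################################################################
# A minimal sketch of the next step: building one design matrix with the
# parameters defined above. The events table here is a made-up placeholder
# (the real example reads the subject's trial files), and note that
# `period_cut` belongs to older nistats releases; recent versions use
# `high_pass` instead.
n_scans = 10  # placeholder; use fmri_img[0].shape[-1] for the real data
frame_times = np.arange(n_scans) * tr
toy_events = pd.DataFrame({'trial_type': ['faces', 'scrambled'],
                           'onset': [2., 10.],
                           'duration': [1., 1.]})
design = make_first_level_design_matrix(
    frame_times, toy_events, hrf_model=hrf_model,
    drift_model=drift_model, period_cut=period_cut)
print(design.columns.tolist())  # conditions, derivatives, drifts, constant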
subject_data = fetch_spm_auditory()
print(subject_data.func)  # print the list of names of functional images

###############################################################################
# We can display the first functional image and the subject's anatomy:
from nilearn.plotting import plot_stat_map, plot_anat, plot_img, show
plot_img(subject_data.func[0])
plot_anat(subject_data.anat)

###############################################################################
# Next, we concatenate all the 3D EPI images into a single 4D image,
# then we average them in order to create a background
# image that will be used to display the activations:
from nilearn.image import concat_imgs, mean_img
fmri_img = concat_imgs(subject_data.func)
mean_img = mean_img(fmri_img)

###############################################################################
# Specifying the experimental paradigm
# ------------------------------------
#
# We must now provide a description of the experiment, that is, define the
# timing of the auditory stimulation and rest periods. This is typically
# provided in an events.tsv file. The path of this file is
# provided in the dataset.
import pandas as pd
events = pd.read_table(subject_data['events'])
print(events)

###############################################################################
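# For orientation, an events table of this kind boils down to three columns;
# the values below are illustrative placeholders, the real ones come from the
# dataset's events.tsv read above.
events_sketch = pd.DataFrame({
    'onset': [42., 126., 210.],    # block start times, in seconds
    'duration': [42., 42., 42.],   # block durations, in seconds
    'trial_type': ['active'] * 3,  # condition label for each block
})
print(events_sketch)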