def apply_mask_all(subject_id, bold):
    """Mask a 4D BOLD image with the union of left and right amygdala masks.

    Parameters
    ----------
    subject_id : str or int
        Pilot subject identifier used to build the mask file paths under
        /gablab/p/eegfmri/analysis/iaps/pilot<subject_id>/segstats/.
    bold : nibabel image
        4D functional image to be masked.

    Returns
    -------
    X : numpy.ndarray, shape (n_timepoints, n_voxels)
        Time series of the in-mask voxels (boolean-masked, transposed).
    mask_data : numpy.ndarray of bool
        The combined (left OR right) amygdala mask array.
    """
    import numpy as np  # was used below but never imported
    import nibabel as nb
    from nilearn import _utils

    mask_dir = ('/gablab/p/eegfmri/analysis/iaps/pilot%s/segstats'
                % (subject_id))
    ra = nb.load('%s/right_amygdala_mask_2.nii.gz' % mask_dir)
    la = nb.load('%s/left_amygdala_mask_2.nii.gz' % mask_dir)

    # _apply_mask_fmri(bold, mask) and NiftiMasker did not work here,
    # so the masking is done manually.
    # NOTE: `bool`, not the numpy<1.24-only alias `np.bool`.
    rd = _utils.as_ndarray(ra.get_data(), dtype=bool)
    ld = _utils.as_ndarray(la.get_data(), dtype=bool)
    # Union of the two boolean masks (elementwise logical OR for bools).
    mask_data = rd + ld

    # Affines of ra and la are assumed identical -- TODO confirm.
    mask_affine = ra.get_affine()
    print('Mask [ra] affine')
    print(mask_affine)
    print('fMRI affine')
    # The original printed an undefined name `affine`; the fMRI affine
    # clearly meant here is the one of the bold image.
    print(bold.get_affine())

    data = bold.get_data()
    series = _utils.as_ndarray(data, order="C", copy=True)
    # Boolean-index the three spatial axes, transpose to (time, voxels).
    X = series[mask_data].T
    print('Masked Data %s' % (X.shape,))
    return X, mask_data
def generate_random_img(shape, length=1, affine=None, rand_gen=None):
    """Generate a random 4D image and a matching binary mask image.

    Parameters
    ----------
    shape : tuple of int
        Spatial shape of the image (e.g. ``(x, y, z)``).
    length : int, optional
        Number of volumes along the 4th axis. Default: 1.
    affine : numpy.ndarray, optional
        4x4 affine of the returned images. Default: identity.
    rand_gen : numpy.random.RandomState, optional
        Random generator. Default: a fresh ``RandomState(0)`` per call.

    Returns
    -------
    (img, mask_img) : tuple of nibabel.Nifti1Image
        ``img`` holds random normal data of shape ``shape + (length,)``;
        ``mask_img`` is an int8 mask of the first volume thresholded at 0.2.
    """
    # Fix: mutable/stateful default arguments. The original defaults
    # (`np.eye(4)`, `np.random.RandomState(0)`) were created once at
    # definition time; in particular the shared RandomState advanced its
    # state across calls, so the seed-0 default never actually made
    # successive calls reproducible. Use None sentinels instead.
    if affine is None:
        affine = np.eye(4)
    if rand_gen is None:
        rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(*(shape + (length,)))
    return (nibabel.Nifti1Image(data, affine),
            nibabel.Nifti1Image(as_ndarray(data[..., 0] > 0.2,
                                           dtype=np.int8),
                                affine))
def find_cut_coords(img, mask=None, activation_threshold=None):
    """Find the center of the largest activation connected component.

    Parameters
    ----------
    img : 3D Nifti1Image
        The brain map.
    mask : 3D ndarray, boolean, optional
        An optional brain mask. It is NOT modified in place.
    activation_threshold : float, optional
        The lower threshold to the positive activation. If None, the
        activation threshold is computed using the 80% percentile of
        the absolute value of the map.

    Returns
    -------
    coords : list of 3 floats
        The (x, y, z) voxel coordinates of the activation center, or
        the center of the data array (as an ndarray) when the map is
        empty (degenerate cases, preserved from the original behavior).
    """
    import numpy as np
    from scipy import ndimage
    from nilearn._utils import as_ndarray
    from nilearn._utils.ndimage import largest_connected_component
    from nilearn._utils.extmath import fast_abs_percentile
    # (Removed unused imports: warnings, new_img_like.)

    data = img.get_data()
    # To speed up computations, we work with partial views of the array,
    # and keep track of the offset.
    offset = np.zeros(3)

    # Deal with masked arrays:
    if hasattr(data, 'mask'):
        not_mask = np.logical_not(data.mask)
        if mask is None:
            mask = not_mask
        else:
            # Fix: do not mutate the caller's array. The original
            # `mask *= not_mask` modified the `mask` argument in place
            # as a hidden side effect.
            mask = np.logical_and(mask, not_mask)
        data = np.asarray(data)

    # Get rid of potential memmapping.
    data = as_ndarray(data)
    my_map = data.copy()
    if mask is not None:
        slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
        my_map = my_map[slice_x, slice_y, slice_z]
        mask = mask[slice_x, slice_y, slice_z]
        my_map *= mask
        offset += [slice_x.start, slice_y.start, slice_z.start]

    # Testing min and max is faster than np.all(my_map == 0).
    if (my_map.max() == 0) and (my_map.min() == 0):
        return .5 * np.array(data.shape)

    if activation_threshold is None:
        activation_threshold = fast_abs_percentile(
            my_map[my_map != 0].ravel(), 80)
    mask = np.abs(my_map) > activation_threshold - 1.e-15
    # mask may be zero everywhere in rare cases.
    if mask.max() == 0:
        return .5 * np.array(data.shape)
    mask = largest_connected_component(mask)
    slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
    my_map = my_map[slice_x, slice_y, slice_z]
    mask = mask[slice_x, slice_y, slice_z]
    my_map *= mask
    offset += [slice_x.start, slice_y.start, slice_z.start]

    # For the second threshold, we use a mean, as it is much faster,
    # although it is less robust.
    second_threshold = np.abs(np.mean(my_map[mask]))
    second_mask = (np.abs(my_map) > second_threshold)
    if second_mask.sum() > 50:
        my_map *= largest_connected_component(second_mask)
    cut_coords = ndimage.center_of_mass(np.abs(my_map))
    # tuple + ndarray broadcasts elementwise, re-adding the view offsets.
    x_map, y_map, z_map = cut_coords + offset

    # Return as a list of scalars.
    return [x_map, y_map, z_map]
def fit(self, imgs, y, data_train=None):
    """Fit the searchlight.

    Parameters
    ----------
    imgs : Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        4D image.
    y : 1D array-like
        Target variable to predict. Must have exactly as many elements as
        3D images in imgs.
    data_train : np.array, optional
        Data to train on, if data for training is different from X.

    Attributes
    ----------
    `scores_` : numpy.ndarray
        search_light scores. Same shape as input parameter
        process_mask_img.
    """
    # Compute world coordinates of all in-mask voxels: append a row of
    # ones so the 4x4 affine can be applied with a single dot product.
    # NOTE: `int` instead of `np.int` -- the alias was removed in
    # numpy >= 1.24.
    mask, mask_affine = masking._load_mask_img(self.mask_img)
    mask_coords = np.where(mask != 0)
    mask_coords = np.asarray(
        mask_coords + (np.ones(len(mask_coords[0]), dtype=int),))
    mask_coords = np.dot(mask_affine, mask_coords)[:3].T

    # Compute world coordinates of all in-process-mask voxels.
    if self.process_mask_img is None:
        process_mask = mask
        process_mask_coords = mask_coords
    else:
        process_mask, process_mask_affine = \
            masking._load_mask_img(self.process_mask_img)
        process_mask_coords = np.where(process_mask != 0)
        process_mask_coords = np.asarray(
            process_mask_coords
            + (np.ones(len(process_mask_coords[0]), dtype=int),))
        process_mask_coords = np.dot(process_mask_affine,
                                     process_mask_coords)[:3].T

    # For each processed voxel, the sparse graph A holds the in-mask
    # voxels within `self.radius` (in world coordinates).
    clf = neighbors.NearestNeighbors(radius=self.radius)
    A = clf.fit(mask_coords).radius_neighbors_graph(process_mask_coords)
    del process_mask_coords, mask_coords
    A = A.tolil()

    # scores is an 1D array of CV scores with length equal to the number
    # of voxels in the processing mask (columns in process_mask).
    X = masking._apply_mask_fmri(
        imgs,
        nibabel.Nifti1Image(as_ndarray(mask, dtype=np.int8), mask_affine))

    estimator = self.estimator
    # Fix: `basestring` exists only on Python 2 and raises NameError on
    # Python 3; fall back to `str` there so string estimator names keep
    # resolving through ESTIMATOR_CATALOG.
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3
    if isinstance(estimator, string_types):
        estimator = ESTIMATOR_CATALOG[estimator]()

    # From here starts Dima's modifications: added the data_train
    # optional argument to the .fit method.
    if data_train is None:
        scores = search_light(X, y, estimator, A, self.scoring,
                              self.cv, self.n_jobs, self.verbose)
    else:
        scores = search_light(X, y, estimator, A, self.scoring,
                              self.cv, self.n_jobs, self.verbose,
                              data_train)

    # Scatter the 1D scores back into the 3D processing-mask volume.
    scores_3D = np.zeros(process_mask.shape)
    scores_3D[process_mask] = scores
    self.scores_ = scores_3D
    return self