def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """Extract confound signals from a list of 4D functional images.

    Parameters
    ----------
    imgs : Niimg-like object or list of Niimg-like objects
        Functional images from which confounds are extracted. A single
        image is wrapped into a one-element list.

    mask_img : str or Niimg-like object
        Binary mask imposed on each functional image. Resampled to the
        first image's grid if the fields of view differ.

    n_confounds : int, optional
        Number of high-variance confounds to extract (default 5).

    get_randomized_svd : bool, optional
        If True (and ``compute_not_mask`` is True), additionally compute a
        randomized SVD of the cleaned out-of-mask signals and stack its
        left singular vectors with the high-variance confounds.

    compute_not_mask : bool, optional
        If True, confounds are extracted from voxels *outside* ``mask_img``
        (intersected with a whole-brain EPI mask) rather than inside it.

    Returns
    -------
    confounds : list of numpy.ndarray
        One array per input image, of shape (n_timepoints, n_confounds)
        (wider when SVD components are stacked in).
    """
    confounds = []
    # Fix: collections.Iterable was removed in Python 3.10; use the abc
    # location. Strings are iterable but must be treated as a single path.
    if not isinstance(imgs, collections.abc.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    # Bring the mask onto the functional grid when FOVs differ.
    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non mask based confounds extraction")
        # Fix: np.int is removed in NumPy >= 1.24; plain int is equivalent.
        not_mask_data = np.logical_not(mask_img.get_data().astype(int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        # Restrict the "not mask" to voxels inside the brain.
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        # Fix: repaired garbled progress message ("high ariance ...]").
        print("[Confounds Extraction] high variance confounds computation")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            # Drop constant voxels: they carry no temporal information and
            # would make the SVD ill-conditioned.
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)
    return confounds
def extract_confounds(imgs, mask_img, n_confounds=10):
    """To extract confounds on list of subjects

    See nilearn.image.high_variance_confounds for technical details.

    Parameters
    ----------
    imgs : list of Nifti images, either str or nibabel.Nifti1Image
        Functional images on which confounds should be extracted

    mask_img : str or nibabel.Nifti1Image
        Mask image with binary values. This image is imposed on
        each functional image for confounds extraction.

    n_confounds : int, optional
        By default, 10 high variance confounds are extracted. Otherwise,
        confounds as specified are extracted.

    Returns
    -------
    confounds : list of Numpy arrays.
        Each numpy array is a confound corresponding to imgs provided.
        Each numpy array will have shape (n_timepoints, n_confounds)
    """
    confounds = []
    # Fix: collections.Iterable was removed in Python 3.10; use the abc
    # location. Strings are iterable but must be treated as a single path.
    if not isinstance(imgs, collections.abc.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = img.affine

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    # Resample the mask onto the functional grid when FOVs differ.
    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    for img in imgs:
        print("[Confounds Extraction] Image selected {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("Extracting high variance confounds")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        confounds.append(high_variance)
    return confounds
def roi_grand_std(in_file, dseg_file, out_file=None):
    """Compute one grand standard deviation per atlas ROI and write a CSV.

    Parameters
    ----------
    in_file : str or Niimg-like object
        4D functional image.
    dseg_file : str
        Discrete segmentation (atlas) image, same grid as ``in_file``.
    out_file : str, optional
        Output CSV path; defaults to ``grand_std.csv`` in the current
        working directory.

    Returns
    -------
    out_file : str
        Path of the written CSV (columns: oseg index, grand_std).
    """
    from nilearn import image as nli
    from nilearn._utils import check_niimg_4d
    import pandas as pd
    import os

    # Number of initial volumes dropped before computing statistics.
    n_dummy = 4

    if out_file is None:
        # Fix: build the path portably instead of string concatenation.
        out_file = os.path.join(os.getcwd(), 'grand_std.csv')

    atlaslabels = nli.load_img(dseg_file).get_fdata()
    img_nii = check_niimg_4d(
        in_file,
        dtype="auto",
    )
    func_data = nli.load_img(img_nii).get_fdata()[:, :, :, n_dummy:]
    ntsteps = func_data.shape[-1]
    # Voxels inside any labelled region, flattened to (n_voxels, n_timepoints).
    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    oseg = atlaslabels[atlaslabels > 0].reshape(-1)

    df = pd.DataFrame(data)
    df['oseg'] = oseg
    df['oseg'] = df.oseg.astype(int)
    # NOTE(review): ``x`` inside the lambda still contains the 'oseg' label
    # column, so label values leak into the std computation — confirm this
    # is intended before relying on the exact numbers.
    grand_stats = df.groupby('oseg').apply(
        lambda x: pd.Series(x.values.flatten().std()))
    grand_stats.columns = ['grand_std']
    grand_stats.to_csv(out_file)
    return out_file
def _get_volume(img, threshold=0, atlas=None, stride=1, t_start=0, t_end=-1,
                n_t=50, t_r=None, marker_size=3, cmap=cm.cold_hot,
                symmetric_cmap=True, vmax=None, vmin=None):
    """Build a dict of encoded voxel coordinates, colors and time-series
    values for an interactive 4D volume view.

    ``stride`` subsamples the spatial grid, ``t_start``/``t_end``/``n_t``
    select the displayed timepoints, and ``t_r`` (if given) converts frame
    indices into seconds for the time labels.
    """
    connectome = {}
    img = check_niimg_4d(img)
    # Label times in seconds only when a repetition time is provided.
    t_unit = "" if not t_r else " s"
    if not t_r:
        t_r = 1
    # Negative t_end counts from the end of the time axis (Python-style).
    if t_end < 0:
        t_end = img.shape[3] + t_end
    if not n_t:
        n_t = t_end - t_start
    # n_t evenly spaced (possibly repeated) timepoint indices.
    t_idx = np.round(np.linspace(t_start, t_end, n_t)).astype(int)
    t_labels = [str(t_r * t) + t_unit for t in t_idx]
    # Spatially subsample by `stride` and keep only the selected timepoints.
    data = _safe_get_data(img)[::stride, ::stride, ::stride, t_idx]
    # Voxel selection is based on the first displayed frame only.
    mask = np.abs(data[:, :, :, 0]) > threshold
    i, j, k = mask.nonzero()
    # Multiply indices back by `stride` so world coordinates refer to the
    # original (unsubsampled) grid.
    x, y, z = coord_transform(i * stride, j * stride, k * stride, img.affine)
    for coord, cname in [(x, "x"), (y, "y"), (z, "z")]:
        connectome["_con_{}".format(cname)] = encode(
            np.asarray(coord, dtype='<f4'))
    colors = colorscale(cmap, data.ravel(), symmetric_cmap=symmetric_cmap,
                        vmax=vmax, vmin=vmin)
    if atlas:
        atlas = check_niimg_3d(atlas)
        atlas_data = _safe_get_data(atlas)[::stride, ::stride, ::stride]
        # Atlas labels sampled at the same voxels as the data.
        connectome['atlas'] = encode(
            np.asarray(atlas_data[i, j, k], dtype='<f4'))
        connectome['atlas_nb'] = int(np.max(atlas_data))
    connectome['colorscale'] = colors['colors']
    connectome['cmin'] = float(colors['vmin'])
    connectome['cmax'] = float(colors['vmax'])
    connectome['n_time'] = n_t
    connectome['t_labels'] = t_labels
    # One encoded value array per displayed timepoint.
    values = [
        encode(np.asarray(data[i, j, k, t], dtype='<f4'))
        for t in range(data.shape[3])
    ]
    connectome['values'] = values
    return connectome
def inverse_transform(self, img, mask):
    """Transform data back to its original space.

    In other words, return an input X_original whose transform would be X.

    Parameters
    ----------
    img : 4D niimg_like
        Component weight maps.
    mask : 3D niimg_like
        Mask to apply on ``img``.

    Returns
    -------
    img_orig : 4D niimg_like
        Reconstructed original data, with fourth axis corresponding to time.

    Notes
    -----
    This is different from scikit-learn's approach, which ignores explained
    variance.
    """
    weight_img = check_niimg_4d(img)
    mask_img = check_niimg_3d(mask)
    weights = weight_img.get_fdata()
    mask_arr = mask_img.get_fdata()

    nx, ny, nz, n_components = weights.shape
    n_voxels = nx * ny * nz

    # Flatten to (voxels, components) / (voxels,) in Fortran order and keep
    # only in-mask voxels.
    weights_2d = np.reshape(weights, (n_voxels, n_components), order="F")
    mask_vec = np.reshape(mask_arr, n_voxels, order="F")
    X = weights_2d[mask_vec == 1, :]

    # Scale components by their explained variance before projecting back.
    X_orig = np.dot(np.dot(X, np.diag(self.explained_variance_)),
                    self.components_)
    if self.normalize:
        X_orig = self.scaler_.inverse_transform(X_orig.T).T

    # Scatter the reconstructed timeseries back into the full volume.
    n_t = X_orig.shape[1]
    volume = np.zeros((n_voxels, n_t))
    volume[mask_vec == 1, :] = X_orig
    volume = np.reshape(volume, (nx, ny, nz, n_t), order="F")

    return nib.Nifti1Image(volume, weight_img.affine, weight_img.header)
def test_check_niimg_wildcards():
    """Check wildcard (glob) expansion behavior of _utils.check_niimg."""
    tmp_dir = tempfile.tempdir + os.sep
    nofile_path = "/tmp/nofile"
    nofile_path_wildcards = "/tmp/no*file"
    wildcards_msg = ("No files matching the entered niimg expression: "
                     "'%s'.\n You may have left wildcards usage "
                     "activated: please set the global constant "
                     "'nilearn.EXPAND_PATH_WILDCARDS' to False to "
                     "deactivate this behavior.")
    file_not_found_msg = "File not found: '%s'"

    assert_equal(ni.EXPAND_PATH_WILDCARDS, True)
    # Check bad filename
    # Non existing file (with no magic) raise a ValueError exception
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)
    # Non matching wildcard raises a ValueError exception
    assert_raises_regex(ValueError,
                        wildcards_msg % re.escape(nofile_path_wildcards),
                        _utils.check_niimg, nofile_path_wildcards)

    # First create some testing data
    data_3d = np.zeros((40, 40, 40))
    data_3d[20, 20, 20] = 1
    img_3d = Nifti1Image(data_3d, np.eye(4))

    data_4d = np.zeros((40, 40, 40, 3))
    data_4d[20, 20, 20] = 1
    img_4d = Nifti1Image(data_4d, np.eye(4))

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_3d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename,
                                              wildcards=False).get_data(),
                           img_3d.get_data())

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_4d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename,
                                              wildcards=False).get_data(),
                           img_4d.get_data())

    #######
    # Testing with a glob matching exactly one filename
    # Using a glob matching one file containing a 3d image returns a 4d image
    # with 1 as last dimension.
    with testing.write_tmp_imgs(img_3d, create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_array_equal(_utils.check_niimg(glob_input).get_data()[..., 0],
                           img_3d.get_data())
    # Disabled globbing behavior should raise an ValueError exception
    with testing.write_tmp_imgs(img_3d, create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_raises_regex(ValueError,
                            file_not_found_msg % re.escape(glob_input),
                            _utils.check_niimg, glob_input,
                            wildcards=False)

    #######
    # Testing with a glob matching multiple filenames
    img_4d = _utils.check_niimg_4d((img_3d, img_3d))
    with testing.write_tmp_imgs(img_3d, img_3d, create_files=True,
                                use_wildcards=True) as globs:
        # Bug fix: rebuild the glob expression from the *current* tmp files.
        # Previously the stale glob_input from the single-file test above
        # (whose file is already deleted) was reused here.
        glob_input = tmp_dir + globs
        assert_array_equal(_utils.check_niimg(glob_input).get_data(),
                           img_4d.get_data())

    #######
    # Test when global variable is set to False => no globbing allowed
    ni.EXPAND_PATH_WILDCARDS = False

    # Non existing filename (/tmp/nofile) could match an existing one through
    # globbing but global wildcards variable overrides this feature => raises
    # a ValueError
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)

    # Verify wildcards function parameter has no effect
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path, wildcards=False)

    # Testing with an exact filename matching (3d case)
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_3d.get_data())

    # Testing with an exact filename matching (4d case)
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(_utils.check_niimg(filename).get_data(),
                           img_4d.get_data())

    # Reverting to default behavior
    ni.EXPAND_PATH_WILDCARDS = True
def test_check_niimg_4d():
    """Check 4D coercion and iterator behavior of _utils.check_niimg_4d."""
    assert_raises_regex(TypeError, 'nibabel format',
                        _utils.check_niimg_4d, 0)

    assert_raises_regex(TypeError, 'empty object',
                        _utils.check_niimg_4d, [])

    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)

    # Tests with return_iterator=False
    img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d])
    assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2))
    assert_array_equal(img_4d_1.get_affine(), affine)

    img_4d_2 = _utils.check_niimg_4d(img_4d_1)
    # Bug fix: compare the round-tripped image against the input image.
    # Previously img_4d_2 was compared with itself, which always passes.
    assert_array_equal(img_4d_1.get_data(), img_4d_2.get_data())
    assert_array_equal(img_4d_1.get_affine(), img_4d_2.get_affine())

    # Tests with return_iterator=True
    img_3d_iterator = _utils.check_niimg_4d([img_3d, img_3d],
                                            return_iterator=True)
    img_3d_iterator_length = sum(1 for _ in img_3d_iterator)
    assert_true(img_3d_iterator_length == 2)

    img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d],
                                              return_iterator=True)
    img_3d_iterator_2 = _utils.check_niimg_4d(img_3d_iterator_1,
                                              return_iterator=True)
    for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
        assert_true(img_1.get_data().shape == (10, 10, 10))
        assert_array_equal(img_1.get_data(), img_2.get_data())
        assert_array_equal(img_1.get_affine(), img_2.get_affine())

    img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d],
                                              return_iterator=True)
    img_3d_iterator_2 = _utils.check_niimg_4d(img_4d_1,
                                              return_iterator=True)
    for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
        assert_true(img_1.get_data().shape == (10, 10, 10))
        assert_array_equal(img_1.get_data(), img_2.get_data())
        assert_array_equal(img_1.get_affine(), img_2.get_affine())

    # This should raise an error: a 3D img is given and we want a 4D
    assert_raises_regex(DimensionError,
                        'Data must be a 4D Niimg-like object '
                        'but you provided a 3D',
                        _utils.check_niimg_4d, img_3d)

    # Test a Niimg-like object that does not hold a shape attribute
    phony_img = PhonyNiimage()
    _utils.check_niimg_4d(phony_img)

    a = nibabel.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    b = np.zeros((10, 10, 10))
    c = _utils.check_niimg_4d([a, b], return_iterator=True)
    assert_raises_regex(TypeError,
                        'Error encountered while loading image #1',
                        list, c)

    b = nibabel.Nifti1Image(np.zeros((10, 20, 10)), np.eye(4))
    c = _utils.check_niimg_4d([a, b], return_iterator=True)
    assert_raises_regex(
        ValueError,
        'Field of view of image #1 is different from reference FOV',
        list, c)
def check_embedded_atlas_masker(estimator, atlas_type=None, img=None,
                                multi_subject=True, seeds=None, radius=None,
                                t_r=None, low_pass=None, high_pass=None):
    """Base function to return masker type and its parameters

    Accepts all Nilearn masker types but returns only Maps or Labels masker.
    The idea being that this function returns an object with essential
    parameters embedded in it such as repetition time, low pass, high pass,
    standardize, detrend for resting state fMRI data analysis.

    Mostly useful for pipelined analysis.

    Parameters
    ----------
    estimator : object, instance of all masker types
        Accepts any instance masker type from nilearn.input_data

    img : maps_img or labels_img
        Used in initialization of instance masker object depending upon
        the length/type of the image. If maps_img related then used in
        NiftiMapsMasker instance or labels_img then NiftiLabelsMasker.

    seeds : List of triplet of coordinates in native space
        Used in NiftiSpheresMasker initialization.

    radius : float, optional
        Sphere radius forwarded to NiftiSpheresMasker when
        ``atlas_type='spheres'``.

    atlas_type : str {'maps', 'labels'}
        'maps' implies NiftiMapsMasker
        'labels' implies NiftiLabelsMasker

    multi_subject : bool, optional
        Indicates whether to return masker of multi subject type.
        List of subjects.

    t_r, low_pass, high_pass : optional
        When given, embedded into the returned masker's parameters.

    Returns
    -------
    masker : NiftiMapsMasker, NiftiLabelsMasker
        Depending upon atlas type.

    params : dict
        Masker parameters
    """
    if atlas_type is not None:
        if atlas_type not in ['maps', 'labels', 'spheres', 'auto']:
            raise ValueError(
                "You provided unsupported masker type for atlas_type={0} "
                "selection. Choose one among them ['maps', 'labels', 'spheres']"
                "for atlas type masker. Otherwise atlas_type=None for general "
                "Nifti or MultiNifti Maskers.".format(atlas_type))

    # NOTE(review): the .format(estimator) below has no placeholder in the
    # string, so the estimator repr never appears in the message — likely a
    # missing '{0}'; confirm before changing the message.
    if not isinstance(estimator, (NiftiMasker, MultiNiftiMasker,
                                  NiftiMapsMasker, NiftiLabelsMasker,
                                  NiftiSpheresMasker)):
        raise ValueError("Unsupported 'estimator' instance of masker is "
                         "provided".format(estimator))

    if atlas_type == 'spheres' and seeds is None:
        raise ValueError("'seeds' must be specified for atlas_type='spheres'."
                         "See documentation nilearn.input_data.NiftiSpheresMasker.")

    if (atlas_type == 'maps' or atlas_type == 'labels') and img is None:
        raise ValueError("'img' should not be None for atlas_type={0} related "
                         "instance of masker. Atlas related maskers is created "
                         "by provided a valid atlas image. See documenation in "
                         "nilearn.input_data for specific "
                         "masker related either maps or labels".format(atlas_type))

    # 'auto' resolves to 'maps' (4D atlas) or 'labels' (3D atlas) by shape.
    if atlas_type == 'auto' and img is not None:
        img = check_niimg(img)
        if len(img.shape) > 3:
            atlas_type = 'maps'
        else:
            atlas_type = 'labels'

    if atlas_type == 'maps' and img is not None:
        img = check_niimg_4d(img)

    if atlas_type == 'labels' and img is not None:
        img = check_niimg_3d(img)

    # Derive a masker carrying the estimator's essential parameters.
    new_masker = check_embedded_nifti_masker(estimator,
                                             multi_subject=multi_subject)
    mask = getattr(new_masker, 'mask_img', None)
    estimator_mask = getattr(estimator, 'mask_img', None)
    if mask is None and estimator_mask is not None:
        new_masker.mask_img = estimator.mask_img

    if atlas_type is None:
        return new_masker
    else:
        # Copy the derived masker's parameters, dropping those that atlas
        # maskers do not accept.
        masker_params = new_masker.get_params()
        new_masker_params = dict()
        _ignore = set(('mask_strategy', 'mask_args', 'n_jobs',
                       'target_affine', 'target_shape'))
        for param in masker_params:
            if param in _ignore:
                continue
            if hasattr(new_masker, param):
                new_masker_params[param] = getattr(new_masker, param)
        # Append atlas extraction related parameters
        if t_r is not None:
            new_masker_params['t_r'] = t_r
        if low_pass is not None:
            new_masker_params['low_pass'] = low_pass
        if high_pass is not None:
            new_masker_params['high_pass'] = high_pass

        # NOTE(review): for atlas_type='spheres' this branch is entered with
        # img possibly None (len(img.shape) would raise) and the return below
        # precedes the spheres construction — the spheres path appears
        # unreachable as written; confirm intended control flow.
        if atlas_type is not None:
            if len(img.shape) > 3 and atlas_type == 'maps':
                new_masker_params['maps_img'] = img
                new_masker = NiftiMapsMasker(**new_masker_params)
            elif len(img.shape) == 3 and atlas_type == 'labels':
                new_masker_params['labels_img'] = img
                new_masker = NiftiLabelsMasker(**new_masker_params)
            return new_masker

        if seeds is not None and atlas_type == 'spheres':
            if radius is not None:
                new_masker_params['radius'] = radius
            new_masker_params['seeds'] = seeds
            new_masker = NiftiSpheresMasker(**new_masker_params)
        return new_masker
def sliced_gif_eff(nifti, gif_path, time_index=range(100, 200),
                   slice_index=range(-4, 52, 7), name=None, vmax=3.5,
                   duration=1000, symmetric_cbar=False, alpha=0.5, **kwargs):
    """
    Plots a series of nifti timepoints as nilearn plot_stat_map slices and
    assembles the frames into an animated GIF.

    :param nifti: 4D Nifti object to be plotted
    :param gif_path: Directory in which the output GIF is saved
    :param time_index: Time indices (volumes) to be plotted
    :param slice_index: Coordinates of the slices to be plotted
    :param name: Name of output gif (defaults to 'gif_<min>_<max>.gif')
    :param vmax: scale of colorbar (IMPORTANT! change if scale changes are
        necessary)
    :param duration: Per-frame duration of the GIF, in milliseconds
    :param symmetric_cbar: Whether the colorbar is symmetric around zero
    :param alpha: Transparency of the statistical overlay
    :param kwargs: Other args to be passed to nilearn's plot_stat_map
    :return: None; the GIF is written to disk
    """
    images = []
    num_slice = len(slice_index)
    nrow = int(np.floor(np.sqrt(num_slice)))
    ncol = int(np.ceil(num_slice / nrow))
    fig, ax = plt.subplots(nrows=nrow, ncols=ncol, figsize=(10, 10))
    ax = ax.reshape(-1)

    displays = []
    nifti = _utils.check_niimg_4d(nifti)

    # Bug fix: the first frame previously always showed volume 0 regardless
    # of time_index; the loop below starts at time_index[1:], so the first
    # requested timepoint is the one that should be rendered here.
    # Hoisted out of the slice loop: the same volume is drawn on every axis.
    nii_first = image.index_img(nifti, time_index[0])
    for currax, loc in enumerate(slice_index):
        # Only the last slice carries the colorbar, to avoid duplicating it.
        display = ni_plt.plot_stat_map(nii_first, cut_coords=[loc],
                                       colorbar=(loc == slice_index[-1]),
                                       symmetric_cbar=symmetric_cbar,
                                       figure=fig, axes=ax[currax],
                                       vmax=vmax, alpha=alpha, **kwargs)
        displays.append(display)
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    images.append(Image.open(buf))

    # Subsequent frames reuse the existing displays and only swap the overlay.
    for i in time_index[1:]:
        nii_i = image.index_img(nifti, i)
        for display in displays:
            display.add_overlay(nii_i, colorbar=False, vmax=vmax, alpha=alpha)
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        images.append(Image.open(buf))
    plt.close()

    if name is None:
        gif_outfile = os.path.join(
            gif_path,
            'gif_' + str(min(time_index)) + '_' + str(max(time_index)) +
            '.gif')
    else:
        gif_outfile = os.path.join(gif_path, str(name) + '.gif')

    # creates the gif from the frames
    images[0].save(gif_outfile, format='GIF', append_images=images[1:],
                   save_all=True, duration=duration, loop=0)
def test_check_niimg_wildcards():
    """Check wildcard (glob) expansion behavior of _utils.check_niimg."""
    tmp_dir = tempfile.tempdir + os.sep
    nofile_path = "/tmp/nofile"
    nofile_path_wildcards = "/tmp/no*file"
    wildcards_msg = ("No files matching the entered niimg expression: "
                     "'%s'.\n You may have left wildcards usage "
                     "activated: please set the global constant "
                     "'nilearn.EXPAND_PATH_WILDCARDS' to False to "
                     "deactivate this behavior.")
    file_not_found_msg = "File not found: '%s'"

    assert_equal(ni.EXPAND_PATH_WILDCARDS, True)
    # Check bad filename
    # Non existing file (with no magic) raise a ValueError exception
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)
    # Non matching wildcard raises a ValueError exception
    assert_raises_regex(ValueError,
                        wildcards_msg % re.escape(nofile_path_wildcards),
                        _utils.check_niimg, nofile_path_wildcards)

    # First create some testing data
    data_3d = np.zeros((40, 40, 40))
    data_3d[20, 20, 20] = 1
    img_3d = Nifti1Image(data_3d, np.eye(4))

    data_4d = np.zeros((40, 40, 40, 3))
    data_4d[20, 20, 20] = 1
    img_4d = Nifti1Image(data_4d, np.eye(4))

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename).get_data(),
            img_3d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename, wildcards=False).get_data(),
            img_3d.get_data())

    #######
    # Testing with an existing filename
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename).get_data(),
            img_4d.get_data())
    # No globbing behavior
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename, wildcards=False).get_data(),
            img_4d.get_data())

    #######
    # Testing with a glob matching exactly one filename
    # Using a glob matching one file containing a 3d image returns a 4d image
    # with 1 as last dimension.
    with testing.write_tmp_imgs(img_3d, create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_array_equal(
            _utils.check_niimg(glob_input).get_data()[..., 0],
            img_3d.get_data())
    # Disabled globbing behavior should raise an ValueError exception
    with testing.write_tmp_imgs(img_3d, create_files=True,
                                use_wildcards=True) as globs:
        glob_input = tmp_dir + globs
        assert_raises_regex(ValueError,
                            file_not_found_msg % re.escape(glob_input),
                            _utils.check_niimg, glob_input,
                            wildcards=False)

    #######
    # Testing with a glob matching multiple filenames
    img_4d = _utils.check_niimg_4d((img_3d, img_3d))
    with testing.write_tmp_imgs(img_3d, img_3d, create_files=True,
                                use_wildcards=True) as globs:
        # Bug fix: rebuild the glob expression from the *current* tmp files.
        # Previously the stale glob_input from the single-file test above
        # (whose file is already deleted) was reused here.
        glob_input = tmp_dir + globs
        assert_array_equal(
            _utils.check_niimg(glob_input).get_data(),
            img_4d.get_data())

    #######
    # Test when global variable is set to False => no globbing allowed
    ni.EXPAND_PATH_WILDCARDS = False

    # Non existing filename (/tmp/nofile) could match an existing one through
    # globbing but global wildcards variable overrides this feature => raises
    # a ValueError
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path)

    # Verify wildcards function parameter has no effect
    assert_raises_regex(ValueError, file_not_found_msg % nofile_path,
                        _utils.check_niimg, nofile_path, wildcards=False)

    # Testing with an exact filename matching (3d case)
    with testing.write_tmp_imgs(img_3d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename).get_data(),
            img_3d.get_data())

    # Testing with an exact filename matching (4d case)
    with testing.write_tmp_imgs(img_4d, create_files=True) as filename:
        assert_array_equal(
            _utils.check_niimg(filename).get_data(),
            img_4d.get_data())

    # Reverting to default behavior
    ni.EXPAND_PATH_WILDCARDS = True
def test_check_niimg_4d():
    """Check 4D coercion and iterator behavior of _utils.check_niimg_4d."""
    assert_raises_regex(TypeError, 'nibabel format',
                        _utils.check_niimg_4d, 0)

    assert_raises_regex(TypeError, 'empty object',
                        _utils.check_niimg_4d, [])

    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)

    # Tests with return_iterator=False
    img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d])
    assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2))
    assert_array_equal(img_4d_1.affine, affine)

    img_4d_2 = _utils.check_niimg_4d(img_4d_1)
    # Bug fix: compare the round-tripped image against the input image.
    # Previously img_4d_2 was compared with itself, which always passes.
    assert_array_equal(img_4d_1.get_data(), img_4d_2.get_data())
    assert_array_equal(img_4d_1.affine, img_4d_2.affine)

    # Tests with return_iterator=True
    img_3d_iterator = _utils.check_niimg_4d([img_3d, img_3d],
                                            return_iterator=True)
    img_3d_iterator_length = sum(1 for _ in img_3d_iterator)
    assert_true(img_3d_iterator_length == 2)

    img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d],
                                              return_iterator=True)
    img_3d_iterator_2 = _utils.check_niimg_4d(img_3d_iterator_1,
                                              return_iterator=True)
    for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
        assert_true(img_1.get_data().shape == (10, 10, 10))
        assert_array_equal(img_1.get_data(), img_2.get_data())
        assert_array_equal(img_1.affine, img_2.affine)

    img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d],
                                              return_iterator=True)
    img_3d_iterator_2 = _utils.check_niimg_4d(img_4d_1,
                                              return_iterator=True)
    for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2):
        assert_true(img_1.get_data().shape == (10, 10, 10))
        assert_array_equal(img_1.get_data(), img_2.get_data())
        assert_array_equal(img_1.affine, img_2.affine)

    # This should raise an error: a 3D img is given and we want a 4D
    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 4D and you provided a "
        "3D image.",
        _utils.check_niimg_4d, img_3d)

    # Test a Niimg-like object that does not hold a shape attribute
    phony_img = PhonyNiimage()
    _utils.check_niimg_4d(phony_img)

    a = nibabel.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    b = np.zeros((10, 10, 10))
    c = _utils.check_niimg_4d([a, b], return_iterator=True)
    assert_raises_regex(TypeError,
                        'Error encountered while loading image #1',
                        list, c)

    b = nibabel.Nifti1Image(np.zeros((10, 20, 10)), np.eye(4))
    c = _utils.check_niimg_4d([a, b], return_iterator=True)
    assert_raises_regex(
        ValueError,
        'Field of view of image #1 is different from reference FOV',
        list, c)
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None, tr=None):
    """
    Plot an image representation of voxel intensities across time also know
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------

        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        atlaslabels: ndarray
            A 3D array of integer labels from an atlas, resampled into
            ``img`` space.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        nskip : int
            Number of volumes at the beginning of the scan marked as
            nonsteady state. (Not used by this implementation.)
        size : tuple, optional
            Maximum (voxels, timepoints) extent of the plot; larger data is
            decimated to fit.
        subplot : matplotlib Subplot, optional
            Subplot to plot figure on. If None, the complete figure is used.
        title : string, optional
            The title displayed on the figure.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        legend : bool
            Whether to render the average functional series with
            ``atlaslabels`` as overlay.
        lut : ndarray, optional
            Lookup table mapping atlas label values to the four plotted
            segment classes. A default FreeSurfer-style table is built when
            None.
        tr : float , optional
            Specify the TR, if specified it uses this value. If left as None,
            # Frames is plotted instead of time.
    """

    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.
    img_nii = check_niimg_4d(img, dtype='auto',)
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    # Voxels inside any labelled region, as (n_voxels, n_timepoints).
    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    # Decimate rows (voxels) so the plot fits within size[0].
    # Note: p_dec/t_dec are always >= 1, so the guards are always true.
    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    # Decimate columns (timepoints) so the plot fits within size[1].
    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec; the third (narrow) column holds the legend.
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none',
               aspect='auto', cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto',
               cmap='gray', vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) // 10,
                    int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
        # Convert decimated frame indices back into seconds.
        labels = tr * (np.array(xticks)) * t_dec
        ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()],
                            fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        # Five axial cuts showing the segmentation over the mean EPI.
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine,
                                epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices),
                             5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, bg_img=epinii, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1,
                     cmap=mycolors, interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
def voxelwise_std(img: niimg_like) -> nib.nifti1.Nifti1Image:
    """Compute the standard deviation of a Nifti 4D file.

    This is the denominator of the first term of the SFS expression."""
    checked = check_niimg_4d(img)
    # Collapse the time axis into a per-voxel standard deviation map.
    std_img = image.math_img("np.std(img, axis=-1)", img=checked)
    return std_img
def plot_carpet(
    func,
    atlaslabels=None,
    detrend=True,
    nskip=0,
    size=(950, 800),
    subplot=None,
    title=None,
    output_file=None,
    legend=False,
    tr=None,
    lut=None,
):
    """
    Plot an image representation of voxel intensities across time also know
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
    2017 Jul 1; 154:150-158.

    Parameters
    ----------
    func : string
        Path to NIfTI or CIFTI BOLD image
    atlaslabels: ndarray, optional
        A 3D array of integer labels from an atlas, resampled into ``img``
        space. Required if ``func`` is a NIfTI image.
    detrend : boolean, optional
        Detrend and standardize the data prior to plotting.
    nskip : int, optional
        Number of volumes at the beginning of the scan marked as nonsteady
        state. Not used.
    size : tuple, optional
        Size of figure.
    subplot : matplotlib Subplot, optional
        Subplot to plot figure on.
    title : string, optional
        The title displayed on the figure.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    legend : bool
        Whether to render the average functional series with ``atlaslabels``
        as overlay.
    tr : float , optional
        Specify the TR, if specified it uses this value. If left as None,
        # of frames is plotted instead of time.
    lut : ndarray, optional
        Look up table for segmentations
    """
    epinii = None
    segnii = None
    nslices = None

    img = nb.load(func)

    if isinstance(img, nb.Cifti2Image):
        # NOTE(review): asserts are stripped when Python runs with -O;
        # these validations would then silently pass. Consider raising.
        assert (
            img.nifti_header.get_intent()[0] == "ConnDenseSeries"
        ), "Not a dense timeseries"

        data = img.get_fdata().T
        matrix = img.header.matrix
        # Structure name -> plotted segment class.
        struct_map = {
            "LEFT_CORTEX": 1,
            "RIGHT_CORTEX": 2,
            "SUBCORTICAL": 3,
            "CEREBELLUM": 4,
        }
        seg = np.zeros((data.shape[0],), dtype="uint32")
        # Assign each grayordinate range to a segment class based on its
        # CIFTI brain-model structure name.
        for bm in matrix.get_index_map(1).brain_models:
            if "CORTEX" in bm.brain_structure:
                # 1 for left cortex, 2 for right cortex.
                lidx = (1, 2)["RIGHT" in bm.brain_structure]
            elif "CEREBELLUM" in bm.brain_structure:
                lidx = 4
            else:
                lidx = 3
            index_final = bm.index_offset + bm.index_count
            seg[bm.index_offset:index_final] = lidx
        assert len(seg[seg < 1]) == 0, "Unassigned labels"

        # Decimate data
        data, seg = _decimate_data(data, seg, size)
        # preserve as much continuity as possible
        order = seg.argsort(kind="stable")
        cmap = ListedColormap(
            [cm.get_cmap("Paired").colors[i] for i in (1, 0, 7, 3)])
        assert len(cmap.colors) == len(
            struct_map
        ), "Mismatch between expected # of structures and colors"

        # ensure no legend for CIFTI
        legend = False

    else:  # Volumetric NIfTI
        img_nii = check_niimg_4d(img, dtype="auto",)
        func_data = _safe_get_data(img_nii, ensure_finite=True)
        ntsteps = func_data.shape[-1]
        # Voxels inside any labelled region, as (n_voxels, n_timepoints).
        data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
        oseg = atlaslabels[atlaslabels > 0].reshape(-1)

        # Map segmentation
        if lut is None:
            lut = np.zeros((256,), dtype="int")
            lut[1:11] = 1
            lut[255] = 2
            lut[30:99] = 3
            lut[100:201] = 4
        # Apply lookup table
        seg = lut[oseg.astype(int)]

        # Decimate data
        data, seg = _decimate_data(data, seg, size)
        # Order following segmentation labels
        order = np.argsort(seg)[::-1]
        # Set colormap
        cmap = ListedColormap(cm.get_cmap("tab10").colors[:4][::-1])

        if legend:
            # Mean EPI with segmentation overlay for the legend cuts.
            epiavg = func_data.mean(3)
            epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
            segnii = nb.Nifti1Image(
                lut[atlaslabels.astype(int)], epinii.affine, epinii.header
            )
            segnii.set_data_dtype("uint8")
            nslices = epiavg.shape[-1]

    # Delegate the actual rendering to the shared _carpet helper.
    return _carpet(
        data,
        seg,
        order,
        cmap,
        epinii=epinii,
        segnii=segnii,
        nslices=nslices,
        tr=tr,
        subplot=subplot,
        title=title,
        output_file=output_file,
    )
def _fit(self, img, mask):
    """Estimate the ICA model order and fit a truncated PCA on masked fMRI data.

    Implements the GIFT / Li, Adali & Calhoun (2007) procedure: the in-mask
    voxel-by-time matrix is (optionally) variance-normalized, the effective
    number of i.i.d. samples is estimated by spatial subsampling, the eigen
    spectrum is adjusted accordingly, and the AIC/KIC/MDL information criteria
    select the number of components. A full PCA is then fitted and truncated
    to the selected order.

    Parameters
    ----------
    img : 4D niimg-like
        Functional data; coerced with ``check_niimg_4d``.
    mask : 3D niimg-like
        Mask image; voxels where the mask data equals 1 are analyzed.

    Notes
    -----
    Sets many attributes on ``self``: per-criterion results (``aic_``,
    ``kic_``, ``mdl_``), variance-explained counts (``varexp_90_``,
    ``varexp_95_``), the truncated PCA attributes (``components_``,
    ``explained_variance_``, ...), and the component maps ``u_`` /
    ``u_nii_``.
    """
    LGR.info(
        "Performing dimensionality reduction based on GIFT "
        "(https://trendscenter.org/software/gift/) and Li, Y. O., Adali, T., "
        "& Calhoun, V. D. (2007). Estimating the number of independent components "
        "for functional magnetic resonance imaging data. Human Brain Mapping, 28(11), "
        "1251–1266. https://doi.org/10.1002/hbm.20359")
    img = check_niimg_4d(img)
    mask = check_niimg_3d(mask)
    data = img.get_fdata()
    mask = mask.get_fdata()

    [n_x, n_y, n_z, n_timepoints] = data.shape
    # Flatten space Fortran-style (MATLAB/GIFT ordering) to voxels x time.
    data_nib_V = np.reshape(data, (n_x * n_y * n_z, n_timepoints), order="F")
    mask_vec = np.reshape(mask, n_x * n_y * n_z, order="F")
    X = data_nib_V[mask_vec == 1, :]

    n_samples = np.sum(mask_vec)

    self.scaler_ = StandardScaler(with_mean=True, with_std=True)
    if self.normalize:
        # TODO: determine if tedana is already normalizing before this
        X = self.scaler_.fit_transform(X.T).T  # This was X_sc
        # X = ((X.T - X.T.mean(axis=0)) / X.T.std(axis=0)).T

    LGR.info("Performing SVD on original data...")
    V, eigenvalues = utils._icatb_svd(X, n_timepoints)
    LGR.info("SVD done on original data")

    # Reordering of values (descending eigenvalue order).
    eigenvalues = eigenvalues[::-1]
    dataN = np.dot(X, V[:, ::-1])
    # Potentially the small differences come from the different signs on V

    # Using 12 gaussian components from middle, top and bottom gaussian
    # components to determine the subsampling depth.
    # Final subsampling depth is determined using median
    kurt = kurtosis(dataN, axis=0, fisher=True)
    kurt[kurt < 0] = 0
    kurt = np.expand_dims(kurt, 1)

    # Large-eigenvalue components are forced out of the "gaussian" set below.
    kurt[eigenvalues > np.mean(eigenvalues)] = 1000
    idx_gauss = np.where(
        ((kurt[:, 0] < 0.3) & (kurt[:, 0] > 0) & (eigenvalues > np.finfo(float).eps)
         ) == 1)[0]
    # NOTE: make sure np.where is giving us just one tuple
    idx = np.array(idx_gauss[:]).T
    dfs = np.sum(eigenvalues > np.finfo(float).eps)  # degrees of freedom
    minTp = 12

    if len(idx) >= minTp:
        # Sample 12 near-gaussian components: 4 from the top, 4 from the
        # middle, 4 from the bottom of the candidate list.
        middle = int(np.round(len(idx) / 2))
        idx = np.hstack([idx[0:4], idx[middle - 1:middle + 3], idx[-4:]])
    else:
        minTp = np.min([minTp, dfs])
        idx = np.arange(dfs - minTp, dfs)

    idx = np.unique(idx)

    # Estimate the subsampling depth for effectively i.i.d. samples
    LGR.info(
        "Estimating the subsampling depth for effective i.i.d samples...")
    mask_ND = np.reshape(mask_vec, (n_x, n_y, n_z), order="F")
    sub_depth = len(idx)
    sub_iid_sp = np.zeros((sub_depth, ))
    for i in range(sub_depth):
        x_single = np.zeros(n_x * n_y * n_z)
        x_single[mask_vec == 1] = dataN[:, idx[i]]
        x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
        sub_iid_sp[i] = utils._est_indp_sp(x_single)[0] + 1
        if i > 6:
            # Early exit once the running median has stabilized (>6 agreeing
            # estimates).
            tmp_sub_sp = sub_iid_sp[0:i]
            tmp_sub_median = np.round(np.median(tmp_sub_sp))
            if np.sum(tmp_sub_sp == tmp_sub_median) > 6:
                sub_iid_sp = tmp_sub_sp
                break
    dim_n = x_single.ndim

    sub_iid_sp_median = int(np.round(np.median(sub_iid_sp)))
    # Cap the depth so that at least n_timepoints samples remain per axis.
    if np.floor(np.power(n_samples / n_timepoints, 1 / dim_n)) < sub_iid_sp_median:
        sub_iid_sp_median = int(
            np.floor(np.power(n_samples / n_timepoints, 1 / dim_n)))
    N = np.round(n_samples / np.power(sub_iid_sp_median, dim_n))

    if sub_iid_sp_median != 1:
        # Re-estimate the eigen spectrum on the subsampled (≈ i.i.d.) data.
        mask_s = utils._subsampling(mask_ND, sub_iid_sp_median)
        mask_s_1d = np.reshape(mask_s, np.prod(mask_s.shape), order="F")
        dat = np.zeros((int(np.sum(mask_s_1d)), n_timepoints))
        LGR.info("Generating subsampled i.i.d. data...")
        for i_vol in range(n_timepoints):
            x_single = np.zeros(n_x * n_y * n_z)
            x_single[mask_vec == 1] = X[:, i_vol]
            x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
            dat0 = utils._subsampling(x_single, sub_iid_sp_median)
            dat0 = np.reshape(dat0, np.prod(dat0.shape), order="F")
            dat[:, i_vol] = dat0[mask_s_1d == 1]

        # Perform Variance Normalization
        temp_scaler = StandardScaler(with_mean=True, with_std=True)
        dat = temp_scaler.fit_transform(dat.T).T

        # (completed)
        LGR.info("Performing SVD on subsampled i.i.d. data...")
        V, eigenvalues = utils._icatb_svd(dat, n_timepoints)
        LGR.info("SVD done on subsampled i.i.d. data")
        eigenvalues = eigenvalues[::-1]

    LGR.info("Effective number of i.i.d. samples %d" % N)

    # Make eigen spectrum adjustment
    LGR.info("Perform eigen spectrum adjustment ...")
    eigenvalues = utils._eigensp_adj(eigenvalues, N, eigenvalues.shape[0])
    # (completed)
    if np.sum(np.imag(eigenvalues)):
        raise ValueError(
            "Invalid eigenvalue found for the subsampled data.")

    # Correction on the ill-conditioned results (when tdim is large,
    # some least significant eigenvalues become small negative numbers)
    if eigenvalues[
            np.real(eigenvalues) <= np.finfo(float).eps].shape[0] > 0:
        eigenvalues[np.real(eigenvalues) <= np.finfo(float).eps] = np.min(
            eigenvalues[np.real(eigenvalues) >= np.finfo(float).eps])

    LGR.info("Estimating the dimensionality ...")
    p = n_timepoints
    aic = np.zeros(p - 1)
    kic = np.zeros(p - 1)
    mdl = np.zeros(p - 1)

    # Information criteria for each candidate model order k = 1..p-1.
    for k_idx, k in enumerate(np.arange(1, p)):
        # LH is the log-likelihood ratio of geometric to arithmetic mean of
        # the residual eigenvalues (sphericity of the noise subspace).
        LH = np.log(
            np.prod(np.power(eigenvalues[k:], 1 / (p - k))) /
            np.mean(eigenvalues[k:]))
        mlh = 0.5 * N * (p - k) * LH
        df = 1 + 0.5 * k * (2 * p - k + 1)
        aic[k_idx] = (-2 * mlh) + (2 * df)
        kic[k_idx] = (-2 * mlh) + (3 * df)
        mdl[k_idx] = -mlh + (0.5 * df * np.log(N))

    itc = np.row_stack([aic, kic, mdl])
    dlap = np.diff(itc, axis=1)

    # Calculate optimal number of components with each criterion:
    # the first k where the criterion starts increasing.
    # AIC
    a_aic = np.where(dlap[0, :] > 0)[0] + 1
    if a_aic.size == 0:
        n_aic = itc[0, :].shape[0]
    else:
        n_aic = a_aic[0]

    # KIC
    a_kic = np.where(dlap[1, :] > 0)[0] + 1
    if a_kic.size == 0:
        n_kic = itc[1, :].shape[0]
    else:
        n_kic = a_kic[0]

    # MDL
    a_mdl = np.where(dlap[2, :] > 0)[0] + 1
    if a_mdl.size == 0:
        n_mdl = itc[2, :].shape[0]
    else:
        n_mdl = a_mdl[0]

    # NOTE(review): if self.criterion is not one of "aic"/"kic"/"mdl",
    # n_components is never assigned and the code below raises
    # NameError — presumably validated upstream; confirm.
    if self.criterion == "aic":
        n_components = n_aic
    elif self.criterion == "kic":
        n_components = n_kic
    elif self.criterion == "mdl":
        n_components = n_mdl

    LGR.info("Performing PCA")

    # PCA with all possible components (the estimated selection is made after)
    ppca = PCA(n_components=None, svd_solver="full", copy=False, whiten=False)
    ppca.fit(X)

    # Get cumulative explained variance as components are added
    cumsum_varexp = np.cumsum(ppca.explained_variance_ratio_)

    # Calculate number of components for 90% varexp
    n_comp_varexp_90 = np.where(cumsum_varexp >= 0.9)[0][0] + 1

    # Calculate number of components for 95% varexp
    n_comp_varexp_95 = np.where(cumsum_varexp >= 0.95)[0][0] + 1

    LGR.info("Estimated number of components is %d" % n_components)

    # Save results of each criterion into dictionaries
    self.aic_ = {
        "n_components": n_aic,
        "value": aic,
        "explained_variance_total": cumsum_varexp[n_aic - 1],
    }
    self.kic_ = {
        "n_components": n_kic,
        "value": kic,
        "explained_variance_total": cumsum_varexp[n_kic - 1],
    }
    self.mdl_ = {
        "n_components": n_mdl,
        "value": mdl,
        "explained_variance_total": cumsum_varexp[n_mdl - 1],
    }
    self.varexp_90_ = {
        "n_components": n_comp_varexp_90,
        "explained_variance_total": cumsum_varexp[n_comp_varexp_90 - 1],
    }
    self.varexp_95_ = {
        "n_components": n_comp_varexp_95,
        "explained_variance_total": cumsum_varexp[n_comp_varexp_95 - 1],
    }

    # Assign attributes from model
    self.components_ = ppca.components_[:n_components, :]
    self.explained_variance_ = ppca.explained_variance_[:n_components]
    self.explained_variance_ratio_ = ppca.explained_variance_ratio_[:n_components]
    self.singular_values_ = ppca.singular_values_[:n_components]
    self.mean_ = ppca.mean_
    self.n_components_ = n_components
    self.n_features_ = ppca.n_features_
    self.n_samples_ = ppca.n_samples_
    # Commenting out noise variance as it depends on the covariance of the estimation
    # self.noise_variance_ = ppca.noise_variance_

    # Project the data onto the retained components (whitened spatial maps)
    # and fold them back into a 4D volume.
    component_maps = np.dot(np.dot(X, self.components_.T),
                            np.diag(1.0 / self.explained_variance_))
    component_maps_3d = np.zeros((n_x * n_y * n_z, n_components))
    component_maps_3d[mask_vec == 1, :] = component_maps
    component_maps_3d = np.reshape(component_maps_3d,
                                   (n_x, n_y, n_z, n_components), order="F")
    self.u_ = component_maps
    self.u_nii_ = nib.Nifti1Image(component_maps_3d, img.affine, img.header)
def compute_spheres_values(self, imgs):
    """Extract, for every seed sphere, the voxel values of each run/condition.

    Parameters
    ----------
    imgs : 4D niimg-like or list of 4D niimg-like
        One image per run. All images must share the same shape; a warning
        is issued if their affines differ.

    Side effects
    ------------
    Sets ``self.ref_img`` (first checked image), ``self.process_mask_img``
    (resampled to the reference image), ``self.vx_mask_coords`` (voxel
    coordinates of the seeds) and ``self.spheres_values``: a list of length
    n_spheres of arrays shaped (n_runs, n_conditions, n_values) — or a zero
    array of shape (n_runs, n_conditions, 1) for empty spheres.
    """
    start_t = time.time()

    # Force to get a list of imgs even if only one is given
    imgs_to_check = [imgs] if not isinstance(imgs, list) else imgs

    # Load Nifti images
    ref_shape = imgs_to_check[0].dataobj.shape
    ref_affine = imgs_to_check[0].affine
    imgs = []
    for img in imgs_to_check:
        # check if image is 4D
        imgs.append(check_niimg_4d(img))

        # Check that all images have same number of volumes
        if ref_shape != img.dataobj.shape:
            raise ValueError("All fMRI image must have same shape")
        # BUG FIX: the condition was inverted — it warned when the affines
        # *matched*. Warn only when an affine differs from the reference.
        if not np.array_equal(ref_affine, img.affine):
            warnings.warn("fMRI images do not have same affine")
    self.ref_img = imgs[0]

    # Compute world coordinates of the seeds
    process_mask_img = check_niimg_3d(self.process_mask_img)
    process_mask_img = image.resample_to_img(
        process_mask_img, imgs[0], interpolation='nearest'
    )
    self.process_mask_img = process_mask_img
    process_mask, process_mask_affine = masking._load_mask_img(
        process_mask_img
    )
    process_mask_coords = np.where(process_mask != 0)
    self.vx_mask_coords = process_mask_coords
    process_mask_coords = coord_transform(
        process_mask_coords[0], process_mask_coords[1],
        process_mask_coords[2], process_mask_affine)
    process_mask_coords = np.asarray(process_mask_coords).T

    if self.verbose:
        print("{} seeds found in the mask".format(len(process_mask_coords)))

    # Compute spheres: A is a sparse (n_spheres, n_in_mask_voxels) adjacency.
    _, A = _apply_mask_and_get_affinity(
        process_mask_coords, imgs[0], self.radius, True,
        mask_img=self.mask_img
    )

    # Number of runs: 1 4D fMRI image / run
    n_runs = len(imgs)
    # Number of spheres (or seed voxels)
    n_spheres = A.shape[0]
    # Number of volumes in each 4D fMRI image
    n_conditions = imgs[0].dataobj.shape[3]

    mask_img = check_niimg_3d(self.mask_img)
    mask_img = image.resample_img(
        mask_img, target_affine=imgs[0].affine,
        target_shape=imgs[0].shape[:3], interpolation='nearest'
    )

    masked_imgs_data = []
    for i_run, img in enumerate(imgs):
        masked_imgs_data.append(masking._apply_mask_fmri(img, mask_img))

    # Extract data of each sphere
    # X will be #spheres x #run x #conditions x #values
    X = []
    for i_sph in range(n_spheres):
        # Indexes of all voxels included in the current sphere
        sph_indexes = A.rows[i_sph]

        if len(sph_indexes) == 0:
            # Append when no data are available around the process voxel
            X.append(np.full((n_runs, n_conditions, 1), 0))
            print("Empty sphere")
        else:
            # Number of voxel in the current sphere
            n_values = len(sph_indexes)

            sub_X = np.empty((n_runs, n_conditions, n_values), dtype=object)
            for i_run, img in enumerate(imgs):
                for i_cond in range(n_conditions):
                    sub_X[i_run, i_cond] = masked_imgs_data[i_run][i_cond][
                        sph_indexes]
            X.append(sub_X)

    if self.verbose:
        dt = time.time() - start_t
        print("Elapsed time to extract spheres values: {:.01f}s".format(dt))

    self.spheres_values = X
def index_image(self): ind_map = - np.ones(check_niimg_4d(self.ref_img).shape[:3]) for i, (x, y, z) in enumerate(np.array(self.vx_mask_coords).T): ind_map[x, y, z] = i return image.new_img_like(self.ref_img, ind_map)
def jumeg_plot_stat_map(stat_map_img, t, bg_img=MNI152TEMPLATE, cut_coords=None, output_file=None, display_mode='ortho', colorbar=True, figure=None, axes=None, title=None, threshold=1e-6, annotate=True, draw_cross=True, black_bg='auto', cmap='magma', symmetric_cbar="auto", cbar_range=None, dim='auto', vmax=None, resampling_interpolation='continuous', **kwargs): """ Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial, and Lateral) This is based on nilearn.plotting.plot_stat_map Parameters ---------- stat_map_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html The statistical map image t : int Plot activity at time point given by time t. bg_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html The background image that the ROI/mask will be plotted on top of. If nothing is specified, the MNI152 template will be used. To turn off background image, just pass "bg_img=False". cut_coords : None, a tuple of floats, or an integer The MNI coordinates of the point where the cut is performed If display_mode is 'ortho', this should be a 3-tuple: (x, y, z) For display_mode == 'x', 'y', or 'z', then these are the coordinates of each cut in the corresponding direction. If None is given, the cuts is calculated automaticaly. If display_mode is 'x', 'y' or 'z', cut_coords can be an integer, in which case it specifies the number of cuts to perform output_file : string, or None, optional The name of an image file to export the plot to. Valid extensions are .png, .pdf, .svg. If output_file is not None, the plot is saved to a file, and the display is closed. display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'} Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal, 'z' - axial, 'ortho' - three cuts are performed in orthogonal directions. colorbar : boolean, optional If True, display a colorbar on the right of the plots. 
figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title displayed on the figure. threshold : a number, None, or 'auto' If None is given, the image is not thresholded. If a number is given, it is used to threshold the image: values below the threshold (in absolute value) are plotted as transparent. If auto is given, the threshold is determined magically by analysis of the image. annotate : boolean, optional If annotate is True, positions and left/right annotation are added to the plot. draw_cross : boolean, optional If draw_cross is True, a cross is drawn on the plot to indicate the cut plosition. black_bg : boolean, optional If True, the background of the image is set to be black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to matplotlib.pyplot.savefig. cmap : matplotlib colormap, optional The colormap for specified image. The ccolormap *must* be symmetrical. symmetric_cbar : boolean or 'auto', optional, default 'auto' Specifies whether the colorbar should range from -vmax to vmax or from vmin to vmax. Setting to 'auto' will select the latter if the range of the whole image is either positive or negative. Note: The colormap will always be set to range from -vmax to vmax. cbar_range : None, 2-tuple Color range of the plot. dim : float, 'auto' (by default), optional Dimming factor applied to background image. By default, automatic heuristics are applied based upon the background image intensity. Accepted float values, where a typical scan is between -2 and 2 (-2 = increase constrast; 2 = decrease contrast), but larger values can be used for a more pronounced effect. 0 means no dimming. 
vmax : float Upper bound for plotting, passed to matplotlib.pyplot.imshow resampling_interpolation : str Interpolation to use when resampling the image to the destination space. Can be "continuous" (default) to use 3rd-order spline interpolation, or "nearest" to use nearest-neighbor mapping. "nearest" is faster but can be noisier in some cases. Notes ----- Arrays should be passed in numpy convention: (x, y, z) ordered. For visualization, non-finite values found in passed 'stat_map_img' or 'bg_img' are set to zero. See Also -------- nilearn.plotting.plot_anat : To simply plot anatomical images nilearn.plotting.plot_epi : To simply plot raw EPI images nilearn.plotting.plot_glass_brain : To plot maps in a glass brain """ # noqa: E501 # dim the background from nilearn.plotting.img_plotting import _load_anat, _plot_img_with_bg, _get_colorbar_and_data_ranges from nilearn._utils import check_niimg_3d, check_niimg_4d from nilearn._utils.niimg_conversions import _safe_get_data bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim, black_bg=black_bg) stat_map_img = check_niimg_4d(stat_map_img, dtype='auto') cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(_safe_get_data(stat_map_img, ensure_finite=True), vmax, symmetric_cbar, kwargs) if cbar_range is not None: cbar_vmin = cbar_range[0] cbar_vmax = cbar_range[1] vmin = cbar_range[0] vmax = cbar_range[1] stat_map_img_at_time_t = index_img(stat_map_img, t) stat_map_img_at_time_t = check_niimg_3d(stat_map_img_at_time_t, dtype='auto') display = _plot_img_with_bg( img=stat_map_img_at_time_t, bg_img=bg_img, cut_coords=cut_coords, output_file=output_file, display_mode=display_mode, figure=figure, axes=axes, title=title, annotate=annotate, draw_cross=draw_cross, black_bg=black_bg, threshold=threshold, bg_vmin=bg_vmin, bg_vmax=bg_vmax, cmap=cmap, vmin=vmin, vmax=vmax, colorbar=colorbar, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, resampling_interpolation=resampling_interpolation, **kwargs) return display
def get_params_for_grid_slice(vstc, vsrc, tstep, subjects_dir,
                              cbar_range=None, **kwargs):
    """
    Makes calculations that would be executed repeatedly every time a slice
    is computed and saves the results in a dictionary which is then read by
    plot_vstc_grid_slice().

    Parameters
    ----------
    vstc : mne.VolSourceEstimate
        The volume source estimate.
    vsrc : mne.SourceSpaces
        The source space of the subject equivalent to the
    tstep : int
        Time step between successive samples in data.
    subjects_dir : str
        Path to the subject directory.
    cbar_range : None, 2-tuple
        Color range of the plot.

    Returns
    -------
    params_plot_img_with_bg : dict
        Dictionary containing the parameters for plotting: background image
        and its value range, the 4D stat map, and color-scale bounds.
    """
    img = vstc.as_volume(vsrc, dest='mri', mri_resolution=False)

    # TODO: why should vstc ever be 0?
    # NOTE(review): comparing an mne.VolSourceEstimate to 0 with ``==`` looks
    # wrong — it likely never takes this branch (or errors) for real
    # estimates; confirm the intended guard (possibly a check for empty data).
    if vstc == 0:
        # TODO: how would _make_image work if vstc is zero anyways?
        if tstep is not None:
            img = _make_image(vstc, vsrc, tstep, dest='mri',
                              mri_resolution=False)
        else:
            print(' Please provide the tstep value !')

    # Use the subject's T1 as the plotting background.
    subject = vsrc[0]['subject_his_id']
    temp_t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')

    bg_img = temp_t1_fname
    dim = 'auto'
    black_bg = 'auto'
    vmax = None
    symmetric_cbar = False

    # NOTE(review): private nilearn APIs — may change between versions.
    from nilearn.plotting.img_plotting import _load_anat, _get_colorbar_and_data_ranges
    from nilearn._utils import check_niimg_4d
    from nilearn._utils.niimg_conversions import _safe_get_data

    bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim,
                                                    black_bg=black_bg)

    stat_map_img = check_niimg_4d(img, dtype='auto')

    # Color ranges computed once for the whole 4D map so all slices share
    # a single scale.
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        _safe_get_data(stat_map_img, ensure_finite=True),
        vmax, symmetric_cbar, kwargs)

    # An explicit cbar_range overrides the derived ranges.
    if cbar_range is not None:
        cbar_vmin = cbar_range[0]
        cbar_vmax = cbar_range[1]
        vmin = cbar_range[0]
        vmax = cbar_range[1]

    params_plot_img_with_bg = dict()
    params_plot_img_with_bg['bg_img'] = bg_img
    params_plot_img_with_bg['black_bg'] = black_bg
    params_plot_img_with_bg['bg_vmin'] = bg_vmin
    params_plot_img_with_bg['bg_vmax'] = bg_vmax
    params_plot_img_with_bg['stat_map_img'] = stat_map_img
    params_plot_img_with_bg['cbar_vmin'] = cbar_vmin
    params_plot_img_with_bg['cbar_vmax'] = cbar_vmax
    params_plot_img_with_bg['vmin'] = vmin
    params_plot_img_with_bg['vmax'] = vmax

    return params_plot_img_with_bg
def plot_carpet(img, mask_img=None, detrend=True, output_file=None,
                figure=None, axes=None, title=None):
    """Plot an image representation of voxel intensities across time, also
    known as the "carpet plot" or "Power plot".

    See Jonathan Power Neuroimage 2017 Jul 1; 154:150-158.

    Parameters
    ----------
    img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        4D input image
    mask_img : Niimg-like object, optional
        See http://nilearn.github.io/manipulating_images/input_output.html
        Limit plotted voxels to those inside the provided mask. If not
        specified a new mask will be derived from data.
    detrend : boolean, optional
        Detrend and standardize the data prior to plotting.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot is saved
        to a file, and the display is closed.
    figure : matplotlib figure, optional
        Matplotlib figure used. If None is given, a new figure is created.
    axes : matplotlib axes, optional
        The axes used to display the plot. If None, the complete figure is
        used.
    title : string, optional
        The title displayed on the figure.

    Returns
    -------
    figure : matplotlib figure or None
        The figure containing the plot, or None if it was saved to
        ``output_file``.
    """
    img_nii = _utils.check_niimg_4d(img, dtype='auto')
    img_data = _safe_get_data(img_nii, ensure_finite=True)

    # Define TR and number of frames
    tr = img_nii.header.get_zooms()[-1]
    ntsteps = img_nii.shape[-1]

    if mask_img is None:
        # No mask given: derive one from the EPI data itself.
        nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
        nifti_masker.fit(img_nii)
        mask_data = nifti_masker.mask_img_.get_data().astype(bool)
    else:
        mask_nii = _utils.check_niimg_3d(mask_img, dtype='auto')
        mask_data = _safe_get_data(mask_nii, ensure_finite=True)

    data = img_data[mask_data > 0].reshape(-1, ntsteps)

    # Detrend data
    if detrend:
        data = clean(data.T, t_r=tr).T

    if not figure:
        if not axes:
            figure = plt.figure()
        else:
            figure = axes.figure

    if not axes:
        axes = figure.add_subplot(1, 1, 1)
    else:
        assert axes.figure is figure, ("The axes passed are not "
                                       "in the figure")

    # Avoid segmentation faults for long acquisitions by decimating the input
    # data
    long_cutoff = 800
    if data.shape[1] > long_cutoff:
        data = data[:, ::2]

    axes.imshow(data, interpolation='nearest', aspect='auto', cmap='gray',
                vmin=-2, vmax=2)

    axes.grid(False)
    axes.set_yticks([])
    axes.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    axes.set_xticks(xticks)
    axes.set_xlabel('time (s)')
    axes.set_ylabel('voxels')
    if title:
        axes.set_title(title)
    labels = tr * (np.array(xticks))
    # Tick labels must be scaled back up if the data were decimated above.
    if data.shape[1] > long_cutoff:
        labels *= 2
    axes.set_xticklabels(['%.02f' % t for t in labels.tolist()])

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        axes.spines[side].set_color('none')
        axes.spines[side].set_visible(False)

    axes.yaxis.set_ticks_position('left')
    axes.xaxis.set_ticks_position('bottom')
    axes.spines["bottom"].set_position(('outward', 20))
    axes.spines["left"].set_position(('outward', 20))

    if output_file is not None:
        figure.savefig(output_file)
        # BUG FIX: matplotlib Figure objects have no ``close()`` method — the
        # original ``figure.close()`` raised AttributeError. Use pyplot.close.
        plt.close(figure)
        figure = None

    return figure
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(4000, 3000),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None):
    """
    Adapted from: https://github.com/poldracklab/niworkflows

    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage 2017
    Jul 1; 154:150-158.

    Parameters
    ----------
    img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        4D input image
    atlaslabels : ndarray
        A 3D array of integer labels from an atlas, resampled into ``img``
        space.
    detrend : boolean, optional
        Detrend and standardize the data prior to plotting.
    nskip : int
        Number of volumes at the beginning of the scan marked as nonsteady
        state.
    size : tuple
        Maximum (voxels, timepoints) extent (kept for interface
        compatibility; decimation is currently disabled).
    subplot : matplotlib SubplotSpec, optional
        Subplot to draw into. If None, the whole figure is used.
    title : string, optional
        The title displayed on the figure.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot is saved
        to a file, and the display is closed.
    legend : bool
        Whether to render the average functional series with ``atlaslabels``
        as overlay.

    Returns
    -------
    ``output_file`` if the plot was saved to file, otherwise a
    ``([ax0, ax1], gridspec)`` tuple of the drawn axes.
    """
    import numpy as np
    import nibabel as nb
    import matplotlib.pyplot as plt
    from matplotlib import gridspec as mgs
    import matplotlib.cm as cm
    from matplotlib.colors import ListedColormap
    from nilearn.plotting import plot_img
    from nilearn.signal import clean
    from nilearn._utils import check_niimg_4d
    from nilearn._utils.niimg import _safe_get_data

    # actually load data
    img = nb.load(img)
    atlaslabels = nb.load(atlaslabels).get_data()

    img_nii = check_niimg_4d(img, dtype='auto')
    func_data = _safe_get_data(img_nii, ensure_finite=True)

    # Overall intensity range, annotated on the plot below.
    minimum = np.min(func_data)
    maximum = np.max(func_data)
    myrange = maximum - minimum

    # Define TR and number of frames
    tr = img_nii.header.get_zooms()[-1]
    ntsteps = func_data.shape[-1]

    # NOTE: all voxels are plotted (not only those inside the atlas), while
    # the segmentation strip is built from in-atlas voxels only.
    data = func_data.reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation: identity lookup for labels 1-7 by default.
    if lut is None:
        lut = np.zeros((256,), dtype='int')
        lut[1] = 1
        lut[2] = 2
        lut[3] = 3
        lut[4] = 4
        lut[5] = 5
        lut[6] = 6
        lut[7] = 7
    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend),
                                     subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none',
               aspect='auto', cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto',
               cmap='gray', vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])
    ax1.annotate(
        'intensity range: ' + str(myrange), xy=(0.0, 1.02), xytext=(0, 0),
        xycoords='axes fraction', textcoords='offset points', va='center',
        ha='left', color='r', size=6,
        bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
              'lw': 0, 'alpha': 0.0})

    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) // 10,
                    int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    ax1.set_xlabel('time (s)')
    # BUG FIX: the original multiplied by ``t_dec``, which was only assigned
    # in commented-out decimation code and therefore raised NameError. No
    # time decimation is applied, so labels are simply TR * frame index.
    labels = tr * (np.array(xticks))
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine,
                                epinii.header)
        segnii.set_data_dtype('uint8')

        nslices = epiavg.shape[-1]
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices),
                             5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, axes=ax2, display_mode='z', annotate=False,
                     cut_coords=[c], threshold=0.1, cmap=mycolors,
                     interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
import os import numpy as np import nibabel from decereb.chain import SimpleChain, Data from decereb.estimators.searchlight import SearchLight from nilearn import datasets from nilearn.image import index_img from nilearn._utils import check_niimg_4d from sklearn.cross_validation import KFold haxby_dataset = datasets.fetch_haxby_simple() conditions = np.recfromtxt(haxby_dataset.conditions_target)['f0'] condition_mask = np.logical_or(conditions == beta'face', conditions == beta'house') labels = conditions[condition_mask] fmri_img = nibabel.load(haxby_dataset.func) fmri_img = index_img(fmri_img, condition_mask) fmri_img = [img for img in check_niimg_4d(fmri_img, return_iterator=True)] mask_img = nibabel.load(haxby_dataset.mask) cv = KFold(len(labels), n_folds=4) clf_args = dict(mask_img=mask_img, process_mask_img=mask_img, cv=cv, radius=5.6) data = Data(data=fmri_img, labels=labels) chain = SimpleChain(clf=SearchLight, clf_args=clf_args, data=data) root = os.path.dirname(haxby_dataset['session_target']) output_path = os.path.join(root, 'searchlight.nii') chain.run(n_jobs_folds=1, verbose=3, output_path=output_path)
def plot_carpet(img, atlaslabels, detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                lut=None, tr=None):
    """
    Plot an image representation of voxel intensities across time, also known
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage 2017
    Jul 1; 154:150-158.

    Parameters
    ----------
    img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        4D input image
    atlaslabels : ndarray
        A 3D array of integer labels from an atlas, resampled into ``img``
        space.
    detrend : boolean, optional
        Detrend and standardize the data prior to plotting.
    nskip : int
        Number of volumes at the beginning of the scan marked as nonsteady
        state.
    size : tuple
        Maximum (voxels, timepoints) extents; larger data are decimated to
        fit.
    subplot : matplotlib SubplotSpec, optional
        Subplot to draw into. If None, the complete figure is used.
    title : string, optional
        The title displayed on the figure.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot is saved
        to a file, and the display is closed.
    legend : bool
        Whether to render the average functional series with ``atlaslabels``
        as overlay.
    tr : float, optional
        Specify the TR, if specified it uses this value. If left as None,
        # of frames is plotted instead of time.
    """
    # Define TR and number of frames
    notr = False
    if tr is None:
        # No TR given: use 1.0 so ticks count frames rather than seconds.
        notr = True
        tr = 1.
    img_nii = check_niimg_4d(img, dtype='auto',)
    func_data = _safe_get_data(img_nii, ensure_finite=True)
    ntsteps = func_data.shape[-1]

    # Only in-atlas voxels are plotted; rows follow the atlas labeling.
    data = func_data[atlaslabels > 0].reshape(-1, ntsteps)
    seg = atlaslabels[atlaslabels > 0].reshape(-1)

    # Map segmentation
    if lut is None:
        # Default lookup collapses FreeSurfer-style labels into 4 groups.
        lut = np.zeros((256, ), dtype='int')
        lut[1:11] = 1
        lut[255] = 2
        lut[30:99] = 3
        lut[100:201] = 4

    # Apply lookup table
    newsegm = lut[seg.astype(int)]

    # Decimate rows (voxels) so at most ~size[0] rows are drawn.
    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
        newsegm = newsegm[::p_dec]

    # Decimate columns (timepoints) so at most ~size[1] columns are drawn.
    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]

    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)

    # Order following segmentation labels
    order = np.argsort(newsegm)[::-1]

    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]

    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend),
                                     subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)

    mycolors = ListedColormap(cm.get_cmap('tab10').colors[:4][::-1])

    # Segmentation colorbar
    ax0 = plt.subplot(gs[0])
    ax0.set_yticks([])
    ax0.set_xticks([])
    ax0.imshow(newsegm[order, np.newaxis], interpolation='none',
               aspect='auto', cmap=mycolors, vmin=1, vmax=4)
    ax0.grid(False)
    ax0.spines["left"].set_visible(False)
    ax0.spines["bottom"].set_color('none')
    ax0.spines["bottom"].set_visible(False)

    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data[order, ...], interpolation='nearest', aspect='auto',
               cmap='gray', vmin=v[0], vmax=v[1])

    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
    # Scale labels by t_dec to recover original frame indices after the
    # column decimation above.
    labels = tr * (np.array(xticks)) * t_dec
    ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=5)

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        ax0.spines[side].set_color('none')
        ax0.spines[side].set_visible(False)
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)

    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)

    if legend:
        gslegend = mgs.GridSpecFromSubplotSpec(
            5, 1, subplot_spec=gs[2], wspace=0.0, hspace=0.0)
        epiavg = func_data.mean(3)
        epinii = nb.Nifti1Image(epiavg, img_nii.affine, img_nii.header)
        segnii = nb.Nifti1Image(lut[atlaslabels.astype(int)], epinii.affine,
                                epinii.header)
        segnii.set_data_dtype('uint8')
        nslices = epiavg.shape[-1]
        # NOTE(review): uint8 cast caps slice coordinates at 255 — would
        # wrap for volumes with more than 255 axial slices; confirm inputs.
        coords = np.linspace(int(0.10 * nslices), int(0.95 * nslices),
                             5).astype(np.uint8)
        for i, c in enumerate(coords.tolist()):
            ax2 = plt.subplot(gslegend[i])
            plot_img(segnii, bg_img=epinii, axes=ax2, display_mode='z',
                     annotate=False, cut_coords=[c], threshold=0.1,
                     cmap=mycolors, interpolation='nearest')

    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file

    return [ax0, ax1], gs
def voxelwise_mean(img: niimg_like) -> nib.nifti1.Nifti1Image: """Compute the voxelwise mean of a Nifti 4D file. This is the numerator of the first term of the SFS expression.""" img = check_niimg_4d(img) # Application is idempotent. return image.mean_img(img)