###############################################################################
# One plot of each component
for i, cur_img in enumerate(iter_img(dictlearning_components_img)):
    plot_stat_map(cur_img, display_mode="z", title="Comp %d" % i,
                  cut_coords=1, colorbar=False)

###############################################################################
# Estimate explained variance per component and plot using matplotlib
#
# The fitted object `dict_learning` can be used to calculate the score per component
scores = dict_learning.score(func_filenames, per_component=True)

# Plot the scores
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter

plt.figure(figsize=(4, 4))
positions = np.arange(len(scores))
plt.barh(positions, scores)
plt.ylabel('Component #', size=12)
plt.xlabel('Explained variance', size=12)
# One tick per component. Reuse `positions` (len(scores)) instead of a
# hard-coded np.arange(20) so the axis stays correct when the number of
# fitted components changes.
plt.yticks(positions)
plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
plt.tight_layout()
class CanICAInterface(BaseInterface):
    """Nipype interface to nilearn decomposition methods.

    Fits either a Canonical ICA (CanICA) or a Dictionary Learning
    estimator on the input functional images, then writes the component
    maps, the score(s) and one loadings file per input image.

    For more information look at: nilearn.decomposition.CanICA
    """
    input_spec = CanICAInputSpec
    output_spec = CanICAOutputSpec

    def _run_interface(self, runtime):
        # Collect every estimator option from the input traits.
        algorithm = get_trait_value(self.inputs, 'algorithm', default='canica')
        mask = get_trait_value(self.inputs, 'mask')
        n_components = get_trait_value(self.inputs, 'n_components')
        do_cca = get_trait_value(self.inputs, 'do_cca')
        smoothing_fwhm = get_trait_value(self.inputs, 'smoothing_fwhm', default=None)
        standardize = get_trait_value(self.inputs, 'standardize', default=None)
        threshold = get_trait_value(self.inputs, 'threshold', default=None)
        random_state = get_trait_value(self.inputs, 'random_state', default=None)
        n_init = get_trait_value(self.inputs, 'n_init')
        n_jobs = get_trait_value(self.inputs, 'n_jobs')
        n_epochs = get_trait_value(self.inputs, 'n_epochs')
        alpha = get_trait_value(self.inputs, 'alpha')
        memory = get_trait_value(self.inputs, 'memory')
        memory_level = get_trait_value(self.inputs, 'memory_level')
        confounds = get_trait_value(self.inputs, 'confounds')

        # init the estimator
        if algorithm == 'canica':
            self._estimator = CanICA(
                mask=mask,
                n_components=n_components,
                threshold=threshold,
                random_state=random_state,
                standardize=standardize,
                smoothing_fwhm=smoothing_fwhm,
                do_cca=do_cca,
                verbose=1,
                n_init=n_init,
                memory=memory,
                memory_level=memory_level,
                n_jobs=n_jobs,
            )
        elif algorithm == 'dictlearning':
            self._estimator = DictLearning(
                mask=mask,
                n_components=n_components,
                random_state=random_state,
                standardize=standardize,
                smoothing_fwhm=smoothing_fwhm,
                verbose=1,
                n_epochs=n_epochs,
                alpha=alpha,
                memory=memory,
                memory_level=memory_level,
                n_jobs=n_jobs,
            )
        else:
            # Fail fast with a clear message instead of an AttributeError
            # later when self._estimator is first used.
            raise ValueError(
                "Expected 'algorithm' to be 'canica' or 'dictlearning', "
                "got {}.".format(algorithm))

        # set output file names
        self._estimator_name = algorithm
        self._confounds = confounds
        self._reconstructed_img_file = '{}_resting_state.nii.gz'.format(
            self._estimator_name)
        self._score_file = '{}_score.txt'.format(self._estimator_name)
        self._loading_file = '{}_{}_loading.txt'

        # fit and transform
        self._estimator.fit(self.inputs.in_files, confounds=self._confounds)
        self._score = self._estimator.score(self.inputs.in_files,
                                            confounds=self._confounds)
        self._loadings = self._estimator.transform(self.inputs.in_files,
                                                   confounds=self._confounds)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        masker = self._estimator.masker_

        # Drop output maps to a Nifti file
        components_img = masker.inverse_transform(self._estimator.components_)
        components_img.to_filename(self._reconstructed_img_file)

        # Save the score(s): a scalar goes to a plain text file, a
        # per-component array goes through numpy.
        if isinstance(self._score, float):
            with open(self._score_file, 'w') as f:
                # '%' formatting already yields a str; no extra str() needed.
                f.write("%.10f" % self._score)
        else:
            np.savetxt(self._score_file, self._score, fmt='%.10f')

        # Save one loadings file per input image.
        self._loading_files = []
        for idx, loadings in enumerate(self._loadings):
            loading_file = self._loading_file.format(self._estimator_name, idx)
            np.savetxt(loading_file, loadings, fmt='%.10f')
            self._loading_files.append(loading_file)

        outputs['components'] = op.abspath(self._reconstructed_img_file)
        outputs['score'] = op.abspath(self._score_file)
        outputs['loadings'] = [op.abspath(lf) for lf in self._loading_files]
        return outputs
class CanICAInterface(BaseInterface):
    """Nipype interface wrapping nilearn's ICA decomposition estimators.

    For more information look at: nilearn.decomposition.CanICA
    """
    input_spec = CanICAInputSpec
    output_spec = CanICAOutputSpec

    def _run_interface(self, runtime):
        # Read every estimator option from the input traits.
        algorithm = get_trait_value(self.inputs, 'algorithm', default='canica')
        mask = get_trait_value(self.inputs, 'mask')
        n_components = get_trait_value(self.inputs, 'n_components')
        do_cca = get_trait_value(self.inputs, 'do_cca')
        smoothing_fwhm = get_trait_value(self.inputs, 'smoothing_fwhm', default=None)
        standardize = get_trait_value(self.inputs, 'standardize', default=None)
        threshold = get_trait_value(self.inputs, 'threshold', default=None)
        random_state = get_trait_value(self.inputs, 'random_state', default=None)
        n_init = get_trait_value(self.inputs, 'n_init')
        n_jobs = get_trait_value(self.inputs, 'n_jobs')
        n_epochs = get_trait_value(self.inputs, 'n_epochs')
        alpha = get_trait_value(self.inputs, 'alpha')
        memory = get_trait_value(self.inputs, 'memory')
        memory_level = get_trait_value(self.inputs, 'memory_level')
        confounds = get_trait_value(self.inputs, 'confounds')

        # Options shared by both estimators; the branches below add only
        # the algorithm-specific ones.
        shared_opts = dict(
            mask=mask,
            n_components=n_components,
            random_state=random_state,
            standardize=standardize,
            smoothing_fwhm=smoothing_fwhm,
            verbose=1,
            memory=memory,
            memory_level=memory_level,
            n_jobs=n_jobs,
        )
        if algorithm == 'canica':
            self._estimator = CanICA(threshold=threshold,
                                     do_cca=do_cca,
                                     n_init=n_init,
                                     **shared_opts)
        elif algorithm == 'dictlearning':
            self._estimator = DictLearning(n_epochs=n_epochs,
                                           alpha=alpha,
                                           **shared_opts)

        # Output file names derive from the chosen algorithm.
        self._estimator_name = algorithm
        self._confounds = confounds
        self._reconstructed_img_file = '{}_resting_state.nii.gz'.format(self._estimator_name)
        self._score_file = '{}_score.txt'.format(self._estimator_name)
        self._loading_file = '{}_{}_loading.txt'

        # Fit the estimator, then score and transform the same inputs.
        in_files = self.inputs.in_files
        self._estimator.fit(in_files, confounds=self._confounds)
        self._score = self._estimator.score(in_files, confounds=self._confounds)
        self._loadings = self._estimator.transform(in_files, confounds=self._confounds)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        masker = self._estimator.masker_

        # Write the reconstructed component maps to a Nifti file.
        components_img = masker.inverse_transform(self._estimator.components_)
        components_img.to_filename(self._reconstructed_img_file)

        # Persist the score: scalar -> plain text, array -> numpy savetxt.
        if isinstance(self._score, float):
            with open(self._score_file, 'w') as f:
                f.write(str("%.10f" % self._score))
        else:
            np.savetxt(self._score_file, self._score, fmt='%.10f')

        # One loadings file per input image.
        self._loading_files = []
        for idx, loadings in enumerate(self._loadings):
            loading_file = self._loading_file.format(self._estimator_name, idx)
            np.savetxt(loading_file, loadings, fmt='%.10f')
            self._loading_files.append(loading_file)

        outputs['components'] = op.abspath(self._reconstructed_img_file)
        outputs['score'] = op.abspath(self._score_file)
        outputs['loadings'] = [op.abspath(lf) for lf in self._loading_files]
        return outputs