def fit(self, run_imgs, events=None, confounds=None,
        design_matrices=None):
    """Fit the GLM.

    For each run:
    1. create the design matrix X
    2. mask the fMRI data to obtain Y
    3. fit the regression to (Y, X)

    Parameters
    ----------
    run_imgs : Niimg-like object or list of Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html#inputing-data-file-names-or-image-objects  # noqa:E501
        Data on which the GLM will be fitted. If this is a list,
        the affine is considered the same for all.

    events : pandas DataFrame or string or list of pandas DataFrames or strings, optional
        fMRI events used to build design matrices. One events object
        is expected per run_img. Ignored if design_matrices is not None.
        If a string, a path to a CSV file is expected.

    confounds : pandas DataFrame, numpy array or string or list of pandas DataFrames, numpy arrays or strings, optional
        Each column in a DataFrame corresponds to a confound variable
        to be included in the regression model of the respective
        run_img. The number of rows must match the number of volumes
        in the respective run_img. Ignored if design_matrices is not
        None. If a string, a path to a CSV file is expected.

    design_matrices : pandas DataFrame or list of pandas DataFrames, optional
        Design matrices that will be used to fit the GLM. If given,
        they take precedence over events and confounds.

    """
    # Local import to prevent circular imports
    from nilearn.input_data import NiftiMasker  # noqa

    # Check arguments
    # Check imgs type
    if events is not None:
        _check_events_file_uses_tab_separators(events_files=events)
    if not isinstance(run_imgs, (list, tuple)):
        run_imgs = [run_imgs]
    if design_matrices is None:
        if events is None:
            raise ValueError('events or design matrices must be provided')
        if self.t_r is None:
            raise ValueError('t_r not given to FirstLevelModel object'
                             ' to compute design from events')
    else:
        design_matrices = _check_run_tables(run_imgs, design_matrices,
                                            'design_matrices')
    # Check that number of events and confound files match number of runs
    # Also check that events and confound files can be loaded as DataFrame
    if events is not None:
        events = _check_run_tables(run_imgs, events, 'events')
    if confounds is not None:
        confounds = _check_run_tables(run_imgs, confounds, 'confounds')

    # Learn the mask
    if self.mask_img is False:
        # We create a dummy mask to preserve functionality of api
        ref_img = check_niimg(run_imgs[0])
        self.mask_img = Nifti1Image(np.ones(ref_img.shape[:3]),
                                    ref_img.affine)
    if not isinstance(self.mask_img, NiftiMasker):
        self.masker_ = NiftiMasker(mask_img=self.mask_img,
                                   smoothing_fwhm=self.smoothing_fwhm,
                                   target_affine=self.target_affine,
                                   standardize=self.standardize,
                                   mask_strategy='epi',
                                   t_r=self.t_r,
                                   memory=self.memory,
                                   verbose=max(0, self.verbose - 2),
                                   target_shape=self.target_shape,
                                   memory_level=self.memory_level)
        self.masker_.fit(run_imgs[0])
    else:
        # mask_img is a NiftiMasker: clone it if unfitted, reuse otherwise
        if self.mask_img.mask_img_ is None and self.masker_ is None:
            self.masker_ = clone(self.mask_img)
            for param_name in ['target_affine', 'target_shape',
                               'smoothing_fwhm', 't_r', 'memory',
                               'memory_level']:
                our_param = getattr(self, param_name)
                if our_param is None:
                    continue
                if getattr(self.masker_, param_name) is not None:
                    warn('Parameter %s of the masker'
                         ' overridden' % param_name)
                setattr(self.masker_, param_name, our_param)
            self.masker_.fit(run_imgs[0])
        else:
            self.masker_ = self.mask_img

    # For each run fit the model and keep only the regression results.
    self.labels_, self.results_, self.design_matrices_ = [], [], []
    n_runs = len(run_imgs)
    t0 = time.time()
    for run_idx, run_img in enumerate(run_imgs):
        # Report progress
        if self.verbose > 0:
            percent = float(run_idx) / n_runs
            percent = round(percent * 100, 2)
            dt = time.time() - t0
            # We use a max to avoid a division by zero
            if run_idx == 0:
                remaining = 'go take a coffee, a big one'
            else:
                remaining = (100. - percent) / max(0.01, percent) * dt
                remaining = '%i seconds remaining' % remaining
            sys.stderr.write(
                "Computing run %d out of %d runs (%s)\n"
                % (run_idx + 1, n_runs, remaining))

        # Build the experimental design for the glm
        run_img = check_niimg(run_img, ensure_ndim=4)
        if design_matrices is None:
            n_scans = get_data(run_img).shape[3]
            if confounds is not None:
                confounds_matrix = confounds[run_idx].values
                if confounds_matrix.shape[0] != n_scans:
                    raise ValueError('Rows in confounds does not match '
                                     'n_scans in run_img at index %d'
                                     % (run_idx,))
                confounds_names = confounds[run_idx].columns.tolist()
            else:
                confounds_matrix = None
                confounds_names = None
            start_time = self.slice_time_ref * self.t_r
            end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r
            frame_times = np.linspace(start_time, end_time, n_scans)
            design = make_first_level_design_matrix(
                frame_times, events[run_idx], self.hrf_model,
                self.drift_model, self.high_pass, self.drift_order,
                self.fir_delays, confounds_matrix, confounds_names,
                self.min_onset)
        else:
            design = design_matrices[run_idx]
        self.design_matrices_.append(design)

        # Mask and prepare data for GLM
        if self.verbose > 1:
            t_masking = time.time()
            sys.stderr.write('Starting masker computation \r')

        Y = self.masker_.transform(run_img)
        del run_img  # Delete unmasked image to save memory

        if self.verbose > 1:
            t_masking = time.time() - t_masking
            sys.stderr.write('Masker took %d seconds       \n' % t_masking)

        if self.signal_scaling:
            Y, _ = mean_scaling(Y, self.scaling_axis)
        if self.memory:
            mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])
        else:
            mem_glm = run_glm

        # compute GLM
        if self.verbose > 1:
            t_glm = time.time()
            sys.stderr.write('Performing GLM computation\r')
        labels, results = mem_glm(Y, design.values,
                                  noise_model=self.noise_model,
                                  bins=100, n_jobs=self.n_jobs)
        if self.verbose > 1:
            t_glm = time.time() - t_glm
            sys.stderr.write('GLM took %d seconds         \n' % t_glm)

        self.labels_.append(labels)
        # We save memory if inspecting model details is not necessary
        if self.minimize_memory:
            for key in results:
                results[key] = SimpleRegressionResults(results[key])
        self.results_.append(results)
        del Y

    # Report progress
    if self.verbose > 0:
        sys.stderr.write("\nComputation of %d runs done in %i seconds\n\n"
                         % (n_runs, time.time() - t0))

    return self
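
# A minimal usage sketch for ``fit``, not part of the library code. It
# assumes nilearn-style import paths (``nilearn.glm.first_level``), a
# repetition time of 2 s, and hypothetical file names; note the events
# file must be tab-separated, as enforced by
# _check_events_file_uses_tab_separators above:
#
#     from nilearn.glm.first_level import FirstLevelModel
#
#     model = FirstLevelModel(t_r=2.0, hrf_model='glover')
#     model = model.fit('run1_bold.nii.gz', events='run1_events.tsv')
#     # One design matrix per run is stored after fitting
#     design = model.design_matrices_[0]
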
def compute_contrast(self, second_level_contrast=None,
                     first_level_contrast=None,
                     second_level_stat_type=None, output_type='z_score'):
    """Generate different outputs corresponding to the contrasts
    provided, e.g. z_map, t_map, effects and variance.

    Parameters
    ----------
    second_level_contrast : str or array of shape (n_col), optional
        Where ``n_col`` is the number of columns of the design matrix.
        The string can be a formula compatible with
        `pandas.DataFrame.eval`. Basically one can use the name of the
        conditions as they appear in the design matrix of the fitted
        model combined with operators +- and combined with numbers
        with operators +-`*`/. The default (None) is accepted if the
        design matrix has a single column, in which case the only
        possible contrast array([1]) is applied; when the design
        matrix has multiple columns, an error is raised.

    first_level_contrast : str or array of shape (n_col) with respect to FirstLevelModel, optional
        In case a list of FirstLevelModel was provided as
        second_level_input, we have to provide a contrast to apply to
        the first level models to get the corresponding list of images
        desired, that would be tested at the second level. In case a
        pandas DataFrame was provided as second_level_input this is
        the map name to extract from the pandas dataframe map_name
        column. It has to be a 't' contrast.

    second_level_stat_type : {'t', 'F'}, optional
        Type of the second level contrast.

    output_type : str, optional
        Type of the output map. Can be 'z_score', 'stat', 'p_value',
        'effect_size', 'effect_variance' or 'all'.
        Default='z_score'.

    Returns
    -------
    output_image : Nifti1Image
        The desired output image(s). If ``output_type == 'all'``, then
        the output is a dictionary of images, keyed by the type of
        image.

    """
    if self.second_level_input_ is None:
        raise ValueError('The model has not been fit yet')

    # check first_level_contrast
    _check_first_level_contrast(self.second_level_input_,
                                first_level_contrast)

    # check contrast and obtain con_val
    con_val = _get_con_val(second_level_contrast, self.design_matrix_)

    # check output type
    # 'all' is assumed to be the final entry;
    # if adding more, place before 'all'
    valid_types = ['z_score', 'stat', 'p_value', 'effect_size',
                   'effect_variance', 'all']
    _check_output_type(output_type, valid_types)

    # Get effect_maps appropriate for chosen contrast
    effect_maps = _infer_effect_maps(self.second_level_input_,
                                     first_level_contrast)
    # Check design matrix X and effect maps Y agree on number of rows
    _check_effect_maps(effect_maps, self.design_matrix_)

    # Fit an Ordinary Least Squares regression for parametric statistics
    Y = self.masker_.transform(effect_maps)
    if self.memory:
        mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])
    else:
        mem_glm = run_glm
    labels, results = mem_glm(Y, self.design_matrix_.values,
                              n_jobs=self.n_jobs, noise_model='ols')

    # We save memory if inspecting model details is not necessary
    if self.minimize_memory:
        for key in results:
            results[key] = SimpleRegressionResults(results[key])
    self.labels_ = labels
    self.results_ = results

    # We compute contrast object
    if self.memory:
        mem_contrast = self.memory.cache(compute_contrast)
    else:
        mem_contrast = compute_contrast
    contrast = mem_contrast(self.labels_, self.results_,
                            con_val, second_level_stat_type)

    output_types = (valid_types[:-1]
                    if output_type == 'all' else [output_type])
    outputs = {}
    for output_type_ in output_types:
        # We get desired output from contrast object
        estimate_ = getattr(contrast, output_type_)()
        # Prepare the returned images
        output = self.masker_.inverse_transform(estimate_)
        contrast_name = str(con_val)
        # Label each image with its own output type, not the requested
        # aggregate, so 'all' yields correctly described maps
        output.header['descrip'] = ('%s of contrast %s'
                                    % (output_type_, contrast_name))
        outputs[output_type_] = output

    return outputs if output_type == 'all' else output
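
# A minimal usage sketch for ``compute_contrast``, not part of the
# library code. It assumes a nilearn-style import path
# (``nilearn.glm.second_level``) and hypothetical subject-level contrast
# images; the one-column intercept design corresponds to a one-sample
# test, so second_level_contrast may be omitted (the default
# array([1]) is applied):
#
#     import pandas as pd
#     from nilearn.glm.second_level import SecondLevelModel
#
#     contrast_imgs = ['sub-01_con.nii.gz', 'sub-02_con.nii.gz',
#                      'sub-03_con.nii.gz']
#     design_matrix = pd.DataFrame(
#         {'intercept': [1] * len(contrast_imgs)})
#     model = SecondLevelModel().fit(contrast_imgs,
#                                    design_matrix=design_matrix)
#     z_map = model.compute_contrast(output_type='z_score')
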