def _run_interface(self, runtime):
    """Fit a second-level intercept model per contrast and save stat maps.

    For each contrast index, find the one input stat file whose metadata
    matches, read the contrast specification (weights + type) from the HDF
    store, then fit a fresh intercept-only ``SecondLevelModel`` per contrast
    and write one statistical map per contrast into ``runtime.cwd``.
    """
    model = level2.SecondLevelModel()

    files = []
    # Super inefficient... think more about this later
    for idx in self.inputs.contrast_indices:
        for fname, metadata in zip(_flatten(self.inputs.stat_files),
                                   _flatten(self.inputs.stat_metadata)):
            if _match(idx, metadata):
                files.append(fname)
                break
        else:
            # FIX: previously a bare ``raise ValueError`` with no message;
            # include the unmatched index so the failure is diagnosable.
            raise ValueError(
                "No stat file found matching contrast index: {}".format(idx))

    # Entities common to every contrast become output entities; the
    # remainder distinguish the individual inputs.
    out_ents = reduce(dict_intersection, self.inputs.contrast_indices)
    in_ents = [{key: val for key, val in index.items() if key not in out_ents}
               for index in self.inputs.contrast_indices]

    contrast_spec = pd.read_hdf(self.inputs.contrast_info, key='contrasts')
    contrast_matrix = contrast_spec.drop(columns=['type']).T
    contrast_types = contrast_spec['type']

    # Label rows by their distinguishing entities, e.g. "run-01_task-x"
    contrast_matrix.index = [
        '_'.join('{}-{}'.format(key, val) for key, val in ents.items())
        for ents in in_ents]

    contrast_matrix.to_csv('contrasts.tsv', sep='\t')
    # NOTE(review): assumes the process CWD is runtime.cwd — confirm
    self._results['contrast_matrix'] = os.path.join(
        runtime.cwd, 'contrasts.tsv')

    out_ents['type'] = 'stat'

    contrast_maps = []
    contrast_metadata = []
    stat_fmt = os.path.join(runtime.cwd, '{}.nii.gz').format
    for contrast, ctype in zip(contrast_matrix, contrast_types):
        intercept = contrast_matrix[contrast]
        # Restrict the fit to inputs with non-zero weight in this contrast
        data = np.array(files)[intercept != 0].tolist()
        intercept = intercept[intercept != 0]

        model.fit(data, design_matrix=pd.DataFrame({'intercept': intercept}))

        stat_type = {'T': 't', 'F': 'F'}[ctype]
        stat = model.compute_contrast(second_level_stat_type=stat_type)
        stat_fname = stat_fmt(contrast)
        stat.to_filename(stat_fname)

        contrast_maps.append(stat_fname)
        metadata = out_ents.copy()
        metadata['contrast'] = contrast
        contrast_metadata.append(metadata)

    self._results['contrast_maps'] = contrast_maps
    self._results['contrast_metadata'] = contrast_metadata

    return runtime
def _run_interface(self, runtime):
    """Fit a second-level intercept model per contrast and save stat maps.

    Filters input stat files to those whose metadata matches the contrast
    entities, then for each prepared contrast fits an intercept-only
    ``SecondLevelModel`` over the non-zero-weighted inputs and writes one
    statistical map into ``runtime.cwd``.
    """
    model = level2.SecondLevelModel()
    contrast_maps = []
    contrast_metadata = []
    entities = self.inputs.contrast_info[0]['entities']  # Same for all
    out_ents = {'suffix': 'stat', **entities}

    # Only keep files which match all entities for contrast
    stat_metadata = _flatten(self.inputs.stat_metadata)
    stat_files = _flatten(self.inputs.stat_files)

    filtered_files = []
    names = []
    for m, f in zip(stat_metadata, stat_files):
        if _match(entities, m):
            filtered_files.append(f)
            names.append(m['contrast'])

    # FIX: renamed locals that shadowed the builtins ``type`` and ``input``
    for name, weights, contrast_type in prepare_contrasts(
            self.inputs.contrast_info, names):
        # Need to add F-test support for intercept (more than one column)
        # Currently only taking 0th column as intercept (t-test)
        weights = weights[0]
        input_files = (np.array(filtered_files)[weights != 0]).tolist()
        design_matrix = pd.DataFrame({'intercept': weights[weights != 0]})

        model.fit(input_files, design_matrix=design_matrix)

        stat = model.compute_contrast(second_level_stat_type=contrast_type)
        stat_fname = os.path.join(runtime.cwd, '{}.nii.gz').format(name)
        stat.to_filename(stat_fname)

        contrast_maps.append(stat_fname)
        contrast_metadata.append({'contrast': name, **out_ents})

    self._results['contrast_maps'] = contrast_maps
    self._results['contrast_metadata'] = contrast_metadata

    return runtime
def second_level(analysis, block, space, deriv_dir):
    """Run second-level models for each contrast group of a model block.

    For each contrast matrix returned by ``block.get_contrasts``, locates
    the first-level stat files for its inputs, saves a contrast-matrix
    figure, fits the group model per contrast, and writes stat maps and
    glass-brain plots under ``deriv_dir``. Returns a list of job
    descriptions for reporting.
    """
    fl_layout = grabbids.BIDSLayout(
        deriv_dir,
        config=['bids', 'derivatives',
                pkgr.resource_filename('fitlins', 'data/fitlins.json')])
    fl_layout.path_patterns[:0] = PATH_PATTERNS

    analyses = []

    # pybids likes to give us a lot of extraneous columns
    cnames = [contrast['name'] for contrast in block.contrasts] + \
        block.model['variables']
    fmri_glm = level2.SecondLevelModel()
    for contrasts, idx, ents in block.get_contrasts(names=cnames):
        if contrasts.empty:
            continue

        data = []
        for in_name, sub_ents in zip(contrasts.index,
                                     idx.to_dict(orient='record')):
            # The underlying contrast name might have been added to by a
            # transform
            for option in [in_name] + in_name.split('.'):
                files = fl_layout.get(contrast=snake_to_camel(option),
                                      type='stat', space=space, **sub_ents)
                if files:
                    data.append(files[0].filename)
                    break
            else:
                raise ValueError("Unknown input: {}".format(in_name))

        # Entities shared by every input become the output entities
        out_ents = reduce(dict_intersection,
                          map(fl_layout.parse_file_entities, data))

        contrasts_ents = out_ents.copy()
        contrasts_ents['type'] = 'contrasts'
        contrasts_ents.pop('contrast', None)
        contrasts_ents.pop('space', None)
        contrasts_fname = op.join(
            deriv_dir, fl_layout.build_path(contrasts_ents, strict=True))

        # Make parent results directory
        os.makedirs(os.path.dirname(contrasts_fname), exist_ok=True)
        plot_and_save(contrasts_fname, plot_contrast_matrix, contrasts,
                      ornt='horizontal')

        job_desc = {
            'ents': out_ents,
            'subject_id': ents.get('subject'),
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'contrasts_svg': contrasts_fname,
        }

        for contrast in contrasts:
            out_ents['contrast'] = snake_to_camel(contrast)

            stat_fname = op.join(deriv_dir,
                                 fl_layout.build_path(out_ents, strict=True))

            ortho_ents = out_ents.copy()
            ortho_ents['type'] = 'ortho'
            ortho_fname = op.join(
                deriv_dir,
                analysis.layout.build_path(ortho_ents, strict=True))

            desc = {'name': contrast, 'image_file': ortho_fname}
            job_desc.setdefault('contrasts', []).append(desc)

            # Skip contrasts already computed on a previous run
            if op.exists(stat_fname):
                continue

            cols = {'intercept': np.ones(len(data))}
            cname = 'intercept'
            if not np.allclose(contrasts[contrast], 1):
                cname = contrast
                cols[contrast] = contrasts[contrast]

            paradigm = pd.DataFrame(cols)

            fmri_glm.fit(data, design_matrix=paradigm)
            stat_type = [c['type'] for c in block.contrasts
                         if c['name'] == contrast] or ['T']
            stat_type = stat_type[0]
            stat = fmri_glm.compute_contrast(
                cname,
                second_level_stat_type={'T': 't', 'F': 'F'}[stat_type],
            )
            # FIX: this previously reassigned ``data``, clobbering the list
            # of input filenames; every later contrast in this loop then fit
            # against the stat array instead of the input images.
            stat_data = stat.get_data()
            masked_vals = stat_data[stat_data != 0]
            if np.isnan(masked_vals).all():
                raise ValueError("nistats was unable to perform this contrast")
            stat.to_filename(stat_fname)

            nlp.plot_glass_brain(stat, colorbar=True,
                                 threshold=sps.norm.isf(0.001),
                                 plot_abs=False, display_mode='lyrz',
                                 output_file=ortho_fname)

        analyses.append(job_desc)

    return analyses
def _run_interface(self, runtime):
    """Fit one dummy-coded second-level model and derive all contrasts.

    Inputs are filtered to those matching the contrast entities, the model
    is fit once against a dummy-coded design of input contrast names, and
    every requested contrast is computed from that single fit. All five
    nistats output maps are written per contrast into ``runtime.cwd``.
    """
    from nistats import second_level_model as level2

    fwhm = self.inputs.smoothing_fwhm
    if not isdefined(fwhm):
        fwhm = None
    glm = level2.SecondLevelModel(smoothing_fwhm=fwhm)

    # One output list per nistats map type, in write order
    collected = {
        'effect_size': [],
        'effect_variance': [],
        'z_score': [],
        'p_value': [],
        'stat': [],
    }
    metadata_records = []
    out_ents = self.inputs.contrast_info[0]['entities']  # Same for all
    make_fname = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

    # Only keep files which match all entities for contrast
    all_metadata = _flatten(self.inputs.stat_metadata)
    all_effects = _flatten(self.inputs.effect_maps)

    kept_effects = []
    kept_names = []
    for meta, effect in zip(all_metadata, all_effects):
        if _match(out_ents, meta):
            kept_effects.append(effect)
            kept_names.append(meta['contrast'])

    # Dummy code contrast of input effects
    design = pd.get_dummies(kept_names)

    # A single fit serves every contrast computed below
    glm.fit(kept_effects, design_matrix=design)

    for name, weights, stat_type in prepare_contrasts(
            self.inputs.contrast_info, design.columns.to_list()):
        metadata_records.append(
            {'contrast': name, 'stat': stat_type, **out_ents})
        outputs = glm.compute_contrast(second_level_contrast=weights,
                                       second_level_stat_type=stat_type,
                                       output_type='all')
        for map_type, bucket in collected.items():
            fname = make_fname(name, map_type)
            outputs[map_type].to_filename(fname)
            bucket.append(fname)

    self._results['effect_maps'] = collected['effect_size']
    self._results['variance_maps'] = collected['effect_variance']
    self._results['stat_maps'] = collected['stat']
    self._results['zscore_maps'] = collected['z_score']
    self._results['pvalue_maps'] = collected['p_value']
    self._results['contrast_metadata'] = metadata_records

    return runtime
def _run_interface(self, runtime):
    """Compute second-level contrasts, with CIFTI and fixed-effects support.

    Handles two input flavors: CIFTI dscalar series (fit via
    ``level1.run_glm`` on the loaded data arrays) and volumetric NIfTI
    (fit via ``SecondLevelModel``). 'FEMA' contrasts bypass the model and
    are combined with fixed-effects averaging of effect/variance maps.
    """
    import nibabel as nb
    from nistats import second_level_model as level2
    from nistats import first_level_model as level1
    from nistats.contrasts import (compute_contrast, compute_fixed_effects,
                                   _compute_fixed_effects_params)

    smoothing_fwhm = self.inputs.smoothing_fwhm
    if not isdefined(smoothing_fwhm):
        smoothing_fwhm = None

    effect_maps = []
    variance_maps = []
    stat_maps = []
    zscore_maps = []
    pvalue_maps = []
    contrast_metadata = []
    out_ents = self.inputs.contrast_info[0]['entities']  # Same for all

    # Only keep files which match all entities for contrast
    stat_metadata = _flatten(self.inputs.stat_metadata)
    input_effects = _flatten(self.inputs.effect_maps)
    input_variances = _flatten(self.inputs.variance_maps)

    filtered_effects = []
    filtered_variances = []
    names = []
    for m, eff, var in zip(stat_metadata, input_effects, input_variances):
        if _match(out_ents, m):
            filtered_effects.append(eff)
            filtered_variances.append(var)
            names.append(m['contrast'])

    # Dummy-coded design over the input contrast names
    mat = pd.get_dummies(names)
    contrasts = prepare_contrasts(self.inputs.contrast_info, mat.columns)

    # CIFTI detection by filename extension; chooses the output extension too
    is_cifti = filtered_effects[0].endswith('dscalar.nii')
    if is_cifti:
        fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
    else:
        fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

    # Only fit model if any non-FEMA contrasts at this level.
    # NOTE: ``labels``/``estimates`` (CIFTI path) and ``model`` (volumetric
    # path) are bound conditionally here and consumed in the non-FEMA
    # branch of the loop below; the guard keeps the two in sync.
    if any(c[2] != 'FEMA' for c in contrasts):
        if len(filtered_effects) < 2:
            raise RuntimeError(
                "At least two inputs are required for a 't' for 'F' "
                "second level contrast")
        if is_cifti:
            effect_data = np.squeeze([
                nb.load(effect).get_fdata(dtype='f4')
                for effect in filtered_effects
            ])
            labels, estimates = level1.run_glm(effect_data, mat.values,
                                               noise_model='ols')
        else:
            model = level2.SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
            model.fit(filtered_effects, design_matrix=mat)

    for name, weights, contrast_type in contrasts:
        contrast_metadata.append({
            'contrast': name,
            'stat': contrast_type,
            **out_ents
        })

        # Pass-through happens automatically as it can handle 1 input
        if contrast_type == 'FEMA':
            # Index design identity matrix on non-zero contrasts weights
            con_ix = weights[0].astype(bool)
            # Index of all input files "involved" with that contrast
            dm_ix = mat.iloc[:, con_ix].any(axis=1)

            contrast_imgs = np.array(filtered_effects)[dm_ix]
            variance_imgs = np.array(filtered_variances)[dm_ix]

            if is_cifti:
                # Fixed effects computed on raw data arrays; results are
                # wrapped back into dscalar images on the first input's
                # geometry
                ffx_cont, ffx_var, ffx_t = _compute_fixed_effects_params(
                    np.squeeze([
                        nb.load(fname).get_fdata(dtype='f4')
                        for fname in contrast_imgs
                    ]),
                    np.squeeze([
                        nb.load(fname).get_fdata(dtype='f4')
                        for fname in variance_imgs
                    ]),
                    precision_weighted=False)
                img = nb.load(filtered_effects[0])
                maps = {
                    'effect_size': dscalar_from_cifti(img, ffx_cont,
                                                      "effect_size"),
                    'effect_variance': dscalar_from_cifti(img, ffx_var,
                                                          "effect_variance"),
                    'stat': dscalar_from_cifti(img, ffx_t, "stat")
                }
            else:
                ffx_res = compute_fixed_effects(contrast_imgs, variance_imgs)
                maps = {
                    'effect_size': ffx_res[0],
                    'effect_variance': ffx_res[1],
                    'stat': ffx_res[2]
                }
        else:
            if is_cifti:
                contrast = compute_contrast(labels, estimates, weights,
                                            contrast_type=contrast_type)
                img = nb.load(filtered_effects[0])
                maps = {
                    map_type: dscalar_from_cifti(
                        img, getattr(contrast, map_type)(), map_type)
                    for map_type in [
                        'z_score', 'stat', 'p_value', 'effect_size',
                        'effect_variance'
                    ]
                }
            else:
                maps = model.compute_contrast(
                    second_level_contrast=weights,
                    second_level_stat_type=contrast_type,
                    output_type='all')

        # FEMA produces only three map types, hence the membership check
        for map_type, map_list in (('effect_size', effect_maps),
                                   ('effect_variance', variance_maps),
                                   ('z_score', zscore_maps),
                                   ('p_value', pvalue_maps),
                                   ('stat', stat_maps)):
            if map_type in maps:
                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

    self._results['effect_maps'] = effect_maps
    self._results['variance_maps'] = variance_maps
    self._results['stat_maps'] = stat_maps
    self._results['contrast_metadata'] = contrast_metadata

    # These are "optional" as fixed effects do not support these
    if zscore_maps:
        self._results['zscore_maps'] = zscore_maps
    if pvalue_maps:
        self._results['pvalue_maps'] = pvalue_maps

    return runtime
def _run_interface(self, runtime):
    """Fit an intercept-only second-level model for each contrast.

    Filters the input effect maps to those matching the contrast entities,
    then per contrast fits a one-column ("intercept") model over the
    non-zero-weighted inputs and writes all five nistats output maps into
    ``runtime.cwd``.
    """
    from nistats import second_level_model as level2

    fwhm = self.inputs.smoothing_fwhm
    if not isdefined(fwhm):
        fwhm = None
    glm = level2.SecondLevelModel(smoothing_fwhm=fwhm)

    out_ents = self.inputs.contrast_info[0]['entities']  # Same for all
    make_fname = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

    # One output list per nistats map type, in write order
    maps_by_type = {
        'effect_size': [],
        'effect_variance': [],
        'z_score': [],
        'p_value': [],
        'stat': [],
    }
    metadata_records = []

    # Only keep files which match all entities for contrast
    all_metadata = _flatten(self.inputs.stat_metadata)
    all_effects = _flatten(self.inputs.effect_maps)
    # XXX nistats should begin supporting mixed effects models soon
    # all_variances = _flatten(self.inputs.variance_maps)
    all_variances = [None] * len(all_effects)

    kept_effects = []
    kept_variances = []
    kept_names = []
    for meta, eff, var in zip(all_metadata, all_effects, all_variances):
        if _match(out_ents, meta):
            kept_effects.append(eff)
            kept_variances.append(var)
            kept_names.append(meta['contrast'])

    for name, weights, stat_type in prepare_contrasts(
            self.inputs.contrast_info, kept_names):
        # Need to add F-test support for intercept (more than one column)
        # Currently only taking 0th column as intercept (t-test)
        weights = weights[0]
        nonzero = weights != 0
        effects = (np.array(kept_effects)[nonzero]).tolist()
        # Placeholder until mixed-effects support lands (see XXX above)
        _variances = (np.array(kept_variances)[nonzero]).tolist()
        design = pd.DataFrame({'intercept': weights[nonzero]})

        glm.fit(effects, design_matrix=design)
        outputs = glm.compute_contrast(second_level_stat_type=stat_type,
                                       output_type='all')
        metadata_records.append(
            {'contrast': name, 'stat': stat_type, **out_ents})

        for map_type, bucket in maps_by_type.items():
            fname = make_fname(name, map_type)
            outputs[map_type].to_filename(fname)
            bucket.append(fname)

    self._results['effect_maps'] = maps_by_type['effect_size']
    self._results['variance_maps'] = maps_by_type['effect_variance']
    self._results['stat_maps'] = maps_by_type['stat']
    self._results['zscore_maps'] = maps_by_type['z_score']
    self._results['pvalue_maps'] = maps_by_type['p_value']
    self._results['contrast_metadata'] = metadata_records

    return runtime
def _run_interface(self, runtime):
    """Compute second-level contrasts, with fixed-effects (FEMA) support.

    Non-FEMA contrasts are computed from a single ``SecondLevelModel`` fit
    against a dummy-coded design; 'FEMA' contrasts bypass the model and are
    combined via fixed-effects averaging of effect/variance maps.
    """
    from nistats import second_level_model as level2
    from nistats.contrasts import compute_fixed_effects

    smoothing_fwhm = self.inputs.smoothing_fwhm
    if not isdefined(smoothing_fwhm):
        smoothing_fwhm = None

    effect_maps = []
    variance_maps = []
    stat_maps = []
    zscore_maps = []
    pvalue_maps = []
    contrast_metadata = []
    out_ents = self.inputs.contrast_info[0]['entities']  # Same for all
    fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

    # Only keep files which match all entities for contrast
    stat_metadata = _flatten(self.inputs.stat_metadata)
    input_effects = _flatten(self.inputs.effect_maps)
    input_variances = _flatten(self.inputs.variance_maps)

    filtered_effects = []
    filtered_variances = []
    names = []
    for m, eff, var in zip(stat_metadata, input_effects, input_variances):
        if _match(out_ents, m):
            filtered_effects.append(eff)
            filtered_variances.append(var)
            names.append(m['contrast'])

    # Dummy-coded design over the input contrast names
    mat = pd.get_dummies(names)
    contrasts = prepare_contrasts(self.inputs.contrast_info, mat.columns)

    # Only fit model if any non-FEMA contrasts at this level.
    # NOTE: ``model`` is bound only inside this guard; the non-FEMA branch
    # below is reachable only when this guard ran, so the binding is safe.
    if any(c[2] != 'FEMA' for c in contrasts):
        if len(filtered_effects) < 2:
            raise RuntimeError(
                "At least two inputs are required for a 't' for 'F' "
                "second level contrast")
        model = level2.SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
        model.fit(filtered_effects, design_matrix=mat)

    for name, weights, contrast_type in contrasts:
        contrast_metadata.append({
            'contrast': name,
            'stat': contrast_type,
            **out_ents
        })

        # Pass-through happens automatically as it can handle 1 input
        if contrast_type == 'FEMA':
            # Index design identity matrix on non-zero contrasts weights
            con_ix = weights[0].astype(bool)
            # Index of all input files "involved" with that contrast
            dm_ix = mat.iloc[:, con_ix].any(axis=1)

            ffx_res = compute_fixed_effects(
                np.array(filtered_effects)[dm_ix],
                np.array(filtered_variances)[dm_ix])
            maps = {
                'effect_size': ffx_res[0],
                'effect_variance': ffx_res[1],
                'stat': ffx_res[2]
            }
        else:
            maps = model.compute_contrast(
                second_level_contrast=weights,
                second_level_stat_type=contrast_type,
                output_type='all')

        # FEMA produces only three map types, hence the membership check
        for map_type, map_list in (('effect_size', effect_maps),
                                   ('effect_variance', variance_maps),
                                   ('z_score', zscore_maps),
                                   ('p_value', pvalue_maps),
                                   ('stat', stat_maps)):
            if map_type in maps:
                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

    self._results['effect_maps'] = effect_maps
    self._results['variance_maps'] = variance_maps
    self._results['stat_maps'] = stat_maps
    self._results['contrast_metadata'] = contrast_metadata

    # These are "optional" as fixed effects do not support these
    if zscore_maps:
        self._results['zscore_maps'] = zscore_maps
    if pvalue_maps:
        self._results['pvalue_maps'] = pvalue_maps

    return runtime