Example 1
def test_t_contrast_add():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
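These test snippets, here and in the examples below, assume a few shared imports; a minimal sketch, assuming the current nilearn layout:

import numpy as np
from numpy.testing import assert_almost_equal
from nilearn.glm.contrasts import compute_contrast
from nilearn.glm.first_level import run_glm

With these in scope, each test function runs standalone.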
Example 2
def test_t_contrast_add():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example 3
def test_F_contrast_add():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

    # now test a dependent contrast: summing a contrast with itself
    # doubles the effect, the variance, and the stat
    con1 = compute_contrast(lab, res, c1)
    con2 = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c1)
    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())
Example 4
def test_Tcontrast():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    labels, results = run_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example 5
def test_contrast_values():
    # note: this test is circular and should be removed
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1', bins=1)
    # t test
    cval = np.eye(q)[0]
    con = compute_contrast(lab, res, cval)
    t_ref = list(res.values())[0].Tcontrast(cval).t
    assert_almost_equal(np.ravel(con.stat()), t_ref)
    # F test
    cval = np.eye(q)[:3]
    con = compute_contrast(lab, res, cval)
    F_ref = list(res.values())[0].Fcontrast(cval).F
    # Note that the values are not strictly equal,
    # this seems to be related to a bug in Mahalanobis
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)
Example 6
def test_Tcontrast():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    labels, results = run_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example 7
def test_contrast_mul():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        assert_almost_equal(con1.z_score(), con2.z_score())
Example 8
def test_contrast_mul():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    lab, res = run_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        assert_almost_equal(con1.z_score(), con2.z_score())
Example 9
def test_Fcontrast():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    for model in ['ols', 'ar1']:
        labels, results = run_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(labels,
                                      results,
                                      con_val,
                                      contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example 10
def test_Fcontrast():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    for model in ['ols', 'ar1']:
        labels, results = run_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(labels,
                                      results,
                                      con_val,
                                      contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example 11
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm import second_level as level2
        from nilearn.glm import first_level as level1
        from nilearn.glm.contrasts import (compute_contrast,
                                           compute_fixed_effects,
                                           _compute_fixed_effects_params)

        smoothing_fwhm = self.inputs.smoothing_fwhm
        smoothing_type = self.inputs.smoothing_type
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        if isdefined(smoothing_type) and smoothing_type != 'iso':
            raise NotImplementedError(
                "Only the iso smoothing type is available for the nistats estimator."
            )
        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']  # Same for all

        # Only keep files which match all entities for contrast
        stat_metadata = _flatten(self.inputs.stat_metadata)
        input_effects = _flatten(self.inputs.effect_maps)
        input_variances = _flatten(self.inputs.variance_maps)

        filtered_effects = []
        filtered_variances = []
        names = []
        for m, eff, var in zip(stat_metadata, input_effects, input_variances):
            if _match(out_ents, m):
                filtered_effects.append(eff)
                filtered_variances.append(var)
                names.append(m['contrast'])

        mat = pd.get_dummies(names)
        contrasts = prepare_contrasts(self.inputs.contrast_info, mat.columns)

        is_cifti = filtered_effects[0].endswith('dscalar.nii')
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

        # Only fit a model if there are any non-FEMA contrasts at this level
        if any(c[2] != 'FEMA' for c in contrasts):
            if len(filtered_effects) < 2:
                raise RuntimeError(
                    "At least two inputs are required for a 't' or 'F' "
                    "second level contrast")
            if is_cifti:
                effect_data = np.squeeze([
                    nb.load(effect).get_fdata(dtype='f4')
                    for effect in filtered_effects
                ])
                labels, estimates = level1.run_glm(effect_data,
                                                   mat.values,
                                                   noise_model='ols')
            else:
                model = level2.SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
                model.fit(filtered_effects, design_matrix=mat)

        for name, weights, contrast_type in contrasts:
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })

            # Pass-through happens automatically as it can handle 1 input
            if contrast_type == 'FEMA':
                # Index the design identity matrix on non-zero contrast weights
                con_ix = weights[0].astype(bool)
                # Index of all input files "involved" with that contrast
                dm_ix = mat.iloc[:, con_ix].any(axis=1)

                contrast_imgs = np.array(filtered_effects)[dm_ix]
                variance_imgs = np.array(filtered_variances)[dm_ix]
                if is_cifti:
                    ffx_cont, ffx_var, ffx_t = _compute_fixed_effects_params(
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in contrast_imgs
                        ]),
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in variance_imgs
                        ]),
                        precision_weighted=False)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        'effect_size':
                        dscalar_from_cifti(img, ffx_cont, "effect_size"),
                        'effect_variance':
                        dscalar_from_cifti(img, ffx_var, "effect_variance"),
                        'stat':
                        dscalar_from_cifti(img, ffx_t, "stat")
                    }

                else:
                    ffx_res = compute_fixed_effects(contrast_imgs,
                                                    variance_imgs)
                    maps = {
                        'effect_size': ffx_res[0],
                        'effect_variance': ffx_res[1],
                        'stat': ffx_res[2]
                    }
            else:
                if is_cifti:
                    contrast = compute_contrast(labels,
                                                estimates,
                                                weights,
                                                contrast_type=contrast_type)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        map_type:
                        dscalar_from_cifti(img,
                                           getattr(contrast, map_type)(),
                                           map_type)
                        for map_type in [
                            'z_score', 'stat', 'p_value', 'effect_size',
                            'effect_variance'
                        ]
                    }
                else:
                    maps = model.compute_contrast(
                        second_level_contrast=weights,
                        second_level_stat_type=contrast_type,
                        output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):
                if map_type in maps:
                    fname = fname_fmt(name, map_type)
                    maps[map_type].to_filename(fname)
                    map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['contrast_metadata'] = contrast_metadata

        # These are "optional" as fixed effects do not support these
        if zscore_maps:
            self._results['zscore_maps'] = zscore_maps
        if pvalue_maps:
            self._results['pvalue_maps'] = pvalue_maps

        return runtime
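The FEMA branch above leans on nilearn's fixed-effects helper; a minimal standalone sketch, where effect_imgs and variance_imgs are placeholder lists of per-run effect-size and variance maps (file paths or Nifti1Image objects):

from nilearn.glm.contrasts import compute_fixed_effects

# fixed-effects contrast, variance, and t-stat maps, combined across runs
ffx_effect, ffx_variance, ffx_stat = compute_fixed_effects(
    effect_imgs, variance_imgs, precision_weighted=False)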
Example 12
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm import first_level as level1
        from nilearn.glm.contrasts import compute_contrast
        mat = pd.read_csv(self.inputs.design_matrix,
                          delimiter='\t',
                          index_col=0)
        img = nb.load(self.inputs.bold_file)

        is_cifti = isinstance(img, nb.Cifti2Image)
        if isinstance(img, nb.dataobj_images.DataobjImage):
            # Ugly hack to ensure that retrieved data isn't cast to float64 unless
            # necessary to prevent an overflow
            # For NIfTI-1 files, slope and inter are 32-bit floats, so this is
            # "safe". For NIfTI-2 (including CIFTI-2), these fields are 64-bit,
            # so include a check to make sure casting doesn't lose too much.
            slope32 = np.float32(img.dataobj._slope)
            inter32 = np.float32(img.dataobj._inter)
            if max(np.abs(slope32 - img.dataobj._slope),
                   np.abs(inter32 - img.dataobj._inter)) < 1e-7:
                img.dataobj._slope = slope32
                img.dataobj._inter = inter32

        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        smoothing_type = self.inputs.smoothing_type
        if isdefined(smoothing_type) and smoothing_type != 'iso':
            raise NotImplementedError(
                "Only the iso smoothing type is available for the nistats estimator."
            )
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
            labels, estimates = level1.run_glm(img.get_fdata(dtype='f4'),
                                               mat.values)
            model_attr = {
                'r_square':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'r_square'),
                    'r_square'),
                'log_likelihood':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'logL'),
                    'log_likelihood')
            }
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
            flm = level1.FirstLevelModel(minimize_memory=False,
                                         mask_img=mask_file,
                                         smoothing_fwhm=smoothing_fwhm)
            flm.fit(img, design_matrices=mat)
            model_attr = {
                'r_square':
                flm.r_square[0],
                'log_likelihood':
                flm.masker_.inverse_transform(
                    _get_voxelwise_stat(flm.labels_[0], flm.results_[0],
                                        'logL'))
            }

        out_ents = self.inputs.contrast_info[0]['entities']

        # Save model level images

        model_maps = []
        model_metadata = []
        # `stat_img`, not `img`: the BOLD/CIFTI image loaded above must stay
        # available for the contrast loop below
        for attr, stat_img in model_attr.items():
            model_metadata.append({'stat': attr, **out_ents})
            fname = fname_fmt('model', attr)
            stat_img.to_filename(fname)
            model_maps.append(fname)

        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            if is_cifti:
                contrast = compute_contrast(labels,
                                            estimates,
                                            weights,
                                            contrast_type=contrast_type)
                maps = {
                    map_type: dscalar_from_cifti(img,
                                                 getattr(contrast, map_type)(),
                                                 map_type)
                    for map_type in [
                        'z_score', 'stat', 'p_value', 'effect_size',
                        'effect_variance'
                    ]
                }

            else:
                maps = flm.compute_contrast(weights,
                                            contrast_type,
                                            output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):

                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata
        self._results['model_maps'] = model_maps
        self._results['model_metadata'] = model_metadata

        return runtime
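Stripped of the interface machinery, the volumetric branch above reduces to a few nilearn calls; a minimal sketch, where bold_file and design are placeholders for a 4D NIfTI path and a pandas design matrix (one row per volume), and 'a - b' stands for any expression over the design columns:

from nilearn.glm.first_level import FirstLevelModel

flm = FirstLevelModel(smoothing_fwhm=None, minimize_memory=False)
flm.fit(bold_file, design_matrices=design)
# output_type='all' returns a dict with 'z_score', 'stat', 'p_value',
# 'effect_size', and 'effect_variance' images
maps = flm.compute_contrast('a - b', stat_type='t', output_type='all')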
Example 13
def _run_glmdenoise_model(ddict, cfg, logger):
    """ Runs a GLMdenoise-style cross-validated analysis. """
    Y_all = ddict['denoised_func'].copy()
    nonzero = ~np.all(np.isclose(Y_all, 0.), axis=0)

    # Some shortcuts
    n_runs = np.unique(ddict['run_idx']).size
    K = Y_all.shape[1]
    stype = STATS[cfg['pattern_units']]

    # Pre-allocate some arrays, separately for bootstrap data (boot) and
    # parametric data (param)
    conditions = ddict['preproc_events']['trial_type'].unique().tolist()
    cond_param = np.zeros((len(conditions), K))

    if cfg['contrast'] is not None:
        # ccon = custom contrast
        ccon_param = np.zeros(K)  # parametric

    # Note: opt_n_comps must be the same for each run!
    if ddict['opt_n_comps'].ndim > 1:
        raise ValueError("Cannot have run-specific n-comps when using GLMdenoise. Set --regularize-n-comps!")

    opt_n_comps = ddict['opt_n_comps']

    if cfg['hrf_model'] == 'kay':  # use optimal HRF
        if ddict['opt_hrf_idx'].sum() == 0:
            logger.warning("No HRF index data found; starting the optimization routine")
            r2_hrf = _optimize_hrf_between(ddict, cfg, logger)
            opt_hrf_idx = r2_hrf.argmax(axis=0)
            save_data(opt_hrf_idx, cfg, ddict, par_dir='best', run=None, desc='opt', dtype='hrf')
            save_data(r2_hrf, cfg, ddict, par_dir='best', run=None, desc='hrf', dtype='r2')
            save_data(r2_hrf.max(axis=0), cfg, ddict, par_dir='best', run=None, desc='max', dtype='r2')   
        else:
            opt_hrf_idx = ddict['opt_hrf_idx'].astype(int)
    else:  # use a single HRF (the index is ignored when hrf_model is not 'kay')
        opt_hrf_idx = np.zeros(K)

    r2 = np.zeros(K)
    preds = np.zeros_like(Y_all)

    # Loop over HRF indices
    for hrf_idx in np.unique(opt_hrf_idx).astype(int):            
        # Loop over n-components
        for n_comp in np.unique(opt_n_comps).astype(int):
            # Determine the voxel index: the intersection of nonzero voxels,
            # voxels denoised with the current n_comp, and voxels assigned
            # the current HRF
            vox_idx = opt_n_comps == n_comp
            vox_idx = np.logical_and(vox_idx, nonzero)
            vox_idx = np.logical_and(vox_idx, hrf_idx == opt_hrf_idx)

            # Gather the run-specific design matrices
            Xs = []
            for run in range(n_runs):
                tr = ddict['trs'][run]
                this_Y, confs, events = get_run_data(ddict, run, func_type='denoised')
                ft = get_frame_times(tr, ddict, cfg, this_Y)
                # Note: hrf_idx is ignored when hrf_model is not "kay"
                X = create_design_matrix(tr, ft, events, hrf_model=cfg['hrf_model'], hrf_idx=hrf_idx)
                X = X.drop('constant', axis=1)  # remove intercept

                # Orthogonalize noise components w.r.t. design matrix
                if n_comp != 0:
                    X.loc[:, :], _ = custom_clean(X, this_Y, confs[:, :n_comp], tr, ddict, cfg, clean_Y=False)
    
                X = X - X.mean(axis=0)
                Xs.append(X)

            # Concatenate design matrices
            X = pd.concat(Xs, axis=0)
            Y = Y_all[:, vox_idx]  # only current voxels

            # Get regular (parametric) scores
            labels, results = run_glm(Y, X.to_numpy(), noise_model='ols')
            r2[vox_idx] = get_param_from_glm('r_square', labels, results, X, time_series=False)
            preds[:, vox_idx] = get_param_from_glm('predicted', labels, results, X, time_series=True)

            for i, cond in enumerate(conditions):
                cvec = np.zeros(X.shape[1])
                cvec[X.columns.tolist().index(cond)] = 1
                con = compute_contrast(labels, results, con_val=cvec, contrast_type='t')
                cond_param[i, vox_idx] = getattr(con, stype)()

            if cfg['contrast'] is not None:
                cvec = expression_to_contrast_vector(cfg['contrast'], X.columns.tolist())
                con = compute_contrast(labels, results, cvec)
                ccon_param[vox_idx] = getattr(con, stype)()

    save_data(r2, cfg, ddict, par_dir='best', run=None, desc='model', dtype='r2', nii=True)
    for i, cond in enumerate(conditions):
        save_data(cond_param[i, :], cfg, ddict, par_dir='best', run=None, desc=cond, dtype=cfg['pattern_units'], nii=True)

    if cfg['contrast'] is not None:
        save_data(ccon_param, cfg, ddict, par_dir='best', run=None, desc='custom', dtype=cfg['pattern_units'], nii=True)

    for run in np.unique(ddict['run_idx']):
        save_data(preds[run == ddict['run_idx']], cfg, ddict, par_dir='best',
                  run=run+1, desc='model', dtype='predicted', nii=True)
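The custom-contrast branch above relies on nilearn's expression parser, which turns a string over design-matrix columns into a weight vector; a small sketch with hypothetical column names:

from nilearn.glm.contrasts import expression_to_contrast_vector

cols = ['face', 'house', 'constant']  # hypothetical design columns
cvec = expression_to_contrast_vector('face - house', cols)
# cvec -> array([ 1., -1.,  0.])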
Example 14
def _run_single_trial_model_parallel(run, best_hrf_idx, ddict, cfg, logger):
    """ Fits a single trial model, possibly using an optimized HRF. """
    Y, conf, events = get_run_data(ddict, run, func_type='denoised')
    K = Y.shape[1]
    tr = ddict['trs'][run]
    stype = cfg['pattern_units']  # stat type

    # ft = frame times (Nilearn lingo)
    ft = get_frame_times(tr, ddict, cfg, Y)
    nonzero = ~np.all(np.isclose(Y, 0.), axis=0)

    # Which events are single trials (st)?
    if cfg['single_trial_id'] is None:
        st_idx = np.zeros(events.shape[0]).astype(bool)
    else:
        st_idx = events['trial_type'].str.contains(cfg['single_trial_id'])
    
    # What are the names of the single-trials (st) and other conditions (cond)?
    st_names = events.loc[st_idx, 'trial_type']
    cond_names = events.loc[~st_idx, 'trial_type'].unique().tolist()

    # If we're doing a proper single-trial LSA analysis, we add an "unmodulated"
    # stimulus regressor
    if cfg['single_trial_id'] is not None:
        cond_names += ['unmodstim']

    if best_hrf_idx.ndim > 1:  # run-specific HRF
        best_hrf_idx = best_hrf_idx[run, :]

    # Pre-allocate residuals and r2
    residuals = np.zeros(Y.shape)
    r2 = np.zeros(Y.shape[1])
    n_reg = len(st_names) + len(cond_names)
    patterns = np.zeros((n_reg + 1, K))
    preds = np.zeros(Y.shape)

    if cfg['contrast'] is not None:
        # ccon = custom contrast
        ccon = np.zeros(K)

    # Pre-allocate the parameter covariance matrix (+1 for the constant);
    # it is saved to disk at the end of this function
    varB = np.zeros((n_reg + 1, n_reg + 1, K))
                        
    # Loop over unique HRF indices (0-20 probably)
    for hrf_idx in tqdm_ctm(np.unique(best_hrf_idx), tdesc(f'Final model run {run+1}:')):
        # Create voxel mask (nonzero ^ hrf index)
        vox_idx = best_hrf_idx == hrf_idx
        vox_idx = np.logical_and(vox_idx, nonzero)

        # Get current design matrix
        X = create_design_matrix(tr, ft, events, hrf_model=cfg['hrf_model'], hrf_idx=hrf_idx)
        X = X.drop('constant', axis=1)

        if cfg['single_trial_id'] is not None:
            st_idx_x = X.columns.str.contains(cfg['single_trial_id'])
            st_idx_x = np.r_[st_idx_x, False]  # constant

        if 'unmodstim' in cond_names:
            # Add "unmodulated stimulus" regressor
            X['unmodstim'] = X.loc[:, st_idx_x].sum(axis=1)
            st_idx_x = np.r_[st_idx_x, False]  # this is not a single trial reg.

        # Loop across unique opt_n_comps
        for out in yield_glm_results(vox_idx, Y, X, conf, run, ddict, cfg):
            this_vox_idx, this_X, labels, results = out
            # Extract residuals, predictions, and r2
            residuals[:, this_vox_idx] = get_param_from_glm('residuals', labels, results, this_X, time_series=True)
            preds[:, this_vox_idx] = get_param_from_glm('predicted', labels, results, this_X, time_series=True)
            r2[this_vox_idx] = get_param_from_glm('r_square', labels, results, this_X, time_series=False)

            beta = get_param_from_glm('theta', labels, results, this_X, predictors=True)
            
            # Loop over columns to extract parameters/zscores
            for i, col in enumerate(this_X.columns):
                cvec = np.zeros(this_X.shape[1])
                cvec[this_X.columns.tolist().index(col)] = 1
                con = compute_contrast(labels, results, con_val=cvec, contrast_type='t')
                patterns[i, this_vox_idx] = getattr(con, STATS[stype])()

            # Evaluate "custom contrast" if there is any
            if cfg['contrast'] is not None:
                cvec = expression_to_contrast_vector(cfg['contrast'], this_X.columns.tolist())
                con = compute_contrast(labels, results, con_val=cvec, contrast_type='t')
                ccon[this_vox_idx] = getattr(con, STATS[stype])()

            for lab in np.unique(labels):
                r = results[lab]
                # dispersion = sigsq, cov = inv(X.T @ X)
                tmp_idx = np.zeros(K, dtype=bool)
                tmp_idx[this_vox_idx] = labels == lab
                #varB[:, :, tmp_idx] = r.dispersion * r.cov[..., np.newaxis]
                # For now, do not incorporate dispersion/sigsq
                varB[:, :, tmp_idx] = r.cov[..., np.newaxis]

                if cfg['uncorrelation']:
                    D = sqrtm(np.linalg.inv(r.cov[:-1, :-1]))
                    patterns[:-1, tmp_idx] = D @ patterns[:-1, tmp_idx]

    # uncorrelation (whiten patterns with covariance of design)
    # https://www.sciencedirect.com/science/article/pii/S1053811919310407
    # if cfg['uncorrelation']:
    #     logger.info("Prewhitening the patterns ('uncorrelation')")
    #     # Must be a way to do this without a loop?
    #     #nonzero = ~np.all(np.isclose(Y, 0.), axis=0)
    #     for vx in tqdm(range(K)):
    #         if np.isclose(varB.sum(), 0.0):
    #             continue

    #         D = sqrtm(np.linalg.inv(varB[:-1, :-1, vx]))
    #         patterns[:-1, vx] = np.squeeze(D @ patterns[:-1, vx, np.newaxis])

    # Extract single-trial (st) patterns and
    # condition-average patterns (cond)

    #save_data(preds_icept, cfg, ddict, par_dir='best', run=run+1, desc='model', dtype='predsicept', nii=True)

    if cfg['single_trial_id'] is not None:
        st_patterns = patterns[st_idx_x, :]
        save_data(st_patterns, cfg, ddict, par_dir='best', run=run+1, desc='trial', dtype=stype, nii=True)
        cond_patterns = patterns[~st_idx_x, :]
    else:
        cond_patterns = patterns

    # Save each parameter/statistic of the other conditions
    for i, name in enumerate(cond_names + ['constant']):
        save_data(cond_patterns[i, :], cfg, ddict, par_dir='best', run=run+1, desc=name, dtype=stype, nii=True)

    # Always save residuals
    save_data(residuals, cfg, ddict, par_dir='best', run=run+1, desc='model', dtype='residuals', nii=True)

    # In case of LSA, also save predicted values
    save_data(preds, cfg, ddict, par_dir='best', run=run+1, desc='model', dtype='predicted', nii=True)    

    # Always save R2
    save_data(r2, cfg, ddict, par_dir='best', run=run+1, desc='model', dtype='r2', nii=True)

    # Save custom contrast (--contrast)
    if cfg['contrast'] is not None:
        save_data(ccon, cfg, ddict, par_dir='best', run=run+1, desc='customcontrast', dtype=stype, nii=True)

    np.save(op.join(cfg['save_dir'], 'best', cfg['f_base'] + f'_run-{run+1}_desc-trial_cov.npy'), varB)
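The sqrtm step above implements the "uncorrelation" whitening from the paper linked in the commented-out block; a self-contained sketch of the same operation:

import numpy as np
from scipy.linalg import sqrtm

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 10))   # design matrix (time x regressors)
B = rng.standard_normal((10, 50))    # estimated patterns (regressors x voxels)
cov = np.linalg.inv(X.T @ X)         # parameter covariance, up to sigma^2
D = sqrtm(np.linalg.inv(cov))        # whitening matrix, i.e. sqrtm(X.T @ X)
B_white = D @ B                      # decorrelated patterns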
Example 15
contrasts = {
    'audio - visual':
    basic_contrasts['audio'] - basic_contrasts['visual'],
    'computation - sentences':
    (basic_contrasts['computation'] - basic_contrasts['sentences'])
}

###############################################################################
# Let's estimate the contrasts by iterating over them.
from nilearn.glm.contrasts import compute_contrast
from nilearn import plotting

for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print('  Contrast % i out of %i: %s, right hemisphere' %
          (index + 1, len(contrasts), contrast_id))
    # compute contrast-related statistics
    contrast = compute_contrast(labels,
                                estimates,
                                contrast_val,
                                contrast_type='t')
    # we present the Z-transform of the t map
    z_score = contrast.z_score()
    # we plot it on the surface, on the inflated fsaverage mesh,
    # together with a suitable background to give an impression
    # of the cortex folding.
    plotting.plot_surf_stat_map(fsaverage.infl_right,
                                z_score,
                                hemi='right',
                                title=contrast_id,
                                colorbar=True,
                                threshold=3.,
                                bg_map=fsaverage.sulc_right)

###############################################################################