Example no. 1
def test_t_contrast_add():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
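
These test snippets omit their module-level imports. A minimal sketch of the setup they appear to assume (nistats, whose GLM code later moved into nilearn as nilearn.glm):

import numpy as np
from numpy.testing import assert_almost_equal

from nistats.first_level_model import run_glm      # fits the mass-univariate GLM
from nistats.contrasts import compute_contrast     # builds a Contrast object from its output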
Example no. 3
def run_surface_glm(dmtx, contrasts, fmri_path, subject_session_output_dir):
    """ """
    from nibabel.gifti import read, write, GiftiDataArray, GiftiImage
    from nistats.first_level_model import run_glm
    from nistats.contrasts import compute_contrast
    Y = np.array([darrays.data for darrays in read(fmri_path).darrays])
    labels, res = run_glm(Y, dmtx)
    # Estimate the contrasts
    print('Computing contrasts...')
    side = fmri_path[-6:-4]
    for index, contrast_id in enumerate(contrasts):
        print('  Contrast % i out of %i: %s' %
              (index + 1, len(contrasts), contrast_id))
        # compute contrasts
        con_ = contrasts[contrast_id]
        contrast_ = compute_contrast(labels, res, con_)
        stats = [
            contrast_.z_score(), contrast_.stat_, contrast_.effect,
            contrast_.variance
        ]
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'], stats):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_surf' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s_%s.gii' % (contrast_id, side))
            print("\t\tWriting %s ..." % map_path)
            tex = GiftiImage(darrays=[
                GiftiDataArray().from_array(out_map, intent='t test')
            ])
            write(tex, map_path)
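
A hypothetical call, for illustration only: dmtx stands for an (n_scans, n_regressors) design-matrix array, and the contrast vector and paths are made up. Note that the hemisphere tag is sliced from the filename (fmri_path[-6:-4]), so the input GIFTI name is expected to end in something like '_lh.gii' or '_rh.gii':

contrasts = {'audio_minus_video': np.hstack([[1.0, -1.0], np.zeros(dmtx.shape[1] - 2)])}
run_surface_glm(dmtx, contrasts, '/data/sub-01_task-audio_lh.gii', '/out/sub-01_ses-01')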
Example no. 4
def compute_rfx_contrast(imgs, design_matrix, contrast_def, mask=None, noise_model='ols', stat_type='t', output_type='z_score'):
    """Fit a GLM across the input images (one observation per image) and return the requested contrast output."""

    design_info = DesignInfo(design_matrix.columns.tolist())
    if isinstance(imgs, list):
        Y = np.stack([i.get_data() for i in imgs]).reshape(len(imgs), -1)        
    elif isinstance(imgs, np.ndarray):
        Y = imgs
    else:
        raise ValueError(f"Unknown format for Y ({type(imgs)}).")

    X = design_matrix.values
    labels, results = run_glm(Y, X, noise_model=noise_model)

    if isinstance(contrast_def, (np.ndarray, str)):
        con_vals = [contrast_def]
    elif isinstance(contrast_def, (list, tuple)):
        con_vals = contrast_def
    else:
        raise ValueError('contrast_def must be an array or str or list of'
                         ' (array or str)')

    for cidx, con in enumerate(con_vals):
        if not isinstance(con, np.ndarray):
            con_vals[cidx] = design_info.linear_constraint(con).coefs

    contrast = compute_contrast(labels, results, con_vals, stat_type)

    values = getattr(contrast, output_type)()
    if isinstance(imgs, list):
        values = nib.Nifti1Image(values.reshape(imgs[0].shape), affine=imgs[0].affine)

    return values
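
A hypothetical one-sample group analysis with this helper, assuming effect_imgs is a list of per-subject effect-size NIfTI images and that the module-level imports the function relies on (numpy as np, nibabel as nib, patsy's DesignInfo, plus run_glm and compute_contrast from nistats) are in place:

import pandas as pd
dm = pd.DataFrame({'intercept': np.ones(len(effect_imgs))})
z_map = compute_rfx_contrast(effect_imgs, dm, np.array([1.0]), output_type='z_score')
z_map.to_filename('group_zmap.nii.gz')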
Example no. 5
def test_F_contrast_add():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

    # first test with dependent contrast
    con1 = compute_contrast(lab, res, c1)
    con2 = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c1)
    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())
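
Summing Contrast objects, as in the test above, is also how a single contrast is pooled over several runs that share a design matrix. A minimal sketch, where Y_run1 and Y_run2 are hypothetical per-run data arrays shaped like Y:

con = None
for run_Y in (Y_run1, Y_run2):
    lab, res = run_glm(run_Y, X, 'ar1')
    run_con = compute_contrast(lab, res, c1)
    # adding Contrast objects accumulates effects and variances (fixed-effects pooling)
    con = run_con if con is None else con + run_con
pooled_z = con.z_score()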
Example no. 7
def test_Tcontrast():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    labels, results = run_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example no. 8
def test_contrast_values():
    # new API
    # but this test is circular and should be removed
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1', bins=1)
    # t test
    cval = np.eye(q)[0]
    con = compute_contrast(lab, res, cval)
    t_ref = list(res.values())[0].Tcontrast(cval).t
    assert_almost_equal(np.ravel(con.stat()), t_ref)
    # F test
    cval = np.eye(q)[:3]
    con = compute_contrast(lab, res, cval)
    F_ref = list(res.values())[0].Fcontrast(cval).F
    # Note that the values are not strictly equal,
    # this seems to be related to a bug in Mahalanobis
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)
Example no. 11
def test_Fcontrast():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    for model in ['ols', 'ar1']:
        labels, results = run_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(
                labels, results, con_val, contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example no. 13
def test_contrast_mul():
    # new API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        # assert_almost_equal(con1.variance * 2, con2.variance) FIXME
        # assert_almost_equal(con1.stat() * 2, con2.stat()) FIXME
        assert_almost_equal(con1.z_score(), con2.z_score())
Example no. 15
    def compute_fxe_contrast(self, contrast_def, stat_type='t', run=None, output_type='z_score'):
        """ Computes a fixed effect across multiple runs. """
        
        self.logger.info(f"Computing contrast: {contrast_def} for task {self.task} ...")
        if self.glm is None:
            raise ValueError("GLM has not been run yet!")

        if run is None:
            results = self.glm['results']
            labels = self.glm['labels']
            dms = self.glm['dms']
            design_info = DesignInfo(dms[0].columns.tolist())
        else:
            results = self.glm['results'][run]
            labels = self.glm['labels'][run]
            dms = self.glm['dms'][run]
            design_info = DesignInfo(dms.columns.tolist())

        if isinstance(contrast_def, (np.ndarray, str)):
            con_vals = [contrast_def]
        elif isinstance(contrast_def, (list, tuple)):
            con_vals = contrast_def
        else:
            raise ValueError('contrast_def must be an array or str or list of'
                             ' (array or str)')

        for cidx, con in enumerate(con_vals):
            if not isinstance(con, np.ndarray):
                con_vals[cidx] = design_info.linear_constraint(con).coefs

        if run is None:
            contrast = _fixed_effect_contrast(labels, results, con_vals, stat_type)
        else:
            contrast = compute_contrast(labels, results, con_vals, stat_type)

        values = getattr(contrast, output_type)()
        if self.mask is not None:
            return masking.unmask(values, self.mask)
        else:
            return values
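
A hypothetical single-run call on an instance whose GLM has already been fit; model and n_regressors are placeholders for the actual object and the width of one run's design matrix:

cvec = np.zeros(n_regressors)
cvec[0] = 1
z_run0 = model.compute_fxe_contrast(cvec, stat_type='t', run=0, output_type='z_score')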
Example no. 16
for i in range(len(trials)):
    trials[i] = image.clean_img(trials[i], detrend=False, standardize=True)

Y = masking.apply_mask(image.concat_imgs(trials), mask)

events = pybest_dir + '/preproc/sub-02_ses-1_task-face_desc-preproc_events.tsv'
events = pd.read_csv(events, sep='\t').query("trial_type != 'rating' and trial_type != 'response'")
events.loc[:, 'face_eth'] = ['asian' if 'sian' in s else s for s in events['face_eth']]
events.loc[:, 'trial_type'] = [s[-7:] for s in events.loc[:, 'trial_type']]
X = events.loc[:, ['subject_dominance', 'subject_trustworthiness', 'subject_attractiveness']]
X /= X.mean(axis=0)
X = pd.concat((X, pd.get_dummies(events.loc[:, 'trial_type'])), axis=1)
X = pd.concat((X, pd.get_dummies(events.loc[:, 'face_eth'])), axis=1)
labels, results = run_glm(Y, X.to_numpy(), noise_model='ols')

for i in range(X.shape[1]):
    cvec = np.zeros(X.shape[1])
    cvec[i] = 1
    zscores = compute_contrast(labels, results, con_val=cvec, contrast_type='t').z_score()
    zscores = masking.unmask(zscores, mask)
    #zscores = image.smooth_img(zscores, fwhm=4)
    zscores.to_filename(f"{X.columns[i]}.nii.gz")


data = np.zeros_like(labels)

for lab in np.unique(labels):
    data[..., labels == lab] = getattr(results[lab], 'r_square')

masking.unmask(data, mask).to_filename('rsq.nii.gz')
        fmri_img = os.path.join(
            derivative_dir, 'wrr%s_%s_task-%s_bold.ico7.s5.%sh.gii' %
            (subject, session, task, hemisphere[0]))
        texture = np.array(
            [darrays.data for darrays in read(fmri_img).darrays]).T
        labels, res = run_glm(texture.T,
                              design_matrix.values[:texture.shape[1]])
        #######################################################################
        # contrast estimation
        for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
            print('  Contrast % 2i out of %i: %s' %
                  (index + 1, len(contrasts), contrast_id))
            if subject_idx == 0:
                effects[contrast_id] = []

            contrast_ = compute_contrast(labels, res, contrast_val)
            z_map = contrast_.z_score()
            effect = contrast_.effect
            effects[contrast_id].append(effect)
            # Create snapshots of the contrasts
            threshold = fdr_threshold(z_map, alpha=.05)
            out_file = os.path.join(
                write_dir, '%s_%s_z_map.png' % (contrast_id, hemisphere))
            plotting.plot_surf_stat_map(fsaverage['infl_%s' % hemisphere],
                                        z_map,
                                        hemi=hemisphere,
                                        title=contrast_id,
                                        colorbar=True,
                                        output_file=out_file,
                                        threshold=threshold,
                                        bg_map=fsaverage['sulc_%s' %
    # the drift model is implicitly a cosine basis with period cutoff 128s.
    design_matrix = make_first_level_design_matrix(
        frame_times, events=events[0], hrf_model='glover + derivative',
        add_regs=confound[0])

    # contrast_specification
    contrast_values = (design_matrix.columns == 'language') * 1.0 -\
                      (design_matrix.columns == 'string')

    # Set up and fit the GLM.
    # Note that the output consists of two variables: `labels` and `estimates`.
    # `labels` tags voxels according to noise autocorrelation;
    # `estimates` contains the parameter estimates.
    # We input them for contrast computation.
    labels, estimates = run_glm(texture.T, design_matrix.values)
    contrast = compute_contrast(labels, estimates, contrast_values,
                                contrast_type='t')
    # we present the Z-transform of the t map
    z_score = contrast.z_score()
    z_scores_right.append(z_score)

    # Do the left hemisphere in exactly the same way
    texture = surface.vol_to_surf(fmri_img, fsaverage.pial_left)
    labels, estimates = run_glm(texture.T, design_matrix.values)
    contrast = compute_contrast(labels, estimates, contrast_values,
                                contrast_type='t')
    z_scores_left.append(contrast.z_score())

############################################################################
# Individual activation maps have been accumulated in the z_scores_left
# and z_scores_right lists, respectively. We can now use them in a
# group study (one-sample study).
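
One way to carry out that one-sample group test on the stacked per-subject z maps (a sketch under the assumption that a simple t test against zero is wanted; not necessarily what the original script does):

from scipy.stats import norm, ttest_1samp
_, pval_right = ttest_1samp(np.array(z_scores_right), 0)
group_z_right = norm.isf(pval_right)   # map p-values back to z values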
Example no. 20
    def _run_interface(self, runtime):
        import nibabel as nb
        from nistats import first_level_model as level1
        from nistats.contrasts import compute_contrast
        mat = pd.read_csv(self.inputs.design_matrix,
                          delimiter='\t',
                          index_col=0)
        img = nb.load(self.inputs.bold_file)

        is_cifti = isinstance(img, nb.Cifti2Image)
        if isinstance(img, nb.dataobj_images.DataobjImage):
            # Ugly hack to ensure that retrieved data isn't cast to float64 unless
            # necessary to prevent an overflow
            # For NIfTI-1 files, slope and inter are 32-bit floats, so this is
            # "safe". For NIfTI-2 (including CIFTI-2), these fields are 64-bit,
            # so include a check to make sure casting doesn't lose too much.
            slope32 = np.float32(img.dataobj._slope)
            inter32 = np.float32(img.dataobj._inter)
            if max(np.abs(slope32 - img.dataobj._slope),
                   np.abs(inter32 - img.dataobj._inter)) < 1e-7:
                img.dataobj._slope = slope32
                img.dataobj._inter = inter32

        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
            labels, estimates = level1.run_glm(img.get_fdata(dtype='f4'),
                                               mat.values)
            model_attr = {
                'r_square':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'r_square'),
                    'r_square'),
                'log_likelihood':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'logL'),
                    'log_likelihood')
            }
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
            flm = level1.FirstLevelModel(minimize_memory=False,
                                         mask_img=mask_file,
                                         smoothing_fwhm=smoothing_fwhm)
            flm.fit(img, design_matrices=mat)
            model_attr = {
                'r_square':
                flm.r_square[0],
                'log_likelihood':
                flm.masker_.inverse_transform(
                    _get_voxelwise_stat(flm.labels_[0], flm.results_[0],
                                        'logL'))
            }

        out_ents = self.inputs.contrast_info[0]['entities']

        # Save model level images

        model_maps = []
        model_metadata = []
        for attr, img in model_attr.items():
            model_metadata.append({'stat': attr, **out_ents})
            fname = fname_fmt('model', attr)
            img.to_filename(fname)
            model_maps.append(fname)

        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            if is_cifti:
                contrast = compute_contrast(labels,
                                            estimates,
                                            weights,
                                            contrast_type=contrast_type)
                maps = {
                    map_type: dscalar_from_cifti(img,
                                                 getattr(contrast, map_type)(),
                                                 map_type)
                    for map_type in [
                        'z_score', 'stat', 'p_value', 'effect_size',
                        'effect_variance'
                    ]
                }

            else:
                maps = flm.compute_contrast(weights,
                                            contrast_type,
                                            output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):

                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata
        self._results['model_maps'] = model_maps
        self._results['model_metadata'] = model_metadata

        return runtime
Example no. 21
    def _run_interface(self, runtime):
        import nibabel as nb
        from nistats import second_level_model as level2
        from nistats import first_level_model as level1
        from nistats.contrasts import (compute_contrast, compute_fixed_effects,
                                       _compute_fixed_effects_params)

        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None

        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']  # Same for all

        # Only keep files which match all entities for contrast
        stat_metadata = _flatten(self.inputs.stat_metadata)
        input_effects = _flatten(self.inputs.effect_maps)
        input_variances = _flatten(self.inputs.variance_maps)

        filtered_effects = []
        filtered_variances = []
        names = []
        for m, eff, var in zip(stat_metadata, input_effects, input_variances):
            if _match(out_ents, m):
                filtered_effects.append(eff)
                filtered_variances.append(var)
                names.append(m['contrast'])

        mat = pd.get_dummies(names)
        contrasts = prepare_contrasts(self.inputs.contrast_info, mat.columns)

        is_cifti = filtered_effects[0].endswith('dscalar.nii')
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

        # Only fit model if any non-FEMA contrasts at this level
        if any(c[2] != 'FEMA' for c in contrasts):
            if len(filtered_effects) < 2:
                raise RuntimeError(
                    "At least two inputs are required for a 't' for 'F' "
                    "second level contrast")
            if is_cifti:
                effect_data = np.squeeze([
                    nb.load(effect).get_fdata(dtype='f4')
                    for effect in filtered_effects
                ])
                labels, estimates = level1.run_glm(effect_data,
                                                   mat.values,
                                                   noise_model='ols')
            else:
                model = level2.SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
                model.fit(filtered_effects, design_matrix=mat)

        for name, weights, contrast_type in contrasts:
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })

            # Pass-through happens automatically as it can handle 1 input
            if contrast_type == 'FEMA':
                # Index design identity matrix on non-zero contrasts weights
                con_ix = weights[0].astype(bool)
                # Index of all input files "involved" with that contrast
                dm_ix = mat.iloc[:, con_ix].any(axis=1)

                contrast_imgs = np.array(filtered_effects)[dm_ix]
                variance_imgs = np.array(filtered_variances)[dm_ix]
                if is_cifti:
                    ffx_cont, ffx_var, ffx_t = _compute_fixed_effects_params(
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in contrast_imgs
                        ]),
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in variance_imgs
                        ]),
                        precision_weighted=False)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        'effect_size':
                        dscalar_from_cifti(img, ffx_cont, "effect_size"),
                        'effect_variance':
                        dscalar_from_cifti(img, ffx_var, "effect_variance"),
                        'stat':
                        dscalar_from_cifti(img, ffx_t, "stat")
                    }

                else:
                    ffx_res = compute_fixed_effects(contrast_imgs,
                                                    variance_imgs)
                    maps = {
                        'effect_size': ffx_res[0],
                        'effect_variance': ffx_res[1],
                        'stat': ffx_res[2]
                    }
            else:
                if is_cifti:
                    contrast = compute_contrast(labels,
                                                estimates,
                                                weights,
                                                contrast_type=contrast_type)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        map_type:
                        dscalar_from_cifti(img,
                                           getattr(contrast, map_type)(),
                                           map_type)
                        for map_type in [
                            'z_score', 'stat', 'p_value', 'effect_size',
                            'effect_variance'
                        ]
                    }
                else:
                    maps = model.compute_contrast(
                        second_level_contrast=weights,
                        second_level_stat_type=contrast_type,
                        output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):
                if map_type in maps:
                    fname = fname_fmt(name, map_type)
                    maps[map_type].to_filename(fname)
                    map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['contrast_metadata'] = contrast_metadata

        # These are "optional" as fixed effects do not support these
        if zscore_maps:
            self._results['zscore_maps'] = zscore_maps
        if pvalue_maps:
            self._results['pvalue_maps'] = pvalue_maps

        return runtime