Example #1
def test_Tcontrast():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    labels, results = run_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #2
def test_fixed_effect_contrast():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = _compute_fixed_effect_contrast([lab, lab], [res, res], [c1, c2])
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #3
def test_t_contrast_add():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #4
def test_contrast_mul():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        assert_almost_equal(con1.z_score(), con2.z_score())
Example #5
def test_Tcontrast():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    labels, results = run_glm(Y, X, 'ar1')
    con_val = np.eye(q)[0]
    z_vals = compute_contrast(labels, results, con_val).z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #6
def test_contrast_mul():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    lab, res = run_glm(Y, X, 'ar1')
    for c1 in [np.eye(q)[0], np.eye(q)[:3]]:
        con1 = compute_contrast(lab, res, c1)
        con2 = con1 * 2
        assert_almost_equal(con1.effect * 2, con2.effect)
        assert_almost_equal(con1.z_score(), con2.z_score())
Example #7
def test_fixed_effect_contrast():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    lab, res = run_glm(Y, X, 'ols')
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = _compute_fixed_effect_contrast([lab, lab], [res, res], [c1, c2])
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #8
def test_Fcontrast():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    for model in ['ols', 'ar1']:
        labels, results = run_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(labels,
                                      results,
                                      con_val,
                                      contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example #9
def test_run_glm():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    # Ordinary Least Squares case
    labels, results = run_glm(Y, X, 'ols')
    assert_array_equal(labels, np.zeros(n))
    assert list(results.keys()) == [0.0]
    assert results[0.0].theta.shape == (q, n)
    assert_almost_equal(results[0.0].theta.mean(), 0, 1)
    assert_almost_equal(results[0.0].theta.var(), 1. / p, 1)

    # ar(1) case
    labels, results = run_glm(Y, X, 'ar1')
    assert len(labels) == n
    assert len(results.keys()) > 1
    tmp = sum([val.theta.shape[1] for val in results.values()])
    assert tmp == n

    # non-existent case
    with pytest.raises(ValueError):
        run_glm(Y, X, 'ar2')
    with pytest.raises(ValueError):
        run_glm(Y, X.T)
Example #10
def test_Fcontrast():
    rng = np.random.RandomState(42)
    n, p, q = 100, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))
    for model in ['ols', 'ar1']:
        labels, results = run_glm(Y, X, model)
        for con_val in [np.eye(q)[0], np.eye(q)[:3]]:
            z_vals = compute_contrast(labels,
                                      results,
                                      con_val,
                                      contrast_type='F').z_score()
            assert_almost_equal(z_vals.mean(), 0, 0)
            assert_almost_equal(z_vals.std(), 1, 0)
Example #11
def test_F_contrast_add():
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1')
    c1, c2 = np.eye(q)[:2], np.eye(q)[2:4]
    con = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)

    # now test with a dependent contrast
    con1 = compute_contrast(lab, res, c1)
    con2 = compute_contrast(lab, res, c1) + compute_contrast(lab, res, c1)
    assert_almost_equal(con1.effect * 2, con2.effect)
    assert_almost_equal(con1.variance * 2, con2.variance)
    assert_almost_equal(con1.stat() * 2, con2.stat())
Example #12
def test_contrast_values():
    # Note: this test is circular and should be removed
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    lab, res = run_glm(Y, X, 'ar1', bins=1)
    # t test
    cval = np.eye(q)[0]
    con = compute_contrast(lab, res, cval)
    t_ref = list(res.values())[0].Tcontrast(cval).t
    assert_almost_equal(np.ravel(con.stat()), t_ref)
    # F test
    cval = np.eye(q)[:3]
    con = compute_contrast(lab, res, cval)
    F_ref = list(res.values())[0].Fcontrast(cval).F
    # Note that the values are not strictly equal,
    # this seems to be related to a bug in Mahalanobis
    assert_almost_equal(np.ravel(con.stat()), F_ref, 3)
Example #13
def test_fixed_effect_contrast_nonzero_effect():
    X, y = make_regression(n_features=5, n_samples=20, random_state=0)
    y = y[:, None]
    labels, results = run_glm(y, X, 'ols')
    coef = LinearRegression(fit_intercept=False).fit(X, y).coef_
    for i in range(X.shape[1]):
        contrast = np.zeros(X.shape[1])
        contrast[i] = 1.
        fixed_effect = _compute_fixed_effect_contrast(
            [labels],
            [results],
            [contrast],
        )
        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])
        fixed_effect = _compute_fixed_effect_contrast([labels] * 3,
                                                      [results] * 3,
                                                      [contrast] * 3)
        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])
Example #14
def run_GLM(raw, design_matrix, noise_model='ar1', bins=100,
            n_jobs=1, verbose=0):
    """
    Run GLM on data using supplied design matrix.

    This is a wrapper function for nilearn.glm.first_level.run_glm.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : DataFrame
        The design matrix, as specified in Nilearn.
    noise_model : {'ar1', 'ols'}, optional
        The temporal variance model. Defaults to 'ar1'.
    bins : int, optional
        Maximum number of discrete bins for the AR(1) coefficient histogram.
    n_jobs : int, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : int, optional
        The verbosity level. Default is 0.

    Returns
    -------
    glm_estimates : dict
        Keys are channel names; values are RegressionResults instances
        for the corresponding channels.
    """
    from nilearn.glm.first_level import run_glm

    picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    ch_names = raw.ch_names

    results = dict()
    for pick in picks:
        labels, glm_estimates = run_glm(raw.get_data(pick).T,
                                        design_matrix.values,
                                        noise_model=noise_model, bins=bins,
                                        n_jobs=n_jobs, verbose=verbose)
        results[ch_names[pick]] = glm_estimates[labels[0]]

    return results
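
A minimal usage sketch for this wrapper, assuming `raw_haemo` is an MNE Raw object containing haemoglobin data and `design_matrix` is a Nilearn first-level design matrix (both names are placeholders, not from the source):

# Hypothetical usage of the run_GLM wrapper defined above
glm_est = run_GLM(raw_haemo, design_matrix, noise_model='ar1', bins=100)
# The result maps each fNIRS channel name to the RegressionResults of its fit
theta = glm_est[raw_haemo.ch_names[0]].theta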
Example #15
def test_glm_AR_estimates():
    """Test that Yule-Walker AR fits are correct."""

    n, p, q = 1, 500, 2
    X_orig = np.random.RandomState(2).randn(p, q)
    Y_orig = np.random.RandomState(2).randn(p, n)

    for ar_vals in [[-0.2], [-0.2, -0.5], [-0.2, -0.5, -0.7, -0.3]]:
        ar_order = len(ar_vals)
        ar_arg = 'ar' + str(ar_order)

        X = X_orig.copy()
        Y = Y_orig.copy()

        for idx in range(1, len(Y)):
            for lag in range(ar_order):
                Y[idx] += ar_vals[lag] * Y[idx - 1 - lag]

        # Test using run_glm
        labels, results = run_glm(Y, X, ar_arg, bins=100)
        assert len(labels) == n
        for lab in results.keys():
            ar_estimate = lab.split("_")
            for lag in range(ar_order):
                assert_almost_equal(float(ar_estimate[lag]),
                                    ar_vals[lag],
                                    decimal=1)

        # Test using _yule_walker
        yw = _yule_walker(Y.T, ar_order)
        assert_almost_equal(yw[0], ar_vals, decimal=1)

    with pytest.raises(TypeError):
        _yule_walker(Y_orig, 1.2)
    with pytest.raises(ValueError):
        _yule_walker(Y_orig, 0)
    with pytest.raises(ValueError):
        _yule_walker(Y_orig, -2)
    with pytest.raises(TypeError, match='at least 1 dim'):
        _yule_walker(np.array(0.), 2)
Example #16
def test_second_level_glm_computation():
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 1), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Ordinary Least Squares case
        model = SecondLevelModel(mask_img=mask)
        Y = [func_img] * 4
        X = pd.DataFrame([[1]] * 4, columns=['intercept'])

        model = model.fit(Y, design_matrix=X)
        model.compute_contrast()
        labels1 = model.labels_
        results1 = model.results_

        labels2, results2 = run_glm(model.masker_.transform(Y), X.values,
                                    'ols')
        assert_almost_equal(labels1, labels2, decimal=1)
        assert len(results1) == len(results2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model, X, Y
Example #17
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm import second_level as level2
        from nilearn.glm import first_level as level1
        from nilearn.glm.contrasts import (compute_contrast,
                                           compute_fixed_effects,
                                           _compute_fixed_effects_params)

        smoothing_fwhm = self.inputs.smoothing_fwhm
        smoothing_type = self.inputs.smoothing_type
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        if isdefined(smoothing_type) and smoothing_type != 'iso':
            raise NotImplementedError(
                "Only the iso smoothing type is available for the nistats estimator."
            )
        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        out_ents = self.inputs.contrast_info[0]['entities']  # Same for all

        # Only keep files which match all entities for contrast
        stat_metadata = _flatten(self.inputs.stat_metadata)
        input_effects = _flatten(self.inputs.effect_maps)
        input_variances = _flatten(self.inputs.variance_maps)

        filtered_effects = []
        filtered_variances = []
        names = []
        for m, eff, var in zip(stat_metadata, input_effects, input_variances):
            if _match(out_ents, m):
                filtered_effects.append(eff)
                filtered_variances.append(var)
                names.append(m['contrast'])

        mat = pd.get_dummies(names)
        contrasts = prepare_contrasts(self.inputs.contrast_info, mat.columns)

        is_cifti = filtered_effects[0].endswith('dscalar.nii')
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format

        # Only fit model if any non-FEMA contrasts at this level
        if any(c[2] != 'FEMA' for c in contrasts):
            if len(filtered_effects) < 2:
                raise RuntimeError(
                    "At least two inputs are required for a 't' for 'F' "
                    "second level contrast")
            if is_cifti:
                effect_data = np.squeeze([
                    nb.load(effect).get_fdata(dtype='f4')
                    for effect in filtered_effects
                ])
                labels, estimates = level1.run_glm(effect_data,
                                                   mat.values,
                                                   noise_model='ols')
            else:
                model = level2.SecondLevelModel(smoothing_fwhm=smoothing_fwhm)
                model.fit(filtered_effects, design_matrix=mat)

        for name, weights, contrast_type in contrasts:
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })

            # Pass-through happens automatically as it can handle 1 input
            if contrast_type == 'FEMA':
                # Index design identity matrix on non-zero contrasts weights
                con_ix = weights[0].astype(bool)
                # Index of all input files "involved" with that contrast
                dm_ix = mat.iloc[:, con_ix].any(axis=1)

                contrast_imgs = np.array(filtered_effects)[dm_ix]
                variance_imgs = np.array(filtered_variances)[dm_ix]
                if is_cifti:
                    ffx_cont, ffx_var, ffx_t = _compute_fixed_effects_params(
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in contrast_imgs
                        ]),
                        np.squeeze([
                            nb.load(fname).get_fdata(dtype='f4')
                            for fname in variance_imgs
                        ]),
                        precision_weighted=False)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        'effect_size':
                        dscalar_from_cifti(img, ffx_cont, "effect_size"),
                        'effect_variance':
                        dscalar_from_cifti(img, ffx_var, "effect_variance"),
                        'stat':
                        dscalar_from_cifti(img, ffx_t, "stat")
                    }

                else:
                    ffx_res = compute_fixed_effects(contrast_imgs,
                                                    variance_imgs)
                    maps = {
                        'effect_size': ffx_res[0],
                        'effect_variance': ffx_res[1],
                        'stat': ffx_res[2]
                    }
            else:
                if is_cifti:
                    contrast = compute_contrast(labels,
                                                estimates,
                                                weights,
                                                contrast_type=contrast_type)
                    img = nb.load(filtered_effects[0])
                    maps = {
                        map_type:
                        dscalar_from_cifti(img,
                                           getattr(contrast, map_type)(),
                                           map_type)
                        for map_type in [
                            'z_score', 'stat', 'p_value', 'effect_size',
                            'effect_variance'
                        ]
                    }
                else:
                    maps = model.compute_contrast(
                        second_level_contrast=weights,
                        second_level_stat_type=contrast_type,
                        output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):
                if map_type in maps:
                    fname = fname_fmt(name, map_type)
                    maps[map_type].to_filename(fname)
                    map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['contrast_metadata'] = contrast_metadata

        # These are "optional" as fixed effects do not support these
        if zscore_maps:
            self._results['zscore_maps'] = zscore_maps
        if pvalue_maps:
            self._results['pvalue_maps'] = pvalue_maps

        return runtime
Example #18
    def _run_interface(self, runtime):
        import nibabel as nb
        from nilearn.glm import first_level as level1
        from nilearn.glm.contrasts import compute_contrast
        mat = pd.read_csv(self.inputs.design_matrix,
                          delimiter='\t',
                          index_col=0)
        img = nb.load(self.inputs.bold_file)

        is_cifti = isinstance(img, nb.Cifti2Image)
        if isinstance(img, nb.dataobj_images.DataobjImage):
            # Ugly hack to ensure that retrieved data isn't cast to float64 unless
            # necessary to prevent an overflow
            # For NIfTI-1 files, slope and inter are 32-bit floats, so this is
            # "safe". For NIfTI-2 (including CIFTI-2), these fields are 64-bit,
            # so include a check to make sure casting doesn't lose too much.
            slope32 = np.float32(img.dataobj._slope)
            inter32 = np.float32(img.dataobj._inter)
            if max(np.abs(slope32 - img.dataobj._slope),
                   np.abs(inter32 - img.dataobj._inter)) < 1e-7:
                img.dataobj._slope = slope32
                img.dataobj._inter = inter32

        mask_file = self.inputs.mask_file
        if not isdefined(mask_file):
            mask_file = None
        smoothing_fwhm = self.inputs.smoothing_fwhm
        if not isdefined(smoothing_fwhm):
            smoothing_fwhm = None
        smoothing_type = self.inputs.smoothing_type
        if isdefined(smoothing_type) and smoothing_type != 'iso':
            raise NotImplementedError(
                "Only the iso smoothing type is available for the nistats estimator."
            )
        if is_cifti:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.dscalar.nii').format
            labels, estimates = level1.run_glm(img.get_fdata(dtype='f4'),
                                               mat.values)
            model_attr = {
                'r_square':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'r_square'),
                    'r_square'),
                'log_likelihood':
                dscalar_from_cifti(
                    img, _get_voxelwise_stat(labels, estimates, 'logL'),
                    'log_likelihood')
            }
        else:
            fname_fmt = os.path.join(runtime.cwd, '{}_{}.nii.gz').format
            flm = level1.FirstLevelModel(minimize_memory=False,
                                         mask_img=mask_file,
                                         smoothing_fwhm=smoothing_fwhm)
            flm.fit(img, design_matrices=mat)
            model_attr = {
                'r_square':
                flm.r_square[0],
                'log_likelihood':
                flm.masker_.inverse_transform(
                    _get_voxelwise_stat(flm.labels_[0], flm.results_[0],
                                        'logL'))
            }

        out_ents = self.inputs.contrast_info[0]['entities']

        # Save model level images

        model_maps = []
        model_metadata = []
        for attr, img in model_attr.items():
            model_metadata.append({'stat': attr, **out_ents})
            fname = fname_fmt('model', attr)
            img.to_filename(fname)
            model_maps.append(fname)

        effect_maps = []
        variance_maps = []
        stat_maps = []
        zscore_maps = []
        pvalue_maps = []
        contrast_metadata = []
        for name, weights, contrast_type in prepare_contrasts(
                self.inputs.contrast_info, mat.columns):
            contrast_metadata.append({
                'contrast': name,
                'stat': contrast_type,
                **out_ents
            })
            if is_cifti:
                contrast = compute_contrast(labels,
                                            estimates,
                                            weights,
                                            contrast_type=contrast_type)
                maps = {
                    map_type: dscalar_from_cifti(img,
                                                 getattr(contrast, map_type)(),
                                                 map_type)
                    for map_type in [
                        'z_score', 'stat', 'p_value', 'effect_size',
                        'effect_variance'
                    ]
                }

            else:
                maps = flm.compute_contrast(weights,
                                            contrast_type,
                                            output_type='all')

            for map_type, map_list in (('effect_size', effect_maps),
                                       ('effect_variance', variance_maps),
                                       ('z_score', zscore_maps),
                                       ('p_value', pvalue_maps), ('stat',
                                                                  stat_maps)):

                fname = fname_fmt(name, map_type)
                maps[map_type].to_filename(fname)
                map_list.append(fname)

        self._results['effect_maps'] = effect_maps
        self._results['variance_maps'] = variance_maps
        self._results['stat_maps'] = stat_maps
        self._results['zscore_maps'] = zscore_maps
        self._results['pvalue_maps'] = pvalue_maps
        self._results['contrast_metadata'] = contrast_metadata
        self._results['model_maps'] = model_maps
        self._results['model_metadata'] = model_metadata

        return runtime
Example #19
def yield_glm_results(vox_idx, Y, X, conf, run, ddict, cfg):
    """ Utility to easily loop across GLM results for voxels with
    unique number of noise components, which is cumbersome but necessary for
    proper orthogonalization, becausô noise components (and HP-filter) previously regressed out of
    the fMRI data should also be regressed out of the design matrix (X). """

    # Pre-allocate optimal number of noise components array (opt_n_comps)
    tr = ddict['trs'][run]
    if ddict['opt_n_comps'].ndim > 1:  # extract run-specific
        opt_n_comps = ddict['opt_n_comps'][run, :]
    else:
        opt_n_comps = ddict['opt_n_comps']

    # Make sure they're integers (not doing this caused so many bugs because you cannot
    # compare a float array to 0)
    opt_n_comps = opt_n_comps.astype(int)

    nm = cfg['single_trial_noise_model']
    for this_n_comps in np.unique(
            opt_n_comps):  # loop across unique opt_n_comps

        # Find voxels that correspond to this_n_comps and intersect
        # with given voxel index
        this_vox_idx = opt_n_comps == this_n_comps
        this_vox_idx = np.logical_and(vox_idx, this_vox_idx)

        # Get confound matrix (X_n) ...
        if this_n_comps == 0:
            C = None  # no need for orthogonalization!
        else:
            C = conf[:, :this_n_comps]

        this_X = X.copy()
        if 'constant' in this_X.columns:  # drop for now; re-added below
            this_X = this_X.drop('constant', axis=1)

        # orthogonalize w.r.t. unmodulated regressor
        if 'unmodstim' in this_X.columns:
            idx = ~this_X.columns.str.contains('unmodstim')
            unmod_reg = this_X.loc[:, ~idx].to_numpy()
            this_X.loc[:, idx] = signal.clean(this_X.loc[:, idx].to_numpy(),
                                              detrend=False,
                                              confounds=unmod_reg,
                                              standardize=False)

        # ... and remove from design (this_X); also high-pass
        this_X.loc[:, :], Y = custom_clean(this_X,
                                           Y,
                                           C,
                                           tr,
                                           ddict,
                                           cfg,
                                           high_pass=True,
                                           standardize=False)

        # Finally, fit actual GLM and yield results
        this_X['constant'] = 1
        labels, results = run_glm(Y[:, this_vox_idx],
                                  this_X.to_numpy(),
                                  noise_model=nm)
        yield this_vox_idx, this_X, labels, results
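
A hedged usage sketch of this generator; `vox_mask`, `func_data`, `design`, `confounds`, `run`, `ddict`, and `cfg` are placeholder inputs with the shapes and keys the function expects, not objects from the source:

# Iterate over per-voxel GLM fits grouped by optimal number of noise components
for this_vox_idx, this_X, labels, results in yield_glm_results(
        vox_mask, func_data, design, confounds, run, ddict, cfg):
    # e.g. a t-contrast on the first regressor for this subset of voxels
    cvec = np.zeros(this_X.shape[1])
    cvec[0] = 1
    con = compute_contrast(labels, results, cvec, contrast_type='t')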
Example #20
def test_run_glm():
    rng = np.random.RandomState(42)
    n, p, q = 33, 80, 10
    X, Y = rng.standard_normal(size=(p, q)), rng.standard_normal(size=(p, n))

    # Ordinary Least Squares case
    labels, results = run_glm(Y, X, 'ols')
    assert_array_equal(labels, np.zeros(n))
    assert list(results.keys()) == [0.0]
    assert results[0.0].theta.shape == (q, n)
    assert_almost_equal(results[0.0].theta.mean(), 0, 1)
    assert_almost_equal(results[0.0].theta.var(), 1. / p, 1)
    assert type(results[labels[0]].model) == OLSModel

    # ar(1) case
    labels, results = run_glm(Y, X, 'ar1')
    assert len(labels) == n
    assert len(results.keys()) > 1
    tmp = sum([val.theta.shape[1] for val in results.values()])
    assert tmp == n
    assert results[labels[0]].model.order == 1
    assert type(results[labels[0]].model) == ARModel

    # ar(3) case
    labels_ar3, results_ar3 = run_glm(Y, X, 'ar3', bins=10)
    assert len(labels_ar3) == n
    assert len(results_ar3.keys()) > 1
    tmp = sum([val.theta.shape[1] for val in results_ar3.values()])
    assert tmp == n
    assert type(results_ar3[labels_ar3[0]].model) == ARModel
    assert results_ar3[labels_ar3[0]].model.order == 3
    assert len(results_ar3[labels_ar3[0]].model.rho) == 3

    # Check correct errors are thrown for nonsense noise model requests
    with pytest.raises(ValueError):
        run_glm(Y, X, 'ar0')
    with pytest.raises(ValueError):
        run_glm(Y, X, 'arfoo')
    with pytest.raises(ValueError):
        run_glm(Y, X, 'arr3')
    with pytest.raises(ValueError):
        run_glm(Y, X, 'ar1.2')
    with pytest.raises(ValueError):
        run_glm(Y, X, 'ar')
    with pytest.raises(ValueError):
        run_glm(Y, X, '3ar')
Example #21
def _run_glmdenoise_model(ddict, cfg, logger):
    """ Runs a GLMdenoise-style cross-validated analysis. """
    Y_all = ddict['denoised_func'].copy()
    nonzero = ~np.all(np.isclose(Y_all, 0.), axis=0)

    # Some shortcuts
    n_runs = np.unique(ddict['run_idx']).size
    K = Y_all.shape[1]
    stype = STATS[cfg['pattern_units']]

    # Pre-allocate result arrays, separately for bootstrap data (boot) and
    # parametric data (param)
    conditions = ddict['preproc_events']['trial_type'].unique().tolist()
    cond_param = np.zeros((len(conditions), K))

    if cfg['contrast'] is not None:
        # ccon = custom contrast
        ccon_param = np.zeros(K)  # parametric

    # Note: opt_n_comps must be the same for each run!
    if ddict['opt_n_comps'].ndim > 1:
        raise ValueError("Cannot have run-specific n-comps when using GLMdenoise. Set --regularize-n-comps!")

    opt_n_comps = ddict['opt_n_comps']

    if cfg['hrf_model'] == 'kay':  # use optimal HRF
        if ddict['opt_hrf_idx'].sum() == 0:
            logger.warn("No HRF index data found; going to start optimization routine")
            r2_hrf = _optimize_hrf_between(ddict, cfg, logger)
            opt_hrf_idx = r2_hrf.argmax(axis=0)
            save_data(opt_hrf_idx, cfg, ddict, par_dir='best', run=None, desc='opt', dtype='hrf')
            save_data(r2_hrf, cfg, ddict, par_dir='best', run=None, desc='hrf', dtype='r2')
            save_data(r2_hrf.max(axis=0), cfg, ddict, par_dir='best', run=None, desc='max', dtype='r2')   
        else:
            opt_hrf_idx = ddict['opt_hrf_idx'].astype(int)
    else:  # use the same HRF (this is ignored)
        opt_hrf_idx = np.zeros(K)

    r2 = np.zeros(K)
    preds = np.zeros_like(Y_all)

    # Loop over HRF indices
    for hrf_idx in np.unique(opt_hrf_idx).astype(int):            
        # Loop over n-components
        for n_comp in np.unique(opt_n_comps).astype(int):
            # Determine voxel index (intersection of the nonzero voxels and
            # the voxels that were denoised with the current n_comp)
            vox_idx = opt_n_comps == n_comp
            vox_idx = np.logical_and(vox_idx, nonzero)
            vox_idx = np.logical_and(vox_idx, hrf_idx == opt_hrf_idx)

            # Gather the run-specific design matrices
            Xs = []
            for run in range(n_runs):
                tr = ddict['trs'][run]
                this_Y, confs, events = get_run_data(ddict, run, func_type='denoised')
                ft = get_frame_times(tr, ddict, cfg, this_Y)
                # Note: hrf_idx is ignored when hrf_model is not "kay"
                X = create_design_matrix(tr, ft, events, hrf_model=cfg['hrf_model'], hrf_idx=hrf_idx)
                X = X.drop('constant', axis=1)  # remove intercept

                # Orthogonalize noise components w.r.t. design matrix
                if n_comp != 0:
                    X.loc[:, :], _ = custom_clean(X, this_Y, confs[:, :n_comp], tr, ddict, cfg, clean_Y=False)
    
                X = X - X.mean(axis=0)
                Xs.append(X)

            # Concatenate design matrices
            X = pd.concat(Xs, axis=0)
            Y = Y_all[:, vox_idx]  # only current voxels

            # Get regular (parametric) scores
            labels, results = run_glm(Y, X.to_numpy(), noise_model='ols')
            r2[vox_idx] = get_param_from_glm('r_square', labels, results, X, time_series=False)
            preds[:, vox_idx] = get_param_from_glm('predicted', labels, results, X, time_series=True)

            for i, cond in enumerate(conditions):
                cvec = np.zeros(X.shape[1])
                cvec[X.columns.tolist().index(cond)] = 1
                con = compute_contrast(labels, results, con_val=cvec, contrast_type='t')
                cond_param[i, vox_idx] = getattr(con, stype)()

            if cfg['contrast'] is not None:
                cvec = expression_to_contrast_vector(cfg['contrast'], X.columns.tolist())
                con = compute_contrast(labels, results, cvec)
                ccon_param[vox_idx] = getattr(con, stype)()

    save_data(r2, cfg, ddict, par_dir='best', run=None, desc='model', dtype='r2', nii=True)
    for i, cond in enumerate(conditions):
        save_data(cond_param[i, :], cfg, ddict, par_dir='best', run=None, desc=cond, dtype=cfg['pattern_units'], nii=True)

    if cfg['contrast'] is not None:
        save_data(ccon_param, cfg, ddict, par_dir='best', run=None, desc='custom', dtype=cfg['pattern_units'], nii=True)

    for run in np.unique(ddict['run_idx']):
        save_data(preds[run == ddict['run_idx']], cfg, ddict, par_dir='best',
                  run=run+1, desc='model', dtype='predicted', nii=True)
def main(subject,
         session,
         sourcedata,
         smoothed=False,
         pca_confounds=False,
         space='fsnative',
         n_jobs=14):

    derivatives = op.join(sourcedata, 'derivatives')

    base_dir = 'glm_stim1_surf'

    if smoothed:
        base_dir += '.smoothed'

    if pca_confounds:
        base_dir += '.pca_confounds'

    base_dir = op.join(derivatives, base_dir, f'sub-{subject}',
                       f'ses-{session}', 'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    runs = range(1, 9)

    behavior = []
    for run in runs:
        behavior.append(
            pd.read_table(
                op.join(
                    sourcedata,
                    f'sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_events.tsv'
                )))

    behavior = pd.concat(behavior, keys=runs, names=['run'])
    behavior['subject'] = subject
    behavior = behavior.reset_index().set_index(
        ['subject', 'run', 'trial_type'])

    stimulus1 = behavior.xs('stimulus 1', 0, 'trial_type',
                            drop_level=False).reset_index('trial_type')[[
                                'onset', 'trial_nr', 'trial_type'
                            ]]
    stimulus1['duration'] = 0.6
    stimulus1['trial_type'] = stimulus1.trial_nr.map(
        lambda trial: f'trial_{trial}')

    print(stimulus1)

    stimulus2 = behavior.xs(
        'stimulus 2', 0, 'trial_type',
        drop_level=False).reset_index('trial_type')[['onset', 'trial_type']]
    stimulus2['duration'] = 0.6

    n2 = behavior.xs('stimulus 2', 0, 'trial_type',
                     drop_level=False).reset_index('trial_type')[[
                         'onset', 'trial_type', 'n2'
                     ]]
    n2['duration'] = 0.6

    def zscore(n):
        return (n - n.mean()) / n.std()

    n2['modulation'] = zscore(n2['n2'])
    n2['trial_type'] = 'n_dots2'

    p2 = behavior.xs('stimulus 2', 0, 'trial_type',
                     drop_level=False).reset_index('trial_type')[[
                         'onset', 'trial_type', 'prob2'
                     ]]
    p2 = p2[p2.prob2 == 1.0]
    p2['duration'] = 0.6
    p2['trial_type'] = 'certain2'

    events = pd.concat((stimulus1, stimulus2, n2, p2)).sort_values('onset')
    events['modulation'].fillna(1.0, inplace=True)

    # # sub-02_ses-7t2_task-task_run-1_space-fsaverage_hemi-R_bold.func

    keys = [(run, hemi) for run, hemi in product(runs, ['L', 'R'])]

    if smoothed:
        surfs = [
            op.join(
                sourcedata,
                f'derivatives/smoothed/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_space-{space}_hemi-{hemi}_desc-smoothed_bold.func.gii'
            ) for run, hemi in keys
        ]
    else:
        surfs = [
            op.join(
                sourcedata,
                f'derivatives/fmriprep/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_space-{space}_hemi-{hemi}_bold.func.gii'
            ) for run, hemi in keys
        ]

    fmriprep_confounds_include = [
        'global_signal', 'dvars', 'framewise_displacement', 'trans_x',
        'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z', 'a_comp_cor_00',
        'a_comp_cor_01', 'a_comp_cor_02', 'a_comp_cor_03', 'cosine00',
        'cosine01', 'cosine02', 'cosine03', 'non_steady_state_outlier00',
        'non_steady_state_outlier01', 'non_steady_state_outlier02'
    ]
    fmriprep_confounds = [
        op.join(
            sourcedata,
            f'derivatives/fmriprep/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_desc-confounds_timeseries.tsv'
        ) for run, hemi in keys
    ]
    fmriprep_confounds = [
        pd.read_table(cf)[fmriprep_confounds_include]
        for cf in fmriprep_confounds
    ]

    retroicor_confounds = [
        op.join(
            sourcedata,
            f'derivatives/physiotoolbox/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_task-task_run-{run}_desc-retroicor_timeseries.tsv'
        ) for run, hemi in keys
    ]
    retroicor_confounds = [
        pd.read_table(cf, header=None, usecols=range(18))
        if op.exists(cf) else pd.DataFrame(np.zeros((160, 0)))
        for cf in retroicor_confounds
    ]

    confounds = [
        pd.concat((rcf, fcf), axis=1)
        for rcf, fcf in zip(retroicor_confounds, fmriprep_confounds)
    ]
    confounds = [c.fillna(method='bfill') for c in confounds]

    t_r, n_scans = 2.3, 160
    frame_times = t_r * (np.arange(n_scans) + .5)

    betas = []

    n_verts = {}

    for (run, hemi), cf, surf in zip(keys, confounds, surfs):
        e = events.xs(run, 0, 'run')
        Y = surface.load_surf_data(surf).T

        n_verts[hemi] = Y.shape[1]

        if len(Y) == 213:
            Y = Y[:160]
            cf = cf.iloc[:160]

        if pca_confounds:
            pca = PCA(n_components=13)
            cf -= cf.mean(0)
            cf /= cf.std(0)
            cf = pca.fit_transform(cf)
            print('PCA size: ', cf.shape)

        X = make_first_level_design_matrix(
            frame_times,
            events=e,
            hrf_model='glover',
            high_pass=False,
            drift_model=None,
            add_regs=cf,
        )

        Y = (Y / Y.mean(0) * 100)
        Y -= Y.mean(0)

        fit = run_glm(Y, X, noise_model='ols', n_jobs=n_jobs)
        r = fit[1][0.0]
        betas.append(pd.DataFrame(r.theta, index=X.columns))

    betas = pd.concat(betas, keys=keys, names=['run', 'hemi'])
    betas.reset_index('run', drop=True, inplace=True)
    betas = betas.loc[(slice(None), stimulus1.trial_type), :].unstack(
        'hemi', fill_value=-1e6).swaplevel(axis=1).sort_index(axis=1)

    for hemi in ['L', 'R']:
        b = betas[hemi].loc[:, :n_verts[hemi] - 1]
        print(b)
        gii = nb.gifti.GiftiImage(
            header=nb.load(surfs[['L', 'R'].index(hemi)]).header,
            darrays=[nb.gifti.GiftiDataArray(row) for _, row in b.iterrows()])

        fn_template = op.join(
            base_dir,
            'sub-{subject}_ses-{session}_task-task_space-{space}_desc-stims1_hemi-{hemi}.pe.gii'
        )

        gii.to_filename(fn_template.format(**locals()))
design_matrix = make_first_level_design_matrix(frame_times,
                                               events=events,
                                               hrf_model='glover + derivative')

###############################################################################
# Set up and fit the GLM.
#
# Note that the output consists of two variables: `labels` and `estimates`.
# `labels` tags voxels according to their noise autocorrelation.
# `estimates` contains the parameter estimates.
# We keep them for later contrast computation.

from nilearn.glm.first_level import run_glm

labels, estimates = run_glm(texture.T, design_matrix.values)
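
# As an illustrative check (not part of the original example): with the
# default 'ar1' noise model, `labels` holds one discretized AR(1) coefficient
# per vertex, and `estimates` is a dict with one RegressionResults object per
# distinct label value.
print('Number of noise-autocorrelation classes:', len(estimates))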

###############################################################################
# Estimate contrasts
# ------------------
# Specify the contrasts.
#
# For practical purposes, we first generate an identity matrix whose size is
# the number of columns of the design matrix.
contrast_matrix = np.eye(design_matrix.shape[1])

###############################################################################
# First, we create the basic contrasts.
basic_contrasts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
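
###############################################################################
# As a minimal illustrative sketch (not part of the original example), one of
# these basic contrasts can be passed to nilearn.glm.contrasts.compute_contrast
# together with `labels` and `estimates` to obtain vertex-wise statistics.
# We assume here that the first design-matrix column is a regressor of interest.
from nilearn.glm.contrasts import compute_contrast

example_column = design_matrix.columns[0]
contrast = compute_contrast(labels, estimates,
                            basic_contrasts[example_column],
                            contrast_type='t')
z_scores = contrast.z_score()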