Example #1
    def test_statsmodels(self):
        """Test GLM
        """
        skip_if_no_external('statsmodels')
        from mvpa2.measures.statsmodels_adaptor import GLM
        # high SNR dataset for such a short timeseries
        data = simple_hrf_dataset(signal_level=2, noise_level=0.5)
        X = data.sa.design

        # check GLM betas
        glm = GLM(X)
        betas = glm(data)

        # betas for each feature and each regressor
        self.assertTrue(betas.shape == (X.shape[1], data.nfeatures))

        self.assertTrue(
            (np.absolute(betas.samples[1] - data.a.baseline) < 10).all(),
            msg="baseline betas should be huge and around 800")

        self.assertTrue(
            betas.samples[0, 0] > betas.samples[0, 1],
            msg="feature (with signal) beta should be larger than for noise")

        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue(np.absolute(betas.samples[0, 1]) < 0.5)
            self.assertTrue(np.absolute(betas.samples[0, 0]) > 1.0)

        # check GLM t values
        glm = GLM(X, voi='tvalues')
        tstats = glm(data)

        self.assertTrue(tstats.shape == betas.shape)

        self.assertTrue((tstats.samples[1] > 1000).all(),
                        msg='constant tvalues should be huge')

        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue(np.absolute(tstats.samples[0, 0]) > np.absolute(tstats.samples[0, 1]),
                            msg='with signal should have higher tvalues')

        # check t-contrast -- should do the same as tvalues for the first
        # parameter
        glm = GLM(X, voi=[1, 0])
        contrast = glm(data)
        assert_array_almost_equal(contrast.samples[0], tstats.samples[0])
        assert_equals(len(contrast), 6)
        # we should be able to recover the approximate effect size of the signal
        # which is constructed with a baseline offset of 2 (see above)
        if cfg.getboolean('tests', 'labile', default='yes'):
            assert_true(1.5 < contrast.samples[2, 0] < 2.5)

        # check F-test
        glm = GLM(X, voi=[[1, 0]])
        ftest = glm(data)
        assert_equals(len(ftest), 4)
        assert_true(ftest.samples[0, 0] > ftest.samples[0, 1])
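For reference, here is the same pattern as a minimal standalone sketch outside the test harness, assuming the standard PyMVPA 2 import locations and an installed statsmodels:

import numpy as np
from mvpa2.misc.data_generators import simple_hrf_dataset
from mvpa2.measures.statsmodels_adaptor import GLM

# simulate a short, high-SNR time series; the design matrix is attached
# as the 'design' sample attribute and the baseline as a dataset attribute
data = simple_hrf_dataset(signal_level=2, noise_level=0.5)
X = data.sa.design

betas = GLM(X)(data)                    # parameter estimates per regressor
tstats = GLM(X, voi='tvalues')(data)    # t-values instead of betas
print(betas.shape, tstats.samples[0])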
Example #2
def add_signal_custom(ds, ms, spec, tpeak=0.8, fwhm=1, fir_length=15):
    """
    add signal to a pure noise simulated image
    (as generated e.g. by simulate_run)
    """

    dataset_with_signal = ds.copy()
    ms = fmri_dataset(ms)
    """
    some parameters from data
    """
    #  TR
    tr = ds.sa['time_coords'][1] - ds.sa['time_coords'][0]
    nsamples = len(ds.samples)
    """
    loop over specified conditions
    """
    for cond in spec:
        # condition = spec['conditions'][cond]
        roivalue = cond['roivalue']
        onsets = cond['onset']
        amplitude = cond['amplitude']
        sigchange = float(amplitude) / 100

        # get voxel indices for roi
        roi_indices = np.where(ms.samples[0] == roivalue)[0]
        """
        model hrf
        """
        hrf_model = simple_hrf_dataset(events=onsets,
                                       nsamples=nsamples * 2,
                                       tr=tr,
                                       tres=1,
                                       baseline=1,
                                       signal_level=sigchange,
                                       noise_level=0).samples[:, 0]
        """
        add activation to data set
        """
        # add model activation to roi voxels
        # import pdb; pdb.set_trace()
        for sample, activation in zip(dataset_with_signal.samples, hrf_model):
            sample[roi_indices] *= activation

    return dataset_with_signal
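A minimal usage sketch; the file names and spec values are hypothetical, but spec is a list of dicts with exactly the keys the loop above reads:

from mvpa2.datasets.mri import fmri_dataset

# 'noise_run.nii.gz' and 'rois.nii.gz' are hypothetical file names
noise_ds = fmri_dataset('noise_run.nii.gz')
spec = [{'roivalue': 1,            # mask voxels labeled 1
         'onset': [10, 40, 70],    # stimulus onsets
         'amplitude': 5}]          # percent signal change
ds_with_signal = add_signal_custom(noise_ds, 'rois.nii.gz', spec)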
Example #3
def add_contrast(timeseries, onsetpath, amplitudes=(1, 1)):
    """
    Based on the onsets for familiar and unfamiliar faces, construct a regressor
    that reflects the difference between the two conditions and add it to our
    time series.
    This way, we want to try to model the effect of experimental stimulation on
    our connectivity model in Py-Causal / TETRAD.
    """

    # import custom function, used also during simulation, to get the onsets

    sys.path.insert(0, '/data/famface/openfmri/oli/osf_prereg_code/simulation')
    from famface_simulation_functions import get_onsets_famface

    # get onsets
    spec = get_onsets_famface(onsetpath, amplitudes)

    # construct hrf model for familiar and unfamiliar faces
    # (in the form of dicts in a list)
    hrf_models = []
    for condition, amplitude in zip(spec, amplitudes):
        hrf_model = simple_hrf_dataset(events=condition['onset'],
                                       nsamples=154,
                                       tr=2,
                                       tres=2,
                                       baseline=0,
                                       signal_level=amplitude,
                                       noise_level=0)
        hrf_models.append(hrf_model)

    # subtract the two HRF models to obtain the contrast regressor
    fam_vs_unfam = hrf_models[0].samples[:, 0] - hrf_models[1].samples[:, 0]

    # append to time series
    from copy import deepcopy
    ts_copy = deepcopy(timeseries)
    ts_copy.append(fam_vs_unfam)
    return ts_copy
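A sketch of how a call might look; the onset path is hypothetical, and timeseries is assumed to be a list of equally long ROI time courses:

import numpy as np

# two hypothetical ROI time courses, 154 samples each (matching nsamples above)
timeseries = [np.random.randn(154), np.random.randn(154)]
ts = add_contrast(timeseries, '/path/to/onset/files')   # hypothetical onset path
# ts[-1] is now the familiar-vs-unfamiliar contrast regressor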
Example #4
def construct_design_matrix(onset_dicts):
    """
    Take a list of onset dicts for the different trial types and create a design
    matrix, including an intercept, from it.
    """

    # convolve onsets with hrf to get regressors
    regressors_conv = []
    for onset_dict in onset_dicts:
        convolved_ds = simple_hrf_dataset(events=onset_dict['onsets'], tr=2, tres=1,
                                          baseline=0, signal_level=1, noise_level=0)
        convolved_reg = convolved_ds.sa['design'].value[:, 0]
        # PyMVPA scales the effect size to 2; we want it to be 1
        convolved_reg_scaled = convolved_reg / 2
        regressors_conv.append(convolved_reg_scaled)

    # pad regressors to the same length
    # determine the maximum length
    maxlen = max(len(reg) for reg in regressors_conv)

    # pad shorter regressors by repeating their edge value
    for i, reg in enumerate(regressors_conv):
        if len(reg) < maxlen:
            regressors_conv[i] = np.pad(reg, (0, maxlen - len(reg)), 'edge')

    # construct intercept vector and add to first position
    intercept = np.ones(len(regressors_conv[0]))
    design_matrix = np.vstack((intercept, regressors_conv))

    # get dimensions right (rows should be samples, columns should be regressors)
    design_matrix = design_matrix.transpose()

    return design_matrix
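A short usage sketch (the onset times are made up); the resulting matrix can feed directly into the GLM adaptor from Example #1:

onset_dicts = [{'onsets': [0, 20, 40]},    # trial type A, onsets in seconds
               {'onsets': [10, 30, 50]}]   # trial type B
X = construct_design_matrix(onset_dicts)
# X: one row per sample, intercept in the first column,
# one convolved regressor per trial type in the remaining columns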