Example 1
def run_linear_regression_v2(analysis_name, regressor_names, subject, cleaned=True):
    # Load data & update metadata (in case new things were added)
    epochs = epoching_funcs.load_epochs_items(subject, cleaned=cleaned)
    epochs = epoching_funcs.update_metadata_rejected(subject, epochs)

    # # ====== remove some items from the linear model ==========
    print('We remove the first sequence item for which the surprise is not well computed and for which there is no RepeatAlter')
    epochs = epochs["StimPosition > 1"]
    # print('We remove items from trials with violation')
    # epochs = epochs["ViolationInSequence == 0"]

    # ====== regressors
    names = regressor_names

    # ====== normalization ?
    for name in names:
        epochs.metadata[name] = scale(epochs.metadata[name])

    # ====== Linear model (all items)
    df = epochs.metadata
    epochs.metadata = df.assign(Intercept=1)  # Add an intercept for later
    names = ["Intercept"] + names
    res = linear_regression(epochs, epochs.metadata[names], names=names)

    # Save regression results
    out_path = op.join(config.result_path, 'linear_models', analysis_name, subject)
    utils.create_folder(out_path)
    for name in names:
        res[name].beta.save(op.join(out_path, name + '.fif'))
Example 2
def decode_probe(file):
    #load in epochs
    _id = file.split('_')[0]
    try:
        epochs = mne.epochs.read_epochs(join(epodir, file))
        if len(epochs) < 30:
            return [0]
        # crop out the postcue period after pre-stim baseline
        # epochs.apply_baseline(baseline=(3, 3.5))
        # epochs.crop(tmin=3, tmax=4.5)
        epochs.apply_baseline(baseline=(1.5, 2))
        epochs.crop(tmin=1.5, tmax=3.5)
        epochs.metadata = epochs.metadata.assign(Intercept=1)
        epochs.metadata['ang_dist'] = ang_dist(epochs.metadata[['targ_ang', 'resp_ang']], 90)
        epochs.pick_types(meg=True, chpi=False)
        #epochs.metadata.perc_diff = (epochs.metadata.perc_diff - epochs.metadata.perc_diff.mean) /epochs.metadata.perc_diff
        # separate out probe directions

        _l = epochs['cue_dir == 0']
        _r = epochs['cue_dir == 1']
        _n = epochs['cue_dir == -1']
        lav = _l.average()
        rav = _r.average()
        nav = _n.average()

        # l = mne.combine_evoked([lav, nav], [1, -1])
        # r = mne.combine_evoked([rav, nav], [1, -1])

        l = _l
        r = _r
        names = ["Intercept", reg_n]
        res_l = linear_regression(l, l.metadata[names].reset_index(drop=True), names=names)
        res_r = linear_regression(r, r.metadata[names].reset_index(drop=True), names=names)
        res_both = linear_regression(epochs, epochs.metadata[names].reset_index(drop=True), names=names)
        return [res_l, res_r, res_both]
    except Exception:
        return [0]
Example 3
def run_linear_regression(subject, cleaned=True):
    # Load data & update metadata
    epochs = epoching_funcs.load_epochs_items(subject, cleaned=cleaned)
    epochs = epoching_funcs.update_metadata(subject, epochs)

    # add surprise_dynamic in metadata (excluding removed items/trials)
    print('We merge the dynamical model of surprise with the metadata')
    run_info_subject_dir = op.join(config.run_info_dir, subject)
    surprise = loadmat(op.join(run_info_subject_dir, 'surprise.mat'))
    surprise = list(surprise['Surprise'])
    # indices of the epochs that were dropped during rejection
    badidx = [i for i, log in enumerate(epochs.drop_log) if len(log) > 0]
    for i in badidx[::-1]:
        surprise.pop(i)
    surprise = np.asarray(surprise)
    epochs.metadata['surprise_dynamic'] = surprise

    # ====== remove the first item of each sequence in the linear model ==========
    print('We remove the first sequence item for which the surprise is not well computed')
    epochs = epochs["StimPosition > 1"]

    # ====== normalization ?
    epochs.metadata['surprise_dynamic'] = scale(epochs.metadata['surprise_dynamic'])
    epochs.metadata['Complexity'] = scale(epochs.metadata['Complexity'])
    epochs.metadata['ViolationOrNot'] = scale(epochs.metadata['ViolationOrNot'])

    # ====== let's add in the metadata a term of violation_or_not X complexity ==========
    print('We add an interaction term ViolationOrNot x Complexity to the metadata')
    # epochs.metadata['violation_X_complexity'] = np.asarray([epochs.metadata['ViolationOrNot'][i]*epochs.metadata['Complexity'][i] for i in range(len(epochs.metadata))])  # does not work, replaced by the next line (correct?)
    epochs.metadata['violation_X_complexity'] = scale(epochs.metadata['ViolationOrNot'] * epochs.metadata['Complexity'])

    # Linear model (all items)
    df = epochs.metadata
    epochs.metadata = df.assign(Intercept=1)  # Add an intercept for later
    names = ["Intercept", "Complexity", "surprise_dynamic", "ViolationOrNot", "violation_X_complexity"]
    res = linear_regression(epochs, epochs.metadata[names], names=names)

    # Save regression results
    out_path = op.join(config.result_path, 'linear_models', 'complexity&surprisedynamic', subject)
    utils.create_folder(out_path)
    res['Intercept'].beta.save(op.join(out_path, 'beta_intercept-ave.fif'))
    res['Complexity'].beta.save(op.join(out_path, 'beta_Complexity-ave.fif'))
    res['surprise_dynamic'].beta.save(op.join(out_path, 'beta_surprise_dynamic-ave.fif'))
    res['ViolationOrNot'].beta.save(op.join(out_path, 'beta_violation_or_not-ave.fif'))
    res['violation_X_complexity'].beta.save(op.join(out_path, 'beta_violation_X_complexity-ave.fif'))
Example 4
        epochs = epochs[idx]
        design_matrix = design_matrix[idx]
        evts = evts[idx]

        # rerf keys
        dm_keys = evts[:, 0]

        assert len(design_matrix) == len(epochs) == len(dm_keys)
        # group_ols[subject] = epochs.average()
        # Define 'y': what you're predicting
        y = design_matrix[:, -1]

        # run a rERF
        covariates = dict(zip(dm_keys, y))
        # linear regression
        reg = linear_regression(epochs, design_matrix, reg_names)
        reg[c_name].beta.save(fname_reg)

        print('get ready for decoding ;)')

        train_times = {
            'start': tmin,
            'stop': tmax,
            'length': length,
            'step': step
        }
        cv = KFold(n=len(y), n_folds=n_folds, random_state=random_state)
        gat = GeneralizationAcrossTime(predict_mode='cross-validation',
                                       n_jobs=-1,
                                       train_times=train_times,
                                       scorer=rank_scorer,
Example 5
df = epochs.metadata
df[name] = pd.cut(df[name], 11, labels=False) / 10
colors = {str(val): val for val in df[name].unique()}
epochs.metadata = df.assign(Intercept=1)  # Add an intercept for later
evokeds = {val: epochs[name + " == " + val].average() for val in colors}
plot_compare_evokeds(evokeds, colors=colors, split_legend=True,
                     cmap=(name + " Percentile", "viridis"))

##############################################################################
# We observe that there appears to be a monotonic dependence of EEG on
# concreteness. We can also conduct a continuous analysis: single-trial level
# regression with concreteness as a continuous (although here, binned)
# feature. We can plot the resulting regression coefficient just like an
# Event-related Potential.
names = ["Intercept", name]
res = linear_regression(epochs, epochs.metadata[names], names=names)
for cond in names:
    res[cond].beta.plot_joint(title=cond, ts_args=dict(time_unit='s'),
                              topomap_args=dict(time_unit='s'))

##############################################################################
# Because the `linear_regression` function also estimates p values, we can --
# after applying FDR correction for multiple comparisons -- also visualise the
# statistical significance of the regression of word concreteness.
# The :func:`mne.viz.plot_evoked_image` function takes a `mask` parameter.
# If we supply it with a boolean mask of the positions where we can reject
# the null hypothesis, points that are not significant will be shown
# transparently, and if desired, in a different colour palette and surrounded
# by dark contour lines.
reject_H0, fdr_pvals = fdr_correction(res["Concreteness"].p_val.data)
evoked = res["Concreteness"].beta
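# The listing stops before the visualisation described above. A minimal sketch
# of that last step, assuming an MNE version whose Evoked.plot_image accepts a
# boolean ``mask`` argument (forwarded to mne.viz.plot_evoked_image):
evoked.plot_image(mask=reject_H0, time_unit='s')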
Example 6
###############################################################################
# Next, we can inspect the effect of phase-coherence on the activation
# patterns evoked by the presented face-stimuli.
# Here, one would expect that faces with high phase-coherence evoke a stronger
# response, as participants should be better at identifying these faces.
#
# Create design matrix for linear regression. We'll use the information
# contained in the ``limo_epochs.metadata``.
design = limo_epochs.metadata.copy()
design = design.assign(intercept=1)  # add intercept
design['face a - face b'] = np.where(design['face'] == 'A', 1, -1)
names = ['intercept', 'face a - face b', 'phase-coherence']

# fit linear model
reg = linear_regression(limo_epochs, design[names], names=names)

###############################################################################
# Visualise effect of phase-coherence.
reg['phase-coherence'].beta.plot_joint(ts_args=ts_args,
                                       title='Effect of phase-coherence',
                                       times=[.23])

###############################################################################
# Here we can see a clear effect of phase-coherence, with higher
# phase-coherence (i.e., better "face visibility") being associated with
# stronger activity patterns.

###############################################################################
# Conversely, there appears to be no (or only a very small) systematic effect when
# contrasting Face A and Face B. This is largely consistent with the
Example 7
def run_linear_reg_surprise_repeat_alt_latest(subject, cross_validate=True):
    # remove old files
    meg_subject_dir = op.join(config.meg_dir, subject)
    metadata_path = op.join(meg_subject_dir, 'metadata_item.pkl')
    if op.exists(metadata_path):
        os.remove(metadata_path)
    metadata_path = op.join(meg_subject_dir, 'metadata_item_clean.pkl')
    if op.exists(metadata_path):
        os.remove(metadata_path)

    list_omegas = np.logspace(-1, 2, 50)
    TP_funcs.from_epochs_to_surprise(subject, list_omegas)
    TP_funcs.append_surprise_to_metadata_clean(subject)

    # =========== correction of the metadata with the surprise for the clean epochs ============
    # TP_funcs.append_surprise_to_metadata_clean(subject)  # already done above

    # ====== load the data , remove the first item for which the surprise is not computed ==========
    epochs = epoching_funcs.load_epochs_items(subject, cleaned=True)
    metadata = epoching_funcs.update_metadata(subject,
                                              clean=True,
                                              new_field_name=None,
                                              new_field_values=None)
    metadata["surprise_100"] = metadata[
        "surprise_100.00000"]  # "rename" the variable
    # metadata.to_csv(r'tmp.csv')

    # ============ build the repeatAlter and the surprise 100 for n+1 ==================
    metadata_notclean = epoching_funcs.update_metadata(subject,
                                                       clean=False,
                                                       new_field_name=None,
                                                       new_field_values=None)
    metadata_notclean["surprise_100"] = metadata_notclean[
        "surprise_100.00000"]  # "rename" the variable
    RepeatAlternp1_notclean = metadata_notclean["RepeatAlter"].values[
        1:].tolist()
    RepeatAlternp1_notclean.append(np.nan)
    Surprisenp1_notclean = metadata_notclean["surprise_100"].values[1:].tolist(
    )
    Surprisenp1_notclean.append(np.nan)
    good_idx = np.where(
        [len(epochs.drop_log[i]) == 0 for i in range(len(epochs.drop_log))])[0]
    RepeatAlternp1 = np.asarray(RepeatAlternp1_notclean)[good_idx]
    Surprisenp1 = np.asarray(Surprisenp1_notclean)[good_idx]
    # ======================================================================================

    metadata = metadata.assign(Intercept=1)  # Add an intercept for later
    metadata = metadata.assign(RepeatAlternp1=RepeatAlternp1)
    metadata = metadata.assign(Surprisenp1=Surprisenp1)  # surprise of item n+1

    epochs.metadata = metadata
    epochs.pick_types(meg=True, eeg=True)

    # np.unique(metadata[np.isnan(epochs.metadata['RepeatAlter'])]['StimPosition'].values)
    # np.unique(metadata[np.isnan(epochs.metadata['surprise_100'])]['StimPosition'].values)
    # np.unique(metadata[np.isnan(metadata['RepeatAlternp1'])]['StimPosition'].values)
    # np.unique(metadata[np.isnan(metadata['Surprisenp1'])]['StimPosition'].values)

    epochs = epochs[np.where(~np.isnan(epochs.metadata["surprise_100"].values))[0]]
    epochs = epochs[np.where(~np.isnan(epochs.metadata["RepeatAlternp1"].values))[0]]

    # =============== define the regressors =================
    # Repetition and alternation for n (not defined for the 1st item of the 16)
    # Repetition and alternation for n+1 (not defined for the last item of the 16)
    # Omega infinity for n (not defined for the 1st item of the 16)
    # Omega infinity for n+1 (not defined for the last item of the 16)

    names = [
        "Intercept", "surprise_100", "Surprisenp1", "RepeatAlter",
        "RepeatAlternp1"
    ]
    for name in names:
        print(name)
        print(np.unique(epochs.metadata[name].values))

    # ====== normalization ? ====== #
    for name in names[1:]:  # all but intercept
        epochs.metadata[name] = scale(epochs.metadata[name])

    # ====== baseline correction ? ====== #
    print('Baseline correction...')
    epochs = epochs.apply_baseline(baseline=(-0.050, 0))

    lin_reg = linear_regression(epochs, epochs.metadata[names], names=names)
    out_path = op.join(config.result_path, 'linear_models',
                       'reg_repeataltern_surpriseOmegainfinity', subject)
    utils.create_folder(out_path)

    suffix = ''
    if cross_validate:
        #  ---- we replace the data in lin_reg ----
        suffix = '_cv'
        skf = StratifiedKFold(n_splits=4)
        y_balancing = epochs.metadata[
            "SequenceID"].values * 100 + epochs.metadata["StimPosition"].values

        betas = []
        scores = []

        fold_number = 1
        for train_index, test_index in skf.split(np.zeros(len(y_balancing)),
                                                 y_balancing):
            print("======= running a new fold =======")

            # predictor matrix
            preds_matrix_train = np.asarray(
                epochs[train_index].metadata[names].values)
            preds_matrix_test = np.asarray(
                epochs[test_index].metadata[names].values)
            betas_matrix = np.zeros((len(names), epochs.get_data().shape[1],
                                     epochs.get_data().shape[2]))
            scores_cv = np.zeros((epochs.get_data().shape[2]))
            residuals_cv = np.zeros(epochs[test_index].get_data().shape)

            for tt in range(epochs.get_data().shape[2]):
                # for each time-point, we run a regression for each channel
                reg = linear_model.LinearRegression(fit_intercept=False)
                data_train = epochs[train_index].get_data()
                data_test = epochs[test_index].get_data()

                reg.fit(y=data_train[:, :, tt], X=preds_matrix_train)
                betas_matrix[:, :, tt] = reg.coef_.T
                y_preds = reg.predict(preds_matrix_test)
                scores_cv[tt] = r2_score(y_true=data_test[:, :, tt],
                                         y_pred=y_preds)

                # build the residuals by subtracting the predictions of the
                # training-set betas from the test-set data

                residuals_cv[:, :, tt] = data_test[:, :, tt] - y_preds
            residual_epochs_cv = epochs[test_index].copy()
            residual_epochs_cv._data = residuals_cv
            residual_epochs_cv.save(out_path + op.sep + 'fold_' +
                                    str(fold_number) + 'residuals-epo.fif',
                                    overwrite=True)

            betas.append(betas_matrix)
            scores.append(scores_cv)
            fold_number += 1

        # MEAN ACROSS CROSS-VALIDATION FOLDS
        betas = np.mean(betas, axis=0)
        scores = np.mean(scores, axis=0)

        lin_reg['Intercept'].beta._data = np.asarray(betas[0, :, :])
        lin_reg['surprise_100'].beta._data = np.asarray(betas[1, :, :])
        lin_reg['Surprisenp1'].beta._data = np.asarray(betas[2, :, :])
        lin_reg['RepeatAlter'].beta._data = np.asarray(betas[3, :, :])
        lin_reg['RepeatAlternp1'].beta._data = np.asarray(betas[4, :, :])

    # Save surprise regression results

    lin_reg['Intercept'].beta.save(
        op.join(out_path, suffix + 'beta_intercept-ave.fif'))
    lin_reg['surprise_100'].beta.save(
        op.join(out_path, suffix + 'beta_surpriseN-ave.fif'))
    lin_reg['Surprisenp1'].beta.save(
        op.join(out_path, suffix + 'beta_surpriseNp1-ave.fif'))
    lin_reg['RepeatAlternp1'].beta.save(
        op.join(out_path, suffix + 'beta_RepeatAlternp1-ave.fif'))
    lin_reg['RepeatAlter'].beta.save(
        op.join(out_path, suffix + 'beta_RepeatAlter-ave.fif'))

    if cross_validate:
        np.save(op.join(out_path, 'scores_linear_reg_CV.npy'), scores)
    # compute the residuals of the regression

    residuals = epochs.get_data() - lin_reg['Intercept'].beta.data
    for nn in ["surprise_100", "Surprisenp1", "RepeatAlter", "RepeatAlternp1"]:
        residuals = residuals - np.asarray([
            epochs.metadata[nn].values[i] * lin_reg[nn].beta._data
            for i in range(len(epochs))
        ])

    residual_epochs = epochs.copy()
    residual_epochs._data = residuals
    # save the residuals epoch in the same folder
    residual_epochs.save(out_path + op.sep + suffix + 'residuals-epo.fif',
                         overwrite=True)
Example 8
def run_linear_reg_surprise_repeat_alt(subject,
                                       with_complexity=False,
                                       cross_validate=True):
    TP_funcs.append_surprise_to_metadata_clean(subject)

    # ====== load the data , remove the first item for which the surprise is not computed ==========
    epochs = epoching_funcs.load_epochs_items(subject, cleaned=True)
    metadata = epoching_funcs.update_metadata(subject,
                                              clean=True,
                                              new_field_name=None,
                                              new_field_values=None)

    # ============ build the repeatAlter and the surprise 299 for n+1 ==================
    metadata_notclean = epoching_funcs.update_metadata(subject,
                                                       clean=False,
                                                       new_field_name=None,
                                                       new_field_values=None)

    # ====== note: repetition vs. alternation has to be coded by hand here ===========

    metadata_notclean = repeat_alternate_from_metadata(metadata_notclean)
    RepeatAlternp1_notclean = metadata_notclean["RepeatAlter"].values[
        1:].tolist()
    RepeatAlternp1_notclean.append(np.nan)
    Surprisenp1_notclean = metadata_notclean["surprise_299"].values[1:].tolist(
    )
    Surprisenp1_notclean.append(np.nan)
    good_idx = np.where(
        [len(epochs.drop_log[i]) == 0 for i in range(len(epochs.drop_log))])[0]
    RepeatAlternp1 = np.asarray(RepeatAlternp1_notclean)[good_idx]
    Surprisenp1 = np.asarray(Surprisenp1_notclean)[good_idx]
    # ======================================================================================

    metadata = metadata.assign(Intercept=1)  # Add an intercept for later
    metadata = metadata.assign(RepeatAlternp1=RepeatAlternp1)
    metadata = metadata.assign(Surprisenp1=Surprisenp1)  # surprise of item n+1

    epochs.metadata = metadata
    epochs.pick_types(meg=True, eeg=True)

    epochs = epochs[np.where(~np.isnan(epochs.metadata["surprise_299"].values))[0]]
    epochs = epochs[np.where(~np.isnan(epochs.metadata["RepeatAlternp1"].values))[0]]

    # =============== define the regressors =================
    # Repetition and alternation for n (not defined for the 1st item of the 16)
    # Repetition and alternation for n+1 (not defined for the last item of the 16)
    # Omega infinity for n (not defined for the 1st item of the 16)
    # Omega infinity for n+1 (not defined for the last item of the 16)

    names = [
        "Intercept", "surprise_299", "Surprisenp1", "RepeatAlter",
        "RepeatAlternp1"
    ]
    if with_complexity:
        names.append("Complexity")
    for name in names:
        print(name)
        print(np.unique(epochs.metadata[name].values))

    # ============== define the output paths ======

    out_path = op.join(config.result_path, 'linear_models',
                       'reg_repeataltern_surpriseOmegainfinity', subject)
    if with_complexity:
        out_path = op.join(
            config.result_path, 'linear_models',
            'reg_repeataltern_surpriseOmegainfinity_complexity', subject)
    utils.create_folder(out_path)

    # ------------------- implementing the 4-fold CV -----------------

    if cross_validate:
        from sklearn.model_selection import StratifiedKFold
        skf = StratifiedKFold(n_splits=4)
        y_balancing = epochs.metadata[
            "SequenceID"].values * 100 + epochs.metadata["StimPosition"].values

        Intercept = []
        surprise_299 = []
        Surprisenp1 = []
        RepeatAlternp1 = []
        RepeatAlter = []
        Complexity = []

        for train_index, test_index in skf.split(np.zeros(len(y_balancing)),
                                                 y_balancing):
            print("======= running a new fold =======")
            lin_reg_cv = linear_regression(epochs[train_index],
                                           epochs[train_index].metadata[names],
                                           names=names)
            # score with cross validation
            Intercept.append(lin_reg_cv['Intercept'].beta)
            surprise_299.append(lin_reg_cv['surprise_299'].beta)
            Surprisenp1.append(lin_reg_cv['Surprisenp1'].beta)
            RepeatAlternp1.append(lin_reg_cv['RepeatAlternp1'].beta)
            RepeatAlter.append(lin_reg_cv['RepeatAlter'].beta)
            if with_complexity:
                Complexity.append(lin_reg_cv['Complexity'].beta)
        # average the betas across folds; the namedtuple fields returned by
        # linear_regression are read-only, so mutate the Evoked data in place
        lin_reg_cv['Intercept'].beta.data = np.mean([b.data for b in Intercept], axis=0)
        lin_reg_cv['surprise_299'].beta.data = np.mean([b.data for b in surprise_299], axis=0)
        lin_reg_cv['Surprisenp1'].beta.data = np.mean([b.data for b in Surprisenp1], axis=0)
        lin_reg_cv['RepeatAlternp1'].beta.data = np.mean([b.data for b in RepeatAlternp1], axis=0)
        lin_reg_cv['RepeatAlter'].beta.data = np.mean([b.data for b in RepeatAlter], axis=0)
        if with_complexity:
            lin_reg_cv['Complexity'].beta.data = np.mean([b.data for b in Complexity], axis=0)
        lin_reg = lin_reg_cv

    # ------ end of the CV option -------

    else:
        lin_reg = linear_regression(epochs,
                                    epochs.metadata[names],
                                    names=names)
        # Save surprise regression results

    lin_reg['Intercept'].beta.save(op.join(out_path, 'beta_intercept-ave.fif'))
    lin_reg['surprise_299'].beta.save(
        op.join(out_path, 'beta_surpriseN-ave.fif'))
    lin_reg['Surprisenp1'].beta.save(
        op.join(out_path, 'beta_surpriseNp1-ave.fif'))
    lin_reg['RepeatAlternp1'].beta.save(
        op.join(out_path, 'beta_RepeatAlternp1-ave.fif'))
    lin_reg['RepeatAlter'].beta.save(
        op.join(out_path, 'beta_RepeatAlter-ave.fif'))
    if with_complexity:
        lin_reg['Complexity'].beta.save(
            op.join(out_path, 'beta_Complexity-ave.fif'))

    # compute the residuals of the regression

    residuals = epochs.get_data() - lin_reg['Intercept'].beta.data
    for nn in ["surprise_299", "Surprisenp1", "RepeatAlter", "RepeatAlternp1"]:
        residuals = residuals - np.asarray([
            epochs.metadata[nn].values[i] * lin_reg[nn].beta._data
            for i in range(len(epochs))
        ])
    if with_complexity:
        residuals = residuals - np.asarray([
            epochs.metadata["Complexity"].values[i] *
            lin_reg["Complexity"].beta._data for i in range(len(epochs))
        ])

    residual_epochs = epochs.copy()
    residual_epochs._data = residuals

    # save the residuals epoch in the same folder
    residual_epochs.save(out_path + op.sep + 'residuals-epo.fif',
                         overwrite=True)

    return True
Example 9
# name of predictors + intercept
predictor_vars = ['face a - face b', 'phase-coherence', 'intercept']

# create design matrix
design = limo_epochs.metadata[['phase-coherence', 'face']].copy()
design['face a - face b'] = np.where(design['face'] == 'A', 1, -1)
design['intercept'] = 1
design = design[predictor_vars]

###############################################################################
# Now we can set up the linear model to be used in the analysis using
# MNE-Python's :func:`~mne.stats.linear_regression` function.

reg = linear_regression(limo_epochs,
                        design_matrix=design,
                        names=predictor_vars)

###############################################################################
# Extract regression coefficients
# -------------------------------
#
# The results are stored within the object ``reg``,
# which is a dictionary of evoked objects containing
# multiple inferential measures for each predictor in the design matrix.

print('predictors are:', list(reg))
print('fields are:', [field for field in getattr(reg['intercept'], '_fields')])

###############################################################################
# Plot model results
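# The listing is cut off at this point. As a hedged sketch of what the
# plotting step might look like: each entry of ``reg`` is a named tuple of
# Evoked objects (beta, stderr, t_val, p_val, mlog10_p_val), so any of these
# measures can be shown with the usual Evoked plotting methods, for example:
reg['phase-coherence'].beta.plot_joint(title='Effect of phase-coherence (beta)')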
Example 10
                                method='dSPM')

    for col, cond in zip(['condition', 'circscale'], ['pure', 'scale']):
        events[col] = (events[col] == cond).astype(int)

    events['interaction'] = events['freq'] * events['condition']

    X = events[var_names].values
    X = sm.add_constant(X)  # add intercept

    print("Fitting...")
    # fit regression
    lm = linear_regression(stcs, X, ['intercept'] + var_names)

    betas = []  # will contain betas over regressors per subject
    for regressor in var_names:
        print('Regressor: %s' % (regressor))

        # make the output directory if it doesn't exist
        save_dir = "%s/_RESULTS/%s" % (root_dir, regressor)
        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)

        # put the data into an stc for convenience, and save
        stc_fname = '%s/morphed_stcs/%s_%s_%s_morphed' % (save_dir, subject,
                                                          regressor, data_type)
        stc_data = getattr(lm[regressor], data_type)

        # morph the stc to average brain
Example 11
        for wav_idx, wav in enumerate(wavs):
            stc_temp = mne.read_source_estimate(
                "{dir}stcs/nc_{a}_{b}_{c}_{d}-lh.stc".format(dir=proc_dir,
                                                             a=sub,
                                                             b=cond,
                                                             c=wav,
                                                             d=spacing))
            stcs.append(morph.apply(stc_temp))

df_laut["Intercept"] = 1
temp_df = []
for cond in conds:
    temp_df.append(df_laut[df_laut["Block"] == cond])
df_laut = pd.concat(temp_df)
predictor_vars = ["Laut"] + ["Intercept"]
design_matrix = df_laut.copy()[predictor_vars]
reg_laut = linear_regression(stcs,
                             design_matrix=design_matrix,
                             names=predictor_vars)

df_ang["Intercept"] = 1
temp_df = []
for cond in conds:
    temp_df.append(df_ang[df_ang["Block"] == cond])
df_ang = pd.concat(temp_df)
predictor_vars = ["Angenehm"] + ["Intercept"]
design_matrix = df_ang.copy()[predictor_vars]
reg_ang = linear_regression(stcs,
                            design_matrix=design_matrix,
                            names=predictor_vars)
Example 12
# them back to a channels x time points space.
lm_betas = dict()
for ind, predictor in enumerate(predictors):
    # extract coefficients
    beta = betas[:, ind]
    # back projection to channels x time points
    beta = beta.reshape((n_channels, n_times))
    # create evoked object containing the back projected coefficients
    # for each predictor
    lm_betas[predictor] = EvokedArray(beta, epochs_info, tmin)

###############################################################################
# plot results of linear regression

# only show -250 to 500 ms
ts_args = dict(xlim=(-.25, 0.5))

# visualise effect of phase-coherence for sklearn estimation method.
lm_betas['phase-coherence'].plot_joint(ts_args=ts_args,
                                       title='Phase-coherence - sklearn betas',
                                       times=[.23])

###############################################################################
# replicate analysis using mne.stats.linear_regression
reg = linear_regression(limo_epochs['2'], design, names=predictors)

# visualise effect of phase-coherence for mne.stats method.
reg['phase-coherence'].beta.plot_joint(ts_args=ts_args,
                                       title='Phase-coherence - mne betas',
                                       times=[.23])
Example 14
        # ====== normalization of regressors ====== #
        for name in names:
            epochs.metadata[name] = scale(epochs.metadata[name])

        # ====== Linear model (all items) ====== #
        if config.noEEG:
            epochs = epochs.pick_types(meg=True, eeg=False)
        else:
            epochs = epochs.pick_types(meg=True, eeg=True)

        df = epochs.metadata
        epochs.metadata = df.assign(Intercept=1)  # Add an intercept for later

        regressors_names = ["Intercept"] + names
        res = linear_regression(epochs,
                                epochs.metadata[regressors_names],
                                names=regressors_names)

        if cross_validate:
            skf = StratifiedKFold(n_splits=4)
            y_balancing = epochs.metadata[
                "SequenceID"].values * 100 + epochs.metadata[
                    "StimPosition"].values
            betas = []
            scores = []
            fold_number = 1
            for train_index, test_index in skf.split(
                    np.zeros(len(y_balancing)), y_balancing):
                print("======= running regression for fold %i =======" %
                      fold_number)
Example 15
print "Total reward"
behaviour.reward.sum()

# Drop the first trial
behaviour = behaviour[1:]
outcomes.drop(0)

# Assign behavioural data to metadata attribute
outcomes.metadata = behaviour.assign(Intercept=1)

# Linear regression
# The regression has two terms - the intercept (not interesting) and the reward level (interesting)

from mne.stats import linear_regression, fdr_correction
names = ["Intercept", 'reward']
res = linear_regression(outcomes, outcomes.metadata[names], names=names)
# Create an "evoked" object for the reward predictor - this represents the beta value for reward level in the model
# across time points and gradiometers
reward_evoked = res["reward"].beta
reward_evoked.plot_joint(ts_args=dict(time_unit='s'),
                         topomap_args=dict(time_unit='s'))
plt.savefig(
    '/Users/dancingdiva12/Desktop/UCL/Research Project/thesis/figures/{0}_evoked_rewards.png'
    .format(subject_id))

# You can choose which time points the plot shows scalp plots for using the times argument
reward_evoked.plot_joint(ts_args=dict(time_unit='s'),
                         topomap_args=dict(time_unit='s'),
                         times=[.09, .45])

shock_evoked.save(os.path.join(data_dir, 'shock_evoked-ave.fif.gz'))