Example #1
def task_extract_epochs():
    """Step 05: Extract epochs from continuous EEG."""
    # Run the script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `repair_eeg_artefacts`
            task_dep=['repair_eeg_artefacts'],

            # A name for the sub-task: set to the name of the subject
            name='Subject_%s_extract_epochs' % subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=['00_eeg_to_bids.py',
                      '01_task_blocks.py',
                      '02_repair_bad_eeg_channels.py',
                      '03_fit_ica.py',
                      '04_repair_eeg_artefacts.py'],

            # The files produced by the script
            targets=[fname.output(processing_step='cue_epochs',
                                  subject=subject,
                                  file_type='epo.fif'),
                     fname.output(processing_step='probe_epochs',
                                  subject=subject,
                                  file_type='epo.fif')],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 05_extract_epochs.py -s %s' % subject]
        )
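# Hedged sketch (not from the original project): `fname.output(...)` above
# assumes a small path-building helper defined in config.py. A minimal,
# self-contained stand-in could look roughly like this; the class name,
# directory layout and root path are illustrative assumptions only.
import os


class _FileNames:
    """Toy stand-in that formats output paths from keyword arguments."""

    def __init__(self, root):
        self.root = root

    def output(self, processing_step, subject, file_type):
        # e.g. ./derivatives/sub-01/sub-01_cue_epochs-epo.fif
        return os.path.join(
            self.root, 'sub-%s' % subject,
            'sub-%s_%s-%s' % (subject, processing_step, file_type))


fname_demo = _FileNames(root='./derivatives')  # hypothetical output directory
print(fname_demo.output(processing_step='cue_epochs', subject='01',
                        file_type='epo.fif'))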
Example #2
def task_fit_ica():
    """Step 03: Decompose EEG signal into independent components."""
    # Run the script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `repair_bad_channels`
            task_dep=['repair_bad_channels'],

            # A name for the sub-task: set to the name of the subject
            name='Subject_%s_fit_ica' % subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=['00_eeg_to_bids.py',
                      '01_task_blocks.py',
                      '02_repair_bad_eeg_channels.py'],

            # The files produced by the script
            targets=[fname.output(processing_step='fit_ica',
                                  subject=subject,
                                  file_type='ica.fif')],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 03_fit_ica.py -s %s' % subject]
        )
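# Hedged sketch (assumption): a dodo.py containing the tasks above would also
# define the shared names they rely on at module level, roughly along these
# lines. Only `fname` and `subjects` are implied by the tasks themselves; the
# doit configuration shown here is an illustrative guess.
from config import fname, subjects

DOIT_CONFIG = dict(verbosity=2)  # always show the actions' console output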
Example #3
def task_repair_eeg_artefacts():
    """Step 04: Repair EEG artefacts caused by ocular movements."""
    # Run the script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `fit_ica`
            task_dep=['fit_ica'],

            # A name for the sub-task: set to the name of the subject
            name='Subject_%s_repair_eeg_artefacts' % subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=['00_eeg_to_bids.py',
                      '01_task_blocks.py',
                      '02_repair_bad_eeg_channels.py',
                      '03_fit_ica.py'],

            # The files produced by the script
            targets=[fname.output(processing_step='repaired_with_ica',
                                  subject=subject,
                                  file_type='raw.fif')],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 04_repair_eeg_artefacts.py -s %s' % subject]
        )
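# Usage sketch (assumption about how this pipeline is driven): doit can run a
# single sub-task by its `basename:name` pair from the shell, e.g.
#
#   doit repair_eeg_artefacts:Subject_01_repair_eeg_artefacts
#
# while a bare `doit` re-runs every task whose file_dep or targets are out of
# date.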
Example #4
def task_repair_bad_channels():
    """Step 02: Identify and repair bad (i.e., noisy) EEG channels."""
    # Run the script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `task_blocks`
            task_dep=['task_blocks'],

            # A name for the sub-task: set to the name of the subject
            name='Subject_%s_repair_bad_channels' % subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=['00_eeg_to_bids.py',
                      '01_task_blocks.py'],

            # The files produced by the script
            targets=[fname.output(processing_step='repair_bads',
                                  subject=subject,
                                  file_type='raw.fif')],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 02_repair_bad_eeg_channels.py -s %s' % subject]
        )
Example #5
def task_task_blocks():
    """Step 01: Extracts task segments (drop pauses in between)."""
    # Run the script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `eeg_to_bids`
            task_dep=['eeg_to_bids'],

            # A name for the sub-task: set to the name of the subject
            name=subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=['00_eeg_to_bids.py'],

            # The files produced by the script
            targets=[
                fname.output(processing_step='task_blocks',
                             subject=subject,
                             file_type='raw.fif')
            ],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 01_task_blocks.py %s' % subject])
def task_example_summary():
    """Step 01: Average across subjects."""
    return dict(
        # This task should come after `task_example_step`
        task_dep=['example_step'],
        file_dep=([fname.output(subject=s) for s in subjects] +
                  ['01_grand_average.py']),
        targets=[fname.grand_average],
        actions=['python 01_grand_average.py'],
    )
from mne import read_epochs

# All parameters are defined in config.py
from config import fname


def get_epochs(subj):
    """Load the single-trial (epoched) data for one participant."""

    input_file = fname.output(subject=subj,
                              processing_step='cue_epochs',
                              file_type='epo.fif')
    epoch = read_epochs(input_file)
    epoch.crop(tmin=-0.5, tmax=epoch.tmax, include_tmax=False)
    epoch.apply_baseline((-0.300, -0.050))

    return epoch
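# Usage sketch (hypothetical subject label): load one participant's cue epochs
# and average them into an ERP with MNE:
#
#   epochs = get_epochs('01')
#   evoked = epochs.average()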
def task_example_step():
    """Step 00: An example analysis step that is executed for each subject."""
    # Run the example script for each subject in a sub-task.
    for subject in subjects:
        yield dict(
            # This task should come after `task_check`
            task_dep=['check'],

            # A name for the sub-task: set to the name of the subject
            name=subject,

            # If any of these files change, the script needs to be re-run. Make
            # sure that the script itself is part of this list!
            file_dep=[fname.input(subject=subject), '00_example_step.py'],

            # The files produced by the script
            targets=[fname.output(subject=subject)],

            # How the script needs to be called. Here we indicate it should
            # have one command line parameter: the name of the subject.
            actions=['python 00_example_step.py %s' % subject],
        )
from mne.io import read_raw_fif

from config import fname, parser, n_jobs, LoggingFormat
from bads import find_bad_channels
from viz import plot_z_scores

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Initialise bad channel detection for subject %s' % subject +
      LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          processing_step='raw_files',
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)

# drop status channel
raw.drop_channels('Status')

###############################################################################
# 2) Remove slow drifts and line noise

# Setting up band-pass filter from 0.1 - 40 Hz
#
# FIR filter parameters
# ---------------------
# Designing a one-pass, zero-phase, non-causal bandpass filter:
# - Windowed time-domain design (firwin) method
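# Hedged sketch (assumption): the band-pass filter described above would
# typically be applied with MNE's FIR filtering, roughly:
raw = raw.filter(l_freq=0.1, h_freq=40.0,
                 fir_design='firwin',  # windowed time-domain design
                 n_jobs=n_jobs)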
from mne.io import read_raw_fif

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Extracting epochs for subject %s' % subject + LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          processing_step='repaired_with_ica',
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)

# only keep EEG channels
raw.pick_types(eeg=True)

###############################################################################
# 2) Get events from continuous EEG data
# create a dictionary with event IDs for standardised handling
ev_ids = {
    '245': 1,  # end of block
    '71': 2,  # onset of flanker stimuli
    '11': 3,  # target_C_L
    '12': 4,  # target_C_R
    '21': 5,  # target_I_L
Example #11
import os

from mne.io import read_raw_fif

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat, n_jobs

# Handle command line arguments
args = parser.parse_args()
subject = args.subject
session = args.session
task = args.task

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Fit ICA for subject %s (%s)' % (subject, task) + LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          task=task,
                          processing_step='repairbads',
                          file_type='raw.fif')

# check if file exists, otherwise terminate the script
if not os.path.isfile(input_file) or subject == 60:
    exit()

# import data
raw = read_raw_fif(input_file, preload=True)

# filter data to remove drifts
raw_filt = raw.copy().filter(l_freq=1.0, h_freq=None, n_jobs=n_jobs)

###############################################################################
#  2) Set ICA parameters
n_components = 5
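# Hedged sketch (assumption): the remainder of this step would typically
# create the ICA with the parameters above and fit it on the filtered copy,
# roughly (compare Example #24 below):
#
#   ica = ICA(n_components=n_components, method='infomax')
#   ica.fit(raw_filt)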
Example #12
import os

from mne.io import read_raw_fif

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject
session = args.session
task = args.task

print(LoggingFormat.PURPLE +
      LoggingFormat.BOLD +
      'Extracting epochs for subject %s (%s)' % (subject, task) +
      LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          task=task,
                          processing_step='cleaned',
                          file_type='raw.fif')

# check if file exists, otherwise terminate the script
if not os.path.isfile(input_file):
    exit()

# import data
raw = read_raw_fif(input_file, preload=True)

# only keep EEG channels
raw.pick_types(eeg=True)

###############################################################################
# 2) Get events from continuous EEG data
ev_id = None
Example #13
import argparse

import numpy as np
from scipy.signal import butter, filtfilt

import mne
from matplotlib import pyplot as plt

# All parameters are defined in config.py
from config import fname, fmin, fmax, sample_rate

# Handle command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('subject', metavar='sub###', help='The subject to process')
args = parser.parse_args()
subject = args.subject
print('Processing subject:', subject)

# Load the data, filter it and save the result
data = np.loadtxt(fname.input(subject=subject))
b, a = butter(4, [fmin / (sample_rate / 2), fmax / (sample_rate / 2)],
              btype='band')
data = filtfilt(b, a, data)
np.savetxt(fname.output(subject=subject), data)

# Add a plot of the data to the HTML report
with mne.open_report(fname.report(subject=subject)) as report:
    fig = plt.figure()
    plt.plot(data.T)
    report.add_figs_to_section(fig,
                               'Filtered data',
                               section='filtering',
                               replace=True)
    report.save(fname.report_html(subject=subject),
                overwrite=True,
                open_browser=False)
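# Design note (general SciPy background, not project-specific): `butter`
# expects band edges normalised by the Nyquist frequency (sample_rate / 2),
# and `filtfilt` runs the filter forwards and backwards for zero phase shift.
# A self-contained check of the pass-band gain, using assumed example values:
import numpy as np
from scipy.signal import butter, freqz

_sample_rate, _fmin, _fmax = 1000.0, 1.0, 40.0  # assumed values
_b, _a = butter(4, [_fmin / (_sample_rate / 2), _fmax / (_sample_rate / 2)],
                btype='band')
_w, _h = freqz(_b, _a, fs=_sample_rate)
print('gain at 20 Hz ~', abs(_h[np.argmin(abs(_w - 20.0))]))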
                     root=fname.data_dir)

# save in bids format
write_raw_bids(raw, bids_path, overwrite=True)

###############################################################################
# 7) Plot the data for report
raw_plot = raw.plot(scalings=dict(eeg=50e-6, eog=50e-6),
                    n_channels=len(raw.info['ch_names']),
                    show=False)

###############################################################################
# 8) Export data to .fif for further processing
# output path
output_path = fname.output(processing_step='raw_files',
                           subject=subject,
                           file_type='raw.fif')

# save file
raw.save(output_path, overwrite=True)

###############################################################################
# 9) Create HTML report
with open_report(fname.report(subject=subject)[0]) as report:
    report.parse_folder(op.dirname(output_path),
                        pattern='*.fif',
                        render_bem=False)
    report.add_figs_to_section(raw_plot,
                               'Raw data',
                               section='Raw data',
                               replace=True)
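    # Hedged sketch (assumption): as in the earlier report example, the report
    # would typically be rendered to HTML before leaving the `with` block:
    #
    #   report.save(fname.report_html(subject=subject), overwrite=True,
    #               open_browser=False)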
from mne import read_epochs

# All parameters are defined in config.py
from config import fname, subjects, LoggingFormat

# dicts for storing individual sets of epochs/ERPs
incongruent_incorrect_neg = dict()
incongruent_correct_neg = dict()
incongruent_incorrect_erps_neg = dict()
incongruent_correct_erps_neg = dict()

baseline = (-0.800, -0.500)

###############################################################################
# 1) loop through subjects and compute ERPs for A and B cues
for sub in subjects:
    # log progress
    print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
          'Loading epochs for subject %s' % sub + LoggingFormat.END)

    # import the output from previous processing step
    input_file = fname.output(subject=sub,
                              processing_step='reaction_epochs',
                              file_type='epo.fif')
    ern_epo = read_epochs(input_file, preload=True)

    df_epo = ern_epo.copy().apply_baseline(baseline)
    df_epo.crop(tmin=0, tmax=.1)
    df = df_epo.to_data_frame(picks='FCz', index=['epoch'])
    df = df[['time', 'FCz']]
    df = df.merge(df_epo.metadata, left_index=True, right_index=True)
    # write as tab-separated values to match the .tsv extension
    df.to_csv(
        '/Users/philipplange/PycharmProjects/social_flanker/ernsoc_data_bids/derivatives/results/dataframes/epoch_frames/epoch_subject%s.tsv'
        % sub, sep='\t')

# create evokeds dict
Example #16
import os

import numpy as np
from mne import read_epochs

# All parameters are defined in config.py
from config import fname, LoggingFormat

subjects = np.arange(1, 63)

# dicts for storing individual sets of epochs/ERPs
block_stroop = dict()

###############################################################################
# 1) loop through subjects and extract condition specific epochs
for subj in subjects:

    # log progress
    print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
          'Loading epochs for subject %s' % subj + LoggingFormat.END)

    # import the output from previous processing step
    input_cong = fname.output(subject=subj,
                              task='congruentstroop',
                              processing_step='epochs',
                              file_type='epo.fif')

    # import the output from previous processing step
    input_incong = fname.output(subject=subj,
                                task='incongruentstroop',
                                processing_step='epochs',
                                file_type='epo.fif')

    # check if file exists, otherwise terminate the script
    if not os.path.isfile(input_cong) or not os.path.isfile(input_incong):
        continue

    # load epochs and only keep correct reactions
    cong_epo = read_epochs(input_cong, preload=True)
    cong_epo = cong_epo['reaction == "correct"']
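    # Hedged sketch (assumption): the incongruent epochs would presumably be
    # handled the same way:
    #
    #   incong_epo = read_epochs(input_incong, preload=True)
    #   incong_epo = incong_epo['reaction == "correct"']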
import os

from mne.io import read_raw_fif

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject
session = args.session
task = args.task

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Removing bad components for subject %s (%s)' % (subject, task) +
      LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          task=task,
                          processing_step='repairbads',
                          file_type='raw.fif')

# check if file exists, otherwise terminate the script
if not os.path.isfile(input_file):
    exit()

# import data
raw = read_raw_fif(input_file, preload=True)

###############################################################################
# 2) Import ICA weights from previous processing step
ica_file = fname.output(subject=subject,
                        task=task,
                        processing_step='fitica',
                        file_type='ica.fif')
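# Hedged sketch (assumption): mirroring the other examples, the ICA solution
# would then be loaded and applied to the raw data, e.g.:
#
#   ica = read_ica(ica_file)
#   ica.apply(raw)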
Example #18
from mne.io import read_raw_fif
from mne.preprocessing import read_ica

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Finding and removing bad components for subject %s' % subject +
      LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          processing_step='repair_bads',
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)
# activate average reference
raw.apply_proj()

###############################################################################
# 2) Import ICA weights from previous processing step
ica_file = fname.output(subject=subject,
                        processing_step='fit_ica',
                        file_type='ica.fif')
ica = read_ica(ica_file)

###############################################################################
# 3) Find bad components via correlation with template ICA
temp_subjs = [2, 10]
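# Hedged sketch (assumption): "template" subjects like these are typically
# used with mne.preprocessing.corrmap, which flags components in every
# subject's ICA that correlate with a hand-picked template component, e.g.:
#
#   corrmap(icas, template=(template_index, component_index),
#           label='blinks', plot=False)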
Example #19
from mne import events_from_annotations, concatenate_raws, open_report
from mne.io import read_raw_fif

# All parameters are defined in config.py
from config import fname, n_jobs, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
      'Extracting task blocks for subject %s' % subject + LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          processing_step='raw_files',
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)

# drop status channel
raw.drop_channels('Status')

###############################################################################
# 2) Find periods of time in the data with no presented stimuli (i.e., the
# self-paced breaks)

# relevant events
ids = {
    '127': 1,
    '245': 2,
    '13': 3,
"""
Compute grand average across all subjects
"""
import numpy as np

# All parameters are defined in config.py
from config import fname, subjects

# Load the data, compute grand average and save the result
data = []
for subject in subjects:
    data.append(np.loadtxt(fname.output(subject=subject)))
grand_average = np.mean(data, axis=0)
np.savetxt(fname.grand_average, grand_average)
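# Usage note (assumption): this script appears to correspond to the
# `task_example_summary` doit task shown earlier, which lists every subject's
# output plus `01_grand_average.py` as file_dep and `fname.grand_average` as
# its target, so a plain `doit` run would execute it once all per-subject
# outputs exist.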
Example #21
from mne.io import read_raw_fif
from mne.preprocessing import read_ica

# All parameters are defined in config.py
from config import fname, parser, LoggingFormat

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print(LoggingFormat.PURPLE +
      LoggingFormat.BOLD +
      'Finding and removing bad components for subject %s' % subject +
      LoggingFormat.END)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
                          processing_step='repair_bads',
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)
raw.apply_proj()

###############################################################################
# 2) Import ICA weights from previous processing step
ica_file = fname.output(subject=subject,
                        processing_step='fit_ica',
                        file_type='ica.fif')
ica = read_ica(ica_file)

###############################################################################
# 3) Find bad components via correlation with template ICA
temp_subjs = [5]
temp_raws = []
ids = ids.loc[ids['subject_id'].isin(subjects)]
ids = ids[['subject_id', 'group_id']].sort_values(by='subject_id')

# also load individual beta coefficients
betas = np.load(fname.results + '/subj_betas_condition_m250_robust.npy')
r2 = np.load(fname.results + '/subj_r2_condition_m250_robust.npy')

###############################################################################
# 1) import epochs to use as template

# baseline for epochs
baseline = (-0.35, -0.05)

# import the output from previous processing step
input_file = fname.output(subject=int(subjects[0]),
                          task='congruentstroop',
                          processing_step='epochs',
                          file_type='epo.fif')
stroop_epo = read_epochs(input_file, preload=True)
stroop_epo = stroop_epo['reaction == "correct"']
stroop_epo_nb = stroop_epo.copy().crop(tmin=-0.25, tmax=2.0, include_tmax=True)
stroop_epo = stroop_epo.apply_baseline(baseline).crop(tmin=-0.35)

# save the generic info structure of cue epochs (i.e., channel names, number of
# channels, etc.).
epochs_info = stroop_epo_nb.info
n_channels = len(epochs_info['ch_names'])
n_times = len(stroop_epo_nb.times)
times = stroop_epo_nb.times
tmin = stroop_epo_nb.tmin

# placeholder for results
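# Hedged sketch (assumption): the placeholder announced above would typically
# be pre-allocated from the template information, one row per subject and one
# column per channel-time sample, e.g.:
#
#   results = np.zeros((len(subjects), n_channels * n_times))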
Example #23
from mne import read_epochs

# All parameters are defined in config.py
from config import fname, subjects, LoggingFormat

# dict for storing each subject's cue epochs
cues = dict()

# baseline to be applied
baseline = (-0.300, -0.050)

###############################################################################
# 1) loop through subjects and compute ERPs for A and B cues
for subj in subjects:

    # log progress
    print(LoggingFormat.PURPLE + LoggingFormat.BOLD +
          'Loading epochs for subject %s' % subj + LoggingFormat.END)

    # skip subject 51 (excluded from the analysis)
    if subj == 51:
        continue

    # import the output from previous processing step
    input_file = fname.output(subject=subj,
                              processing_step='cue_epochs',
                              file_type='epo.fif')
    cue_epo = read_epochs(input_file, preload=True)

    # extract a and b epochs (only those with correct responses)
    # and apply baseline
    cues['subj_%s' % subj] = cue_epo['Correct A', 'Correct B']
    cues['subj_%s' % subj].apply_baseline(baseline).crop(tmin=-0.500)

###############################################################################
# 2) linear model parameters
# use first subject as generic information template for results
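# Hedged sketch (assumption): "generic information template" here usually
# means re-using the first loaded subject's epochs structure for all results,
# much like the earlier excerpt that stores epochs_info, n_channels and times:
#
#   epochs_info = cues['subj_%s' % subjects[0]].info
#   n_channels = len(epochs_info['ch_names'])
#   times = cues['subj_%s' % subjects[0]].times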
Example #24
from mne.io import read_raw_fif
from mne.preprocessing import ICA

# All parameters are defined in config.py
from config import fname, parser, n_jobs
# check if NVIDIA CUDA GPU processing should be used
if n_jobs == 'cuda':
    from mne.utils import set_config
    set_config('MNE_USE_CUDA', 'true')

# Handle command line arguments
args = parser.parse_args()
subject = args.subject

print('Fitting ICA for subject %s' % subject)

###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(processing_step='repair_bads',
                          subject=subject,
                          file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)

# filter data to remove drifts
raw_filt = raw.copy().filter(l_freq=1.0, h_freq=None, n_jobs=n_jobs)

###############################################################################
#  2) Set ICA parameters
n_components = 20
method = 'infomax'
reject = dict(eeg=250e-6)

###############################################################################
# 3) Fit ICA
ica = ICA(n_components=n_components,
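# Hedged sketch (assumption): the truncated call above would typically be
# completed with the parameters from section 2 and the ICA then fitted on the
# high-pass-filtered copy, roughly:
#
#   ica = ICA(n_components=n_components, method=method)
#   ica.fit(raw_filt, reject=reject)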