def test_chunk_duration():
    """Test chunk_duration."""
    # create dummy raw
    raw = RawArray(data=np.empty([10, 10], dtype=np.float64),
                   info=create_info(ch_names=10, sfreq=1.),
                   first_samp=0)
    raw.info['meas_date'] = 0
    raw.set_annotations(Annotations(description='foo', onset=[0],
                                    duration=[10], orig_time=None))

    # expected_events = [[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 0, 1], ..
    #                    [9, 0, 1], [9, 0, 1]]
    expected_events = np.atleast_2d(np.repeat(range(10), repeats=2)).T
    expected_events = np.insert(expected_events, 1, 0, axis=1)
    expected_events = np.insert(expected_events, 2, 1, axis=1)

    events, events_id = events_from_annotations(raw, chunk_duration=.5,
                                                use_rounding=False)
    assert_array_equal(events, expected_events)

    # test chunk durations that do not fit equally in annotation duration
    expected_events = np.zeros((3, 3))
    expected_events[:, -1] = 1
    expected_events[:, 0] = np.arange(0, 9, step=3)
    events, events_id = events_from_annotations(raw, chunk_duration=3.)
    assert_array_equal(events, expected_events)
Example #2
def test_gdf_data():
    """Test reading raw GDF 1.x files."""
    raw = read_raw_edf(gdf1_path + '.gdf', eog=None, misc=None, preload=True)
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, _ = raw[picks]

    # Test Status is added as event
    EXPECTED_EVS_ONSETS = raw._raw_extras[0]['events'][1]
    evs, evs_id = events_from_annotations(raw)
    assert_array_equal(evs[:, 0], EXPECTED_EVS_ONSETS)
    assert evs_id == {'Unknown': 1}

    # this .npy was generated using the official biosig python package
    raw_biosig = np.load(gdf1_path + '_biosig.npy')
    raw_biosig = raw_biosig * 1e-6  # data are stored in microvolts
    data_biosig = raw_biosig[picks]

    # Assert data are almost equal
    assert_array_almost_equal(data, data_biosig, 8)

    # Test for events
    assert len(raw.annotations.duration) == 963

    # gh-5604
    assert raw.info['meas_date'] == DATE_NONE
Example #3
def get_epochs(system):
    """Find, load and process the epoched data."""
    cfg_local = get_cfg_local(system)
    raw_data = get_raw_data(system)

    if cfg_local['eventtype'] in raw_data.ch_names:
        stim_channel = cfg_local['eventtype']
    else:
        stim_channel = 'STI 014'

    if system == 'CNT':
        events, event_id = mne.events_from_annotations(raw_data)
        events[:, 0] = events[:, 0] + 1
    else:
        events = mne.find_events(raw_data, stim_channel=stim_channel,
                                 shortest_event=1)

        if isinstance(cfg_local['eventvalue'], np.ndarray):
            event_id = list(cfg_local['eventvalue'].astype('int'))
        else:
            event_id = [int(cfg_local['eventvalue'])]

        event_id = [id for id in event_id if id in events[:, 2]]

    epochs = mne.Epochs(raw_data, events=events,
                        event_id=event_id,
                        tmin=-cfg_local['prestim'],
                        tmax=cfg_local['poststim'], baseline=None)

    return epochs
def test_event_id_function_using_custom_function():
    """Test [unit_test] arbitrary function to create the ids."""
    def _constant_id(*args, **kwargs):
        return 42

    description = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    expected_event_id = dict(zip(description, repeat(42)))
    expected_events = np.repeat([[0, 0, 42]], len(description), axis=0)
    raw = _create_annotation_based_on_descr(description)
    events, event_id = events_from_annotations(raw, event_id=_constant_id)

    assert_array_equal(events, expected_events)
    assert event_id == expected_event_id
def test_events_from_annot_onset_alignment():
    """Test events and annotations onset are the same."""
    raw = _raw_annot(meas_date=1, orig_time=1.5)
    #       sec  0        1        2        3
    #       raw  .        |--------XXXXXXXXX
    #     annot  .             |---XX
    # raw.annot  .        |--------XX
    #   latency  .        0        1        2
    #            .                 0        0

    assert raw.annotations.orig_time == 1
    assert raw.annotations.onset[0] == 1
    assert raw.first_samp == 10
    event_latencies, event_id = events_from_annotations(raw)
    assert event_latencies[0, 0] == 10
    assert raw.first_samp == event_latencies[0, 0]
def test_event_id_function_default():
    """Test[unit_test] for event_id_function default in event_from_annotations.

    The expected behavior is to give a numeric label to every annotation not
    present in event_id, starting at 1.
    """
    # No event_id given
    description = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    expected_event_id = dict(zip(description, range(1, 100)))
    expected_events = np.array([[3, 3, 3, 3, 3, 3, 3],
                                [0, 0, 0, 0, 0, 0, 0],
                                [1, 2, 3, 4, 5, 6, 7]]).T

    raw = _create_annotation_based_on_descr(description,
                                            annotation_start_sampl=3,
                                            duration=100)
    events, event_id = events_from_annotations(raw, event_id=None)

    assert_array_equal(events, expected_events)
    assert event_id == expected_event_id
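# The two behaviors tested above can also be reproduced outside the test suite on
# synthetic data. This is a minimal, hedged sketch; the channel name and the
# annotation descriptions are illustrative and not part of the original tests.
import numpy as np
import mne

_info = mne.create_info(ch_names=['eeg1'], sfreq=100., ch_types='eeg')
_raw = mne.io.RawArray(np.zeros((1, 500)), _info)
_raw.set_annotations(mne.Annotations(onset=[1., 2., 3.], duration=[0., 0., 0.],
                                     description=['b', 'a', 'c']))

# Default event_id=None: unique descriptions are sorted and numbered from 1.
_events, _event_id = mne.events_from_annotations(_raw)
print(_event_id)  # {'a': 1, 'b': 2, 'c': 3}

# Callable event_id: the function receives each description string.
_events, _event_id = mne.events_from_annotations(_raw, event_id=lambda d: 42)
print(_event_id)  # {'a': 42, 'b': 42, 'c': 42}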
Example #7
def read_gdf(filenames, subject, mint, maxt):
    f1 = open("testdata/" + subject + "-left.csv", 'w', newline='')
    f2 = open("testdata/" + subject + "-right.csv", 'w', newline='')
    wleft = csv.writer(f1)
    wright = csv.writer(f2)
    lefthands = []
    righthands = []
    for filename in filenames:
        raw = mne.io.read_raw_gdf(filename)
        events_A, event_id_A = mne.events_from_annotations(raw)
        #print(event_id_A)
        # find events that should be rejected
        myrejectid = np.where(events_A[:, 2] == event_id_A['1023'])
        myrejectedevent = []
        for i in myrejectid:
            myrejectedevent.append(i + 1)

        # remove rejected trials
        events = []
        for j in range(0, events_A.shape[0]):
            if j in myrejectedevent[0]:
                print("rejected trial removed")
            else:
                events.append(events_A[j])
        events = np.asarray(events)
        picks = mne.pick_channels(raw.info["ch_names"],
                                  ["EEG:C3", "EEG:Cz", "EEG:C4"])

        # save data from 0.5 s to 3.5 s after the cue
        tmin = mint
        tmax = maxt
        event_ids = dict(handsleft=event_id_A['769'],
                         handsright=event_id_A['770'])
        epochs = mne.Epochs(raw,
                            events,
                            event_ids,
                            tmin,
                            tmax,
                            picks=picks,
                            baseline=None,
                            preload=True)

        # save to file // 3 s at 250 Hz ==> 750 samples per trial
        for i in range(0, epochs['handsleft'].get_data().shape[0]):
            data = epochs['handsleft'].get_data()[i].T
            count = 0
            for j in data:
                if count >= 750:
                    break
                else:
                    count = count + 1
                wleft.writerow(np.asarray(j))

        for i in range(0, epochs['handsright'].get_data().shape[0]):
            data = epochs['handsright'].get_data()[i].T
            count = 0
            for j in data:
                if count >= 750:
                    break
                else:
                    count = count + 1
                wright.writerow(np.asarray(j))
    f1.close()
    f2.close()
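# A hedged usage sketch for read_gdf: the GDF file name and subject label are
# hypothetical, and a local "testdata/" directory must already exist for the
# CSV output; 0.5-3.5 s matches the epoching window noted in the comments above.
read_gdf(['A01T.gdf'], 'A01', mint=0.5, maxt=3.5)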
Example #8
def get_data(subj_num=FNAMES, l_freq=13, h_freq=55, tmin=0.0, tmax=4.0):
    """ Extract EEG time series and class labels from edf files
    All T1, T2 events have duration 4.1 or 4.2 s, use 0~4s for epoching
    :param subj_num:
    :param l_freq: low frequency for bandpass filter
    :param h_freq: high frequency for bandpass filter
    :param tmin:
    :param tmax:

    :return: Xs, ys: list (Ni, nc, T), (Ni, )
    """

    run_type_0 = ['02']
    run_type_1 = ['04', '08', '12']
    run_type_2 = ['06', '10', '14']
    run_types = run_type_0 + run_type_1 + run_type_2

    # fixed parameters
    nc = 64  # number of channels
    sfreq = 160
    n_type0 = 22  # each subject has 21~24 fist or feet events, set to use the same number of data points for baseline
    event_id = {'T1': 2, 'T2': 3}  # T0:0 rest event, not interested

    T = round(tmax * sfreq)

    print('Sub, (N,   nc,  T)')
    Xs, ys = [], []

    # Iterate over subjects: S001, S002, S003 ...
    for i, subj in enumerate(subj_num):
        freq_flag = False

        # Get file names for motor imagery data
        fnames = glob.glob(os.path.join(PATH, subj, subj + 'R*.edf'))
        fnames = sorted([name for name in fnames if name[-6:-4] in run_types])

        subject_trials, subject_labels = [], []
        # Iterate over subject's experiment runs
        for i, fname in enumerate(fnames):

            run = fname[-6:-4]

            # Load data into MNE raw object
            raw = mne.io.read_raw_edf(fname, preload=True, verbose=False)

            if raw.info['sfreq'] != 160:
                freq_flag = True
                print('{} is sampled at {}Hz so will be excluded.'.format(
                    subj, raw.info['sfreq']))
                break

            # By default picks all 64 channels
            picks = mne.pick_types(raw.info, eeg=True)
            assert len(picks) == nc

            # Apply notch filter and bandpass filter
            raw.notch_filter([60.])
            raw.filter(l_freq=l_freq,
                       h_freq=h_freq,
                       picks=picks,
                       method='iir',
                       verbose=False)

            if run == '02':
                # Baseline, eye closed
                # There is only the rest event, randomly slice n_type0=22 snippets of length T for this class
                data = raw.get_data() * 1e6
                X = np.zeros((n_type0, nc, T))
                y = np.zeros((n_type0, ), dtype=int)  # eye closed
                for i in range(n_type0):
                    offset = np.random.randint(0, data.shape[1] - T)
                    X[i] = data[:, offset:offset + T]

            else:
                # Epoching
                events, _ = mne.events_from_annotations(raw, verbose=False)
                epochs = mne.Epochs(
                    raw,
                    events=events,
                    event_id=event_id,
                    tmin=tmin,
                    tmax=tmax,
                    baseline=None,
                    # reject={'eeg': reject_threshold*1e-6},
                    picks=picks,
                    preload=True,
                    verbose=False,
                    on_missing='warning')

                # Data: convert to uV. MNE's Epochs includes the endpoint
                # sample, which makes T = 641 here.
                X = epochs.get_data() * 1e6
                X = X[:, :, :-1]  # make T = sfreq * (tmax - tmin)

                # specific treatment of classes/labels
                y = epochs.events[:, -1]
                if run in run_type_1:
                    y[y == 2] = 1  # left fist
                    y[y == 3] = 2  # right fist
                else:  # run_type_2
                    y[y == 3] = 4  # both feet
                    y[y == 2] = 3  # both fists

            subject_trials.append(X)
            subject_labels.append(y)

        if freq_flag:
            continue
        subject_trials = np.concatenate(subject_trials,
                                        axis=0)  # (None, nc=64, T=640)
        subject_labels = np.concatenate(subject_labels)
        # print({i: sum(subject_labels == i) for i in range(5)})
        print(subj, subject_trials.shape)

        Xs.append(subject_trials)
        ys.append(subject_labels)

    return Xs, ys
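# A hedged usage sketch for get_data: it assumes PATH points at a local copy of
# the PhysioNet EEG Motor Movement/Imagery data and that the subject folder
# below exists there; the printed shapes are only indicative.
Xs, ys = get_data(subj_num=['S001'], l_freq=13, h_freq=55, tmin=0.0, tmax=4.0)
print(len(Xs), Xs[0].shape, ys[0].shape)  # e.g. 1 (N, 64, 640) (N,)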
Example #9
def test_psd_class():
    mne_version = version.parse(mne.__version__)
    # get data
    data_dir = _get_test_data_dir()
    download_test_data()

    # read data
    raw_fname = op.join(data_dir, 'DiamSar_023_rest_raw.fif')
    raw = mne.io.read_raw_fif(raw_fname, preload=True)

    # compute psd
    event_id = {'S 11': 11}
    events, _ = mne.events_from_annotations(raw, event_id=event_id)
    psd = compute_psd(raw, tmin=0.5, tmax=20.5, winlen=2.,
                      step=0.5, events=events, event_id=[11])

    # make sure plotting does not error
    if mne_version > version.parse('0.18'):
        psd.plot(dB=False, fmax=40, show=False)
        psd.plot(fmax=40, average=True, show=False)
    else:
        with pytest.raises(ImportError):
            psd.plot(dB=False, fmax=40, show=False)

    topo = psd.plot_topomap(freqs=[11], show=False)
    assert isinstance(topo.axes, plt.Axes)

    topo = psd.plot_topomap(freqs=[6, 11], show=False)
    assert len(topo.axes) == 2

    topo = psd.plot_topomap(freqs=[8, 10, 12], show=False)
    assert len(topo.axes) == 3
    plt.close('all')

    # data shape checks
    assert len(psd.ch_names) == len(raw.ch_names)
    assert psd.data.shape[-1] == len(psd.freqs)
    assert psd.data.shape[-2] == len(raw.ch_names)

    psd_orig = psd.copy()
    psd2 = psd.average()
    assert (psd.data == psd2.data).all()

    psd.crop(fmin=10, fmax=15)
    assert abs(psd.freqs[0] - 10) < 0.5
    assert abs(psd.freqs[-1] - 15) < 0.5

    # test for (deep)copy
    psd2 = psd.copy()
    psd2.data[0, 0] = 23
    assert not (psd._data[0, 0] == 23)

    # test to_evoked
    psd2 = psd_orig.copy().average()
    evoked = psd_orig.to_evoked()
    assert isinstance(evoked, mne.Evoked)
    assert (evoked.data == psd2.data).all()

    # test plot_joint()
    psd_orig.plot_joint()
    plt.close('all')

    # psd with Epochs
    # ---------------
    epochs = mne.Epochs(raw, events, event_id, tmin=0., tmax=23.,
                        baseline=None, preload=True)
    psd_epo = compute_psd(epochs, tmin=0.5, tmax=20.5, winlen=2.,
                          step=0.5, events=events, event_id=[11])

    if mne_version > version.parse('0.18'):
        psd_epo.plot(show=False)

    psd_avg = psd_epo.copy().average()
    assert psd_epo.data.ndim == 3
    assert psd_avg.data.ndim == 2

    arr = np.random.randn(23, 48)
    with pytest.raises(TypeError, match='works only with Raw or Epochs'):
        psd_epo = compute_psd(arr, tmin=0.5, tmax=20.5, winlen=2.,
                              step=0.5, events=events, event_id=[11])

    # psd construction
    with pytest.raises(ValueError, match='has to be 3d'):
        use_data = psd_epo.data[..., np.newaxis]
        psd = PSD(use_data, psd_epo.freqs, raw.info)

    with pytest.raises(ValueError, match='or 2d'):
        use_data = psd_epo.data[0, :, 0]
        psd = PSD(use_data, psd_epo.freqs, raw.info)

    # test for __repr__
    psd_epo2 = psd_epo.copy().crop(fmin=8, fmax=12)
    rpr = '<borsar.freq.PSD (2 epochs, 64 channels, 9 frequencies), 8 - 12 Hz>'
    assert str(psd_epo2) == rpr

    # test chaining
    psd_epo3 = psd_epo.copy().crop(fmin=8, fmax=12).average()
    rpr = '<borsar.freq.PSD (64 channels, 9 frequencies), 8 - 12 Hz>'
    assert str(psd_epo3) == rpr

    # test channel picking
    psd2 = psd_epo.copy().pick_channels(['Fz', 'Pz'])
    assert psd2.data.shape[1] == 2

    # missing:
    # compute_rest_psd when events is None

    # Epochs with metadata
    # --------------------
    # read data
    epochs_fname = op.join(data_dir, 'GabCon-48_epo.fif')
    epochs = mne.read_epochs(epochs_fname, preload=True)

    assert epochs.metadata is not None

    psd = compute_psd(epochs, tmin=0.5, tmax=1.)
    assert psd.data.ndim == 3
    assert psd.data.shape[0] == epochs._data.shape[0]

    psd_slow = psd['RT > 0.65']
    epochs_slow = epochs['RT > 0.65']
    # TODO: later use len(psd_slow) here
    assert len(epochs_slow) == psd_slow.data.shape[0]
Example #10
                                                update_path=False)

# Load each of the files
raws = [
    mne.io.read_raw_edf(path,
                        preload=True,
                        stim_channel="auto",
                        verbose="WARNING") for path in physionet_paths
]

# Concatenate them
raw = concatenate_raws(raws)
del raws

# Find the events in this dataset
events, _ = mne.events_from_annotations(raw)

# Use only EEG channels
picks = mne.pick_types(raw.info, meg=False, eeg=True, exclude="bads")

# Extract trials, only using EEG channels
epochs = mne.Epochs(
    raw,
    events,
    event_id=dict(hands_or_left=2, feet_or_right=3),
    tmin=1,
    tmax=4.1,
    proj=False,
    picks=picks,
    baseline=None,
    preload=True,
Example #11
def test_annotations_from_events():
    """Test events to annotations conversion."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)

    # 1. Automatic event description
    # -------------------------------------------------------------------------
    annots = annotations_from_events(events, raw.info['sfreq'],
                                     first_samp=raw.first_samp,
                                     orig_time=None)
    assert len(annots) == events.shape[0]

    # Convert back to events
    raw.set_annotations(annots)
    events_out, _ = events_from_annotations(raw, event_id=int)
    assert_array_equal(events, events_out)

    # 2. Explicit event mapping
    # -------------------------------------------------------------------------
    event_desc = {1: 'one', 2: 'two', 3: 'three', 32: None}
    annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                     event_desc=event_desc,
                                     first_samp=raw.first_samp,
                                     orig_time=None)

    assert np.all([a in ['one', 'two', 'three'] for a in annots.description])
    assert len(annots) == events[events[:, 2] <= 3].shape[0]

    # 3. Pass list
    # -------------------------------------------------------------------------
    event_desc = [1, 2, 3]
    annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                     event_desc=event_desc,
                                     first_samp=raw.first_samp,
                                     orig_time=None)

    assert np.all([a in ['1', '2', '3'] for a in annots.description])
    assert len(annots) == events[events[:, 2] <= 3].shape[0]

    # 4. Try passing callable
    # -------------------------------------------------------------------------
    event_desc = lambda d: 'event{}'.format(d)  # noqa:E731
    annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                     event_desc=event_desc,
                                     first_samp=raw.first_samp,
                                     orig_time=None)

    assert np.all(['event' in a for a in annots.description])
    assert len(annots) == events.shape[0]

    # 5. Pass numpy array
    # -------------------------------------------------------------------------
    event_desc = np.array([[1, 2, 3], [1, 2, 3]])
    with pytest.raises(ValueError, match='event_desc must be 1D'):
        annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                         event_desc=event_desc,
                                         first_samp=raw.first_samp,
                                         orig_time=None)

    with pytest.raises(ValueError, match='Invalid type for event_desc'):
        annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                         event_desc=1,
                                         first_samp=raw.first_samp,
                                         orig_time=None)

    event_desc = np.array([1, 2, 3])
    annots = annotations_from_events(events, sfreq=raw.info['sfreq'],
                                     event_desc=event_desc,
                                     first_samp=raw.first_samp,
                                     orig_time=None)
    assert np.all([a in ['1', '2', '3'] for a in annots.description])
    assert len(annots) == events[events[:, 2] <= 3].shape[0]
fig = raw_haemo.plot_psd(average=True)
fig.suptitle('After filtering', weight='bold', size='x-large')
fig.subplots_adjust(top=0.88)

# %%
# Extract epochs
# --------------
#
# Now that the signal has been converted to relative haemoglobin concentration,
# and the unwanted heart rate component has been removed, we can extract epochs
# related to each of the experimental conditions.
#
# First we extract the events of interest and visualise them to ensure they are
# correct.

events, event_dict = mne.events_from_annotations(raw_haemo)
fig = mne.viz.plot_events(events, event_id=event_dict,
                          sfreq=raw_haemo.info['sfreq'])
fig.subplots_adjust(right=0.7)  # make room for the legend


# %%
# Next we define the range of our epochs, the rejection criteria,
# baseline correction, and extract the epochs. We visualise the log of which
# epochs were dropped.

reject_criteria = dict(hbo=80e-6)
tmin, tmax = -5, 15

epochs = mne.Epochs(raw_haemo, events, event_id=event_dict,
                    tmin=tmin, tmax=tmax,
Example #13
    '70': 5,
    '71': 6,
    '72': 7,
    '73': 8,
    '74': 9,
    '75': 10,
    '76': 11,
    '77': 12,
    '78': 13,
    '79': 14,
    '80': 15,
    '81': 16,
    'EDGE boundary': 17
}
# extract events
events = events_from_annotations(raw, event_id=ev_id, regexp=None)

###############################################################################
# 3) Recode events into respective conditions and add information about valid
# and invalid responses

# copy of events
new_evs = events[0].copy()

# global variables
trial = 0
broken = []
sfreq = raw.info['sfreq']
block_end = events[0][events[0][:, 2] == 17, 0] / sfreq
# placeholders for results
block = []
 # =============================================================================
 ica = mne.preprocessing.ICA(n_components=20, random_state=97, method='fastica')
 ica.fit(raw)
 if plot_steps_preproc==True:
     ica.plot_components() 
 ica.exclude = [0, 1, 2]
 ica.apply(raw)
 
 # Interpolate
 raw.interpolate_bads()
 
 # Add Triggers
 # =============================================================================
 triggers = mne.read_annotations(raw_dir+'\\'+eeg_partic+'.vmrk')
 raw.set_annotations(triggers)
 events, event_ids = mne.events_from_annotations(raw)
 if plot_steps_preproc==True:
     mne.viz.plot_events(events)
     
 # Identify unimodal auditory (16 preceded by 1)
 for this_event in range(1,len(events)):
     if events[this_event][2]==16 and events[this_event-1][2]==1:
         events[this_event][2]=161
         
 # Epoch (& Reject)
 # =============================================================================
 reject_criteria = dict(eeg=50e-6)
 epochs = mne.Epochs(raw, events, event_id=161, tmin=-0.1, tmax=0.5,
                     baseline=(-0.1, 0), preload=True, reject=reject_criteria)
 if plot_steps_preproc==True:
     epochs.plot_image()
     epochs.plot_topo_image()
Example #15
def format_session(list_paths,
                   save_path,
                   extraction_settings,
                   preprocess_settings,
                   marker_encodings,
                   is_game,
                   balance=False):
    session_eeg, session_labels = [], []
    assert len(list_paths) > 0, 'No subsession to format'
    logging.info(f'Decoding: {extraction_settings["marker_decodings"]}')
    for subsession_path in list_paths:

        if subsession_path.suffix == '.vhdr':
            raw = load_vhdr(subsession_path, **preprocess_settings)

            # Extract events in format (ts, ?, marker_id)
            events, _ = mne.events_from_annotations(raw, verbose=False)

            eeg, labels = extract_data(raw,
                                       events,
                                       **extraction_settings,
                                       ignore_multi=is_game)
        elif subsession_path.suffix == '.h5':
            raw, events = load_h5(subsession_path, **preprocess_settings)
            eeg, labels = extract_data(raw,
                                       events,
                                       **extraction_settings,
                                       ignore_multi=is_game)

        # Stack trials
        if len(session_eeg) == 0:
            session_eeg = eeg
            session_labels = labels
        else:
            session_eeg = np.vstack([session_eeg, eeg])
            session_labels = np.concatenate([session_labels, labels])

    # Balance rest sessions
    if balance:
        count_dict = dict(Counter(session_labels))
        rest_marker = extraction_settings['marker_decodings']['Rest']
        n_delete = count_dict[rest_marker] - min(count_dict.values())

        # Randomly pick rest ids to delete
        rest_ids = np.where(session_labels == rest_marker)[0]
        ids_to_remove = np.random.choice(rest_ids, n_delete, replace=False)
        session_eeg = np.delete(session_eeg, ids_to_remove, axis=0)
        session_labels = np.delete(session_labels, ids_to_remove, axis=0)

    # Remap labels
    session_labels = np.array([marker_encodings[l] for l in session_labels])
    logging.info(f'Encoding: {marker_encodings}')
    logging.info(f'Output labels: {Counter(session_labels)}')

    # Save as .npy file
    info = raw.info
    save_folder = 'formatted'
    save_folder += '_filt' if preprocess_settings['preprocess'] else '_raw'
    save_folder += f"_{int(info['sfreq'])}Hz"
    save_folder += '_game' if is_game else ''
    save_session(session_eeg, session_labels, info, save_path, save_folder)
Example #16
        template=(0, 0),
        threshold=0.85,
        label='blink_up',
        plot=False)
corrmap(icas=[temp_icas[0], ica],
        template=(0, 7),
        threshold=0.85,
        label='blink_side',
        plot=False)

###############################################################################
# 4) Create summary plots to show signal correction on main experimental
# condition

# create target epochs
target_evs = events_from_annotations(raw, regexp='(11)|(12)|(21)|(22)')[0]
target_epo = Epochs(raw,
                    target_evs,
                    tmin=-1.5,
                    tmax=1.5,
                    reject_by_annotation=True,
                    proj=False,
                    preload=True)
target_epo.apply_baseline(baseline=(-0.3, -0.05))
target_evo = target_epo.average()

# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
    bad_components.extend(ica.labels_[label])
Example #17
def test_experiment_class():
    # 5, 6, 9, 10, 13 and 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel='auto',
                            verbose='WARNING') for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw,
                         events,
                         dict(hands=2, feet=3),
                         tmin=1,
                         tmax=4.1,
                         proj=False,
                         picks=eeg_channel_inds,
                         baseline=None,
                         preload=True)

    # Convert data from volt to millivolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    # Iterator is used to iterate over datasets both for training
    # and evaluation
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    # Loss function takes predictions as they come out of the network and the targets
    # and returns a loss
    def loss_function(preds, targets):
        return F.nll_loss(th.mean(preds, dim=2, keepdim=False), targets)

    # Could be used to apply some constraint on the models, then should be object
    # with apply method that accepts a module
    model_constraint = None
    # Monitors log the training progress
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    # Stop criterion determines when the first stop happens
    stop_criterion = MaxEpochs(4)
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=None,
                     cuda=cuda)

    # need to setup python logging before to be able to see anything
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG,
                        stream=sys.stdout)
    exp.run()

    compare_df = pd.read_csv(
        StringIO(
            'train_loss,valid_loss,test_loss,train_sample_misclass,valid_sample_misclass,'
            'test_sample_misclass,train_misclass,valid_misclass,test_misclass\n'
            '14.167170524597168,13.910758018493652,15.945781707763672,0.5,0.5,'
            '0.5333333333333333,0.5,0.5,0.5333333333333333\n'
            '1.1735659837722778,1.4342904090881348,1.8664429187774658,0.4629567736185384,'
            '0.5120320855614973,0.5336007130124778,0.5,0.5,0.5333333333333333\n'
            '1.3168460130691528,1.60431969165802,1.9181344509124756,0.49298128342245995,'
            '0.5109180035650625,0.531729055258467,0.5,0.5,0.5333333333333333\n'
            '0.8465543389320374,1.280307412147522,1.439755916595459,0.4413435828877005,'
            '0.5461229946524064,0.5283422459893048,0.47916666666666663,0.5,'
            '0.5333333333333333\n0.6977059841156006,1.1762590408325195,1.2779350280761719,'
            '0.40290775401069523,0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n'
            '0.7934166193008423,1.1762590408325195,1.2779350280761719,0.4401069518716577,'
            '0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n0.5982189178466797,'
            '0.8581563830375671,0.9598925113677979,0.32032085561497325,0.47660427807486627,'
            '0.4672905525846702,0.31666666666666665,0.5,0.4666666666666667\n0.5044312477111816,'
            '0.7133197784423828,0.8164243102073669,0.2591354723707665,0.45699643493761144,'
            '0.4393048128342246,0.16666666666666663,0.41666666666666663,0.43333333333333335\n'
            '0.4815250039100647,0.6736412644386292,0.8016976714134216,0.23413547237076648,'
            '0.39505347593582885,0.42932263814616756,0.15000000000000002,0.41666666666666663,0.5\n'
        ))

    for col in compare_df:
        np.testing.assert_allclose(np.array(compare_df[col]),
                                   exp.epochs_df[col],
                                   rtol=1e-3,
                                   atol=1e-4)
Example #18
    def preproc(self, event_dict, baseline_start, stim_dur, montage, out_dir='preproc', subjects='all', tasks='all', 
                hp_cutoff=1, lp_cutoff=50, line_noise=60, seed=42, eog_chan='none'):
        '''Preprocess the EEG data. Assigns a list of epoched data to the Dataset
        instance, where each entry in the list is a subject with concatenated task
        data. Here is the basic structure of the preprocessing workflow:
        
            - Set the montage
            - Band-pass filter (high-pass filter by default)
            - Automatically detect bad channels
            - Notch filter out line-noise
            - Reference data to average of all EEG channels
            - Automated removal of eye-related artifacts using ICA
            - Spherical interpolation of detected bad channels
            - Extract events and epoch the data accordingly
            - Align the events based on type (still need to implement this!)
            - Create a list of epoched data, with subject as the element concatenated across tasks
        
        Parameters
        ----------
        event_dict: dict
            Maps integers to semantic labels for events within the experiment
            
        baseline_start: int or float
            Specify start of the baseline period (in seconds)
            
        stim_dur: int or float
            Stimulus duration (in seconds)
                Note: may need to make more general to allow various durations
                
        montage: mne.channels.montage.DigMontage
            Maps sensor locations to coordinates
            
        subjects: list or 'all'
            Specify which subjects to iterate through
            
        tasks: list or 'all'
            Specify which tasks to iterate through
            
        hp_cutoff: int or float
            The low frequency bound for the highpass filter in Hz
            
        line_noise: int or float
            The frequency of electrical noise to filter out in Hz
            
        seed: int
            Set the seed for replicable results
            
        eog_chan: str
            If there are no EOG channels present, select an EEG channel
            near the eyes for eye-related artifact detection
        '''

        missing = [] # initialize missing file list
        subj_iter = self.gen_iter(subjects, self.n_subj) # get subject iterator
        task_iter = self.gen_iter(tasks, self.n_task) # get task iterator

        # Iterate through subjects (initialize subject epoch list)
        epochs_subj = []
        for subj in subj_iter:

            # Iterate through tasks (initialize within-subject task epoch list)
            epochs_task = []
            for task in task_iter:
                # Specify the file format
                self.get_file_format(subj, task)

                try: # Handles missing files
                    raw = self.wget_raw_edf() # read
                except:
                    print(f'---\nThis file does not exist: {self.file_path}\n---')
                    # Need to write the missing file list out
                    missing.append(self.file_path)
                    break
                    
                # Standardize montage
                mne.datasets.eegbci.standardize(raw)
                # Set montage and strip channel names of "." characters
                raw.set_montage(montage)
                raw.rename_channels(lambda x: x.strip('.'))

                # Apply high-pass filter
                np.random.seed(seed)
                raw.filter(l_freq=hp_cutoff, h_freq=lp_cutoff, picks=['eeg', 'eog'])

                # Instantiate NoisyChannels object
                noise_chans = NoisyChannels(raw, do_detrend=False)

                # Detect bad channels through multiple methods
                noise_chans.find_bad_by_nan_flat()
                noise_chans.find_bad_by_deviation()
                noise_chans.find_bad_by_SNR()

                # Set the bad channels in the raw object
                raw.info['bads'] = noise_chans.get_bads()
                print(f'Bad channels detected: {noise_chans.get_bads()}')

                # Define the frequencies for the notch filter (60Hz and its harmonics)
                #notch_filt = np.arange(line_noise, raw.info['sfreq'] // 2, line_noise)

                # Apply notch filter
                #print(f'Apply notch filter at {line_noise} Hz and its harmonics')
                #raw.notch_filter(notch_filt, picks=['eeg', 'eog'], verbose=False)

                # Reference to the average of all the good channels 
                # Automatically excludes raw.info['bads']
                raw.set_eeg_reference(ref_channels='average')

                # Instantiate ICA object
                ica = mne.preprocessing.ICA(max_iter=1000)
                # Run ICA
                ica.fit(raw)

                # Find which ICs match the EOG pattern
                if eog_chan == 'none':
                    eog_indices, eog_scores = ica.find_bads_eog(raw, verbose=False)
                else:
                    eog_indices, eog_scores = ica.find_bads_eog(raw, eog_chan, verbose=False)

                # Apply the IC blink removals (if any)
                ica.apply(raw, exclude=eog_indices)
                print(f'Removed IC index {eog_indices}')

                # Interpolate bad channels
                raw.interpolate_bads()
                
                # Specify pre-processing directory
                preproc_dir = Path(f'{out_dir}/{subj}')

                # If directory does not exist, one will be created.
                if not os.path.isdir(preproc_dir):
                    os.makedirs(preproc_dir)

                # raw.save(Path(preproc_dir, f'subj{subj}_task{task}_raw.fif'),
                #          overwrite=True)

                # Find events
                events = mne.events_from_annotations(raw)[0]

                # Epoch the data
                preproc_epoch = mne.Epochs(raw, events, tmin=baseline_start, tmax=stim_dur, 
                                   event_id=event_dict, event_repeated='error', 
                                   on_missing='ignore', preload=True)
                
                # Equalize event counts
                preproc_epoch.equalize_event_counts(event_dict.keys())
                
                # Rearrange and align the epochs
                align = [preproc_epoch[i] for i in event_dict.keys()]
                align_epoch = mne.concatenate_epochs(align)
                
                # Add to epoch list
                epochs_task.append(align_epoch)

            # Assuming some data exists for a subject
            # Concatenate epochs within subject
            concat_epoch = mne.concatenate_epochs(epochs_task)
            epochs_subj.append(concat_epoch)
        # Attaches a list with each entry corresponding to epochs for a subject
        self.epoch_list = epochs_subj
fig.subplots_adjust(top=0.88)

###############################################################################
# Extract epochs
# --------------
#
# Now that the signal has been converted to relative haemoglobin concentration,
# and the unwanted heart rate component has been removed, we can extract epochs
# related to each of the experimental conditions.
#
# First we extract the events of interest and visualise them to ensure they are
# correct.

events, _ = mne.events_from_annotations(raw_haemo,
                                        event_id={
                                            '1.0': 1,
                                            '2.0': 2,
                                            '3.0': 3
                                        })
event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3}
fig = mne.viz.plot_events(events,
                          event_id=event_dict,
                          sfreq=raw_haemo.info['sfreq'])
fig.subplots_adjust(right=0.7)  # make room for the legend

###############################################################################
# Next we define the range of our epochs, the rejection criteria,
# baseline correction, and extract the epochs. We visualise the log of which
# epochs were dropped.

reject_criteria = dict(hbo=80e-6)
tmin, tmax = -5, 15
Example #20
    'EEG 022': 'P4',
    'EEG 023': 'P8',
    'EEG 024': 'PO7',
    'EEG 025': 'PO3',
    'EEG 026': 'POz',
    'EEG 027': 'PO4',
    'EEG 028': 'PO8',
    'EEG 029': 'O1',
    'EEG 030': 'Oz',
    'EEG 031': 'O2'
}
raw.rename_channels(mapping)
raw.set_channel_types({"EOG1": 'eog', "EOG2": 'eog'})
raw.set_montage('standard_1020')  # Standard placement of channels

events = mne.events_from_annotations(raw, event_id)[0]
"""
EEGLab data set with the following specifications:
    - 32 channels per frame
    - 30504 frames per epoch
    - 1 epoch
    - 154 events (square and rt)
    - 128 Hz sample rate
    - 0 - 238.305 sec epoch
"""
# =============================================================================
# Dividing the EEG Data into Epochs
# =============================================================================
epochs = mne.Epochs(raw,
                    events=events,
                    tmin=-1,
###############################################################################

# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = 1., 3.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14]  # motor imagery: hands vs feet

raw_files = [
    read_raw_edf(f, preload=True, verbose=False)
    for f in eegbci.load_data(subject, runs)
]
raw = concatenate_raws(raw_files)

events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
Example #22
import mne
import numpy as np
import matplotlib.pyplot as plt


data_path = 'C:/Users/oo/Desktop/EEG/Hypnosis/Data'
fname = data_path + '/Hypnosis_S1_POST_ACTIVE_a.set' 
data = mne.io.read_raw_eeglab(fname)

data.plot()

# events=mne.find_events(data)

events = mne.events_from_annotations(data)  # auditory stimulus events
epochs = mne.Epochs(data, events[0], tmin=-0.2, tmax=0.5)  # window around each event

print(epochs)
print(epochs.event_id)
event_dict = {'dev': 1, 'std': 2}  # dev - deviant (high?) tone, 60 events; std - standard tone, 420 events

epochs = mne.Epochs(data, events[0], tmin=-0.2, tmax=0.5, 
                    event_id=event_dict, preload=True)

del data  # delete the original raw data - it is large

epochs.plot(n_epochs=10)
#%%
print(epochs['dev']) 
print(epochs['std']) 
# evoked = epochs.average()  # average over the 480 epochs
Example #23
    def _get_single_subject_data(self, dataset, subject_id, verbose=False):
        """Return data in micro-volt.
        """
        if not self.is_valid(dataset):
            raise TypeError(
                "Dataset {:s} is not valid for the current paradigm. Check your events and channels settings"
                .format(dataset.dataset_code))

        # data = dataset.get_data(subjects)

        # # events, interval checking
        used_events, used_intervals = self._map_events_intervals(dataset)

        Xs = {}
        ys = {}
        metas = {}

        data = dataset.get_data([subject_id])

        for subject, sessions in data.items():
            for session, runs in sessions.items():
                for run, raw in runs.items():
                    # do raw hook either self-implemented or dataset inherited
                    caches = {}
                    if self._raw_hook:
                        raw, caches = self._raw_hook(raw, caches)
                    elif hasattr(dataset, 'raw_hook'):
                        raw, caches = dataset.raw_hook(raw, caches)

                    # pick selected channels by order
                    channels = dataset.channels if self.select_channels is None else self.select_channels
                    # picks = mne.pick_channels(raw.ch_names, channels, ordered=True)

                    picks = pick_channels(raw.ch_names, channels, ordered=True)

                    # find available events, first check stim_channels then annotations
                    stim_channels = mne.utils._get_stim_channel(
                        None, raw.info, raise_error=False)
                    if len(stim_channels) > 0:
                        events = mne.find_events(raw,
                                                 shortest_event=0,
                                                 initial_event=True)
                    else:
                        # convert event_id to its number type instead of default auto-renaming in 0.19.2
                        events, _ = mne.events_from_annotations(
                            raw, event_id=(lambda x: int(x)))

                    for event_name in used_events.keys():
                        # mne.pick_events returns any matching events in include
                        # only raise Runtime Error when nothing is found
                        # then we just skip this event
                        try:
                            selected_events = mne.pick_events(
                                events, include=used_events[event_name])
                        except RuntimeError:
                            continue

                        # transform Raw to Epochs

                        epochs = mne.Epochs(
                            raw,
                            selected_events,
                            event_id={event_name: used_events[event_name]},
                            event_repeated='drop',
                            tmin=used_intervals[event_name][0],
                            tmax=used_intervals[event_name][1] -
                            1. / raw.info['sfreq'],
                            picks=picks,
                            proj=False,
                            baseline=None,
                            preload=True)

                        # skip invalid time intervals
                        if len(epochs) == 0:
                            continue

                        # do epochs hook
                        if self._epochs_hook:
                            epochs, caches = self._epochs_hook(epochs, caches)
                        elif hasattr(dataset, 'epochs_hook'):
                            epochs, caches = dataset.epochs_hook(
                                epochs, caches)

                        # FIXME: is this resample reasonable?
                        if self.srate:
                            # as MNE suggested, decimate after extract epochs
                            # low-pass raw object in raw_hook to prevent aliasing problem
                            epochs = epochs.resample(self.srate)
                            # epochs = epochs.decimate(dataset.srate//self.srate)

                        # retrieve X, y and meta
                        X = epochs.get_data() * 1e6  # micro-volt default
                        y = epochs.events[:, -1]
                        trial_ids = np.argwhere(events[:, -1] == list(
                            epochs.event_id.values())[0]).reshape((-1))
                        meta = pd.DataFrame({
                            "subject": [subject] * len(epochs),
                            "session": [session] * len(epochs),
                            "run": [run] * len(epochs),
                            "event": [event_name] * len(epochs),
                            "trial_id":
                            trial_ids,
                            "dataset": [dataset.dataset_code] * len(epochs)
                        })

                        # do data hook
                        if self._data_hook:
                            X, y, meta, caches = self._data_hook(
                                X, y, meta, caches)
                        elif hasattr(dataset, 'data_hook'):
                            X, y, meta, caches = dataset.data_hook(
                                X, y, meta, caches)

                        # collecting data
                        pre_X = Xs.get(event_name)
                        if pre_X is not None:
                            Xs[event_name] = np.concatenate((pre_X, X), axis=0)
                        else:
                            Xs[event_name] = X

                        pre_y = ys.get(event_name)
                        if pre_y is not None:
                            ys[event_name] = np.concatenate((pre_y, y), axis=0)
                        else:
                            ys[event_name] = y

                        pre_meta = metas.get(event_name)
                        if pre_meta is not None:
                            metas[event_name] = pd.concat((pre_meta, meta),
                                                          axis=0,
                                                          ignore_index=True)
                        else:
                            metas[event_name] = meta
        return Xs, ys, metas
        label='blink_up',
        plot=False)
corrmap(icas=[temp_icas[0], ica],
        template=(0, 1),
        threshold=threshold_side,
        label='blink_side',
        plot=False)
# corrmap(icas=[temp_icas[1], ica],
#         template=(0, 2), threshold=0.80, label='blink_weird', plot=False)

###############################################################################
# 4) Create summary plots to show signal correction on main experimental
# condition

# create a-cue epochs
a_evs = events_from_annotations(raw, regexp='^fix')[0]
a_epo = Epochs(raw,
               a_evs,
               tmin=-2,
               tmax=2,
               reject_by_annotation=True,
               proj=False,
               preload=True)
a_epo.apply_baseline(baseline=(-0.3, -0.05))
a_evo = a_epo.average()

# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
    bad_components.extend(ica.labels_[label])
Example #25
    def get_data(self, dataset, subjects=None, verbose=False):
        """Get data from dataset with selected subjects.
        
        Parameters
        ----------
        dataset : Dataset instance.
            Dataset.
        subjects : None|list of int, optional
            Selected subject ids, if None, use all subjects in dataset.
        verbose : bool, optional
            Print processing information.
        
        Returns
        -------
        Xs : dict
            A dict of the selected events' data X.
        ys : dict
            A dict of the selected events' labels y.
        metas : dict
            A dict of the selected events' metainfo meta.
        """
        if not self.is_valid(dataset):
            raise TypeError(
                "Dataset {:s} is not valid for the current paradigm. Check your events and channels settings"
                .format(dataset.code))

        data = dataset.get_data(subjects)

        # # events, interval checking
        used_events, used_intervals = self._map_events_intervals(dataset)

        Xs = {}
        ys = {}
        metas = {}
        for subject, sessions in data.items():
            for session, runs in sessions.items():
                for run, raw in runs.items():
                    # do raw hook
                    caches = {}
                    if self._raw_hook:
                        raw, caches = self._raw_hook(raw, caches)

                    # pick selected channels by order
                    channels = dataset.channels if self.select_channels is None else self.select_channels
                    # picks = mne.pick_channels(raw.ch_names, channels, ordered=True)

                    picks = pick_channels(raw.ch_names, channels, ordered=True)

                    # find available events, first check stim_channels then annotations
                    stim_channels = mne.utils._get_stim_channel(
                        None, raw.info, raise_error=False)
                    if len(stim_channels) > 0:
                        events = mne.find_events(raw,
                                                 shortest_event=0,
                                                 initial_event=True)
                    else:
                        # convert event_id to its number type instead of default auto-renaming in 0.19.2
                        events, _ = mne.events_from_annotations(
                            raw, event_id=(lambda x: int(x)))

                    for event_name in used_events.keys():
                        # mne.pick_events returns any matching events in include
                        # only raise Runtime Error when nothing is found
                        # then we just skip this event
                        try:
                            selected_events = mne.pick_events(
                                events, include=used_events[event_name])
                        except RuntimeError:
                            continue

                        # transform Raw to Epochs

                        epochs = mne.Epochs(
                            raw,
                            selected_events,
                            event_id={event_name: used_events[event_name]},
                            event_repeated='drop',
                            tmin=used_intervals[event_name][0],
                            tmax=used_intervals[event_name][1] -
                            1. / raw.info['sfreq'],
                            picks=picks,
                            proj=False,
                            baseline=None,
                            preload=True)

                        # do epochs hook
                        if self._epochs_hook:
                            epochs, caches = self._epochs_hook(epochs, caches)

                        # FIXME: is this resample reasonable?
                        if self.srate:
                            # as MNE suggested, decimate after extract epochs
                            # low-pass raw object in raw_hook to prevent aliasing problem
                            epochs = epochs.resample(self.srate)
                            # epochs = epochs.decimate(dataset.srate//self.srate)

                        # retrieve X, y and meta
                        X = epochs.get_data()
                        y = epochs.events[:, -1]
                        meta = pd.DataFrame({
                            "subject": [subject] * len(epochs),
                            "session": [session] * len(epochs),
                            "run": [run] * len(epochs),
                        })

                        # do data hook
                        if self._data_hook:
                            X, y, meta, caches = self._data_hook(
                                X, y, meta, caches)

                        # collecting data
                        pre_X = Xs.get(event_name)
                        if pre_X is not None:
                            Xs[event_name] = np.concatenate((pre_X, X), axis=0)
                        else:
                            Xs[event_name] = X

                        pre_y = ys.get(event_name)
                        if pre_y is not None:
                            ys[event_name] = np.concatenate((pre_y, y), axis=0)
                        else:
                            ys[event_name] = y

                        pre_meta = metas.get(event_name)
                        if pre_meta is not None:
                            metas[event_name] = pd.concat((pre_meta, meta),
                                                          axis=0,
                                                          ignore_index=True)
                        else:
                            metas[event_name] = meta

        return Xs, ys, metas
Exemple #26
0
def test_events_from_annot_in_raw_objects():
    """Test basic functionality of events_fron_annot for raw objects."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Visual/Smiley': 32,
        'Motor/Button': 5
    }
    event_map = {v: k for k, v in event_id.items()}
    annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],
                        duration=np.zeros(len(events)),
                        description=[event_map[vv] for vv in events[:, 2]],
                        orig_time=None)
    raw.set_annotations(annot)

    events2, event_id2 = \
        events_from_annotations(raw, event_id=event_id, regexp=None)
    assert_array_equal(events, events2)
    assert_equal(event_id, event_id2)

    events3, event_id3 = \
        events_from_annotations(raw, event_id=None, regexp=None)

    assert_array_equal(events[:, 0], events3[:, 0])
    assert set(event_id.keys()) == set(event_id3.keys())

    # ensure that these actually got sorted properly
    expected_event_id = {
        desc: idx + 1 for idx, desc in enumerate(sorted(event_id.keys()))}
    assert event_id3 == expected_event_id

    first = np.unique(events3[:, 2])
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    first = np.unique(list(event_id3.values()))
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    events4, event_id4 =\
        events_from_annotations(raw, event_id=None, regexp='.*Left')

    expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id4.keys(), expected_event_id4.keys())

    expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events4[:, 0], events4[:, 0])

    events5, event_id5 = \
        events_from_annotations(raw, event_id=event_id, regexp='.*Left')

    expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id5, expected_event_id5)

    expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events5, events5)

    with pytest.raises(ValueError, match='not find any of the events'):
        events_from_annotations(raw, regexp='not_there')

    with pytest.raises(ValueError, match='Invalid type for event_id'):
        events_from_annotations(raw, event_id='wrong')

    # boundary annotations introduced by concatenation must not become BAD or EDGE events
    raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    _, event_id = events_from_annotations(raw_concat)
    assert isinstance(event_id, dict)
    assert len(event_id) > 0
    for kind in ('BAD', 'EDGE'):
        assert '%s boundary' % kind in raw_concat.annotations.description
        for key in event_id.keys():
            assert kind not in key

    # remove all events
    raw.set_annotations(None)
    events7, _ = events_from_annotations(raw)
    assert_array_equal(events7, np.empty((0, 3), dtype=int))
Exemple #27
0
def events(raw=None,
           merge=None,
           proj=False,
           name=None,
           bads=None,
           stim_channel=None,
           events=None,
           annotations=None,
           **kwargs):
    """
    Load events from a raw fiff file.

    Parameters
    ----------
    raw : str(path) | None | mne Raw
        The raw fiff file from which to extract events (if raw and events are
        both ``None``, a file dialog will be displayed to select a raw file).
    merge : int
        Merge steps occurring in neighboring samples. The integer value
        indicates over how many samples events should be merged, and the sign
        indicates in which direction they should be merged (negative means
        towards the earlier event, positive towards the later event). By
        default, this parameter is inferred from the data.
    proj : bool | str
        Path to the projections file that will be loaded with the raw file.
        ``'{raw}'`` will be expanded to the raw file's path minus extension.
        With ``proj=True``, ``'{raw}_*proj.fif'`` will be used,
        looking for any projection file starting with the raw file's name.
        If multiple files match the pattern, a ValueError will be raised.
    name : str | None
        A name for the Dataset. If ``None``, the raw filename will be used.
    bads : None | list
        Specify additional bad channels in the raw data file (these are added
        to the ones that are already defined in the raw file).
    stim_channel : None | string | list of string
        Name of the stim channel or all the stim channels
        affected by the trigger. If None, the config variables
        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
        etc. are read. If these are not found, it will default to
        'STI 014'.
    events : None | str
        If events are stored in a fiff file separate from the Raw object, the
        path to the events file can be supplied here. The events in the Dataset
        will reflect the events in the events file rather than the raw file.
    annotations : bool
        Generate events from annotations instead of the stim channel (by
        default, annotations are used when present).
    **kwargs
        Keyword arguments for loading the raw file (see, e.g.,
        :func:`mne.io.read_raw_kit`).

    Returns
    -------
    events : Dataset
        A Dataset with the following variables:
         - *i_start*: the index of the event in the raw file.
         - *trigger*: the event value.
        The Dataset's info dictionary contains the following values:
         - *raw*: the mne Raw object.

    """
    if (raw is None and events is None) or isinstance(raw, str):
        raw = mne_raw(raw, proj=proj, **kwargs)

    if bads is not None and raw is not None:
        raw.info['bads'].extend(bads)

    if name is None and raw is not None:
        raw_path = raw.filenames[0]
        if isinstance(raw_path, str):
            name = os.path.basename(raw_path)
        else:
            name = None

    if events is None:
        if annotations is None:
            regex = re.compile('(bad|edge)', re.IGNORECASE)
            index = [
                not regex.match(desc) for desc in raw.annotations.description
            ]
            if any(index):
                annotations = raw.annotations[index]
        if annotations:
            evts, _ = mne.events_from_annotations(raw, int)
        else:
            raw.load_data()
            if merge is None:
                evts = mne.find_stim_steps(raw,
                                           merge=-1,
                                           stim_channel=stim_channel)
                if len(evts) == 0:
                    evts = mne.find_stim_steps(raw,
                                               merge=0,
                                               stim_channel=stim_channel)
            else:
                evts = mne.find_stim_steps(raw,
                                           merge=merge,
                                           stim_channel=stim_channel)
            evts = evts[np.flatnonzero(evts[:, 2])]
    else:
        evts = mne.read_events(events)

    i_start = Var(evts[:, 0], name='i_start')
    trigger = Var(evts[:, 2], name='trigger')
    info = {'raw': raw}
    return Dataset((trigger, i_start), name, info=info)
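# A minimal usage sketch (hypothetical file name, not part of the original
# example): load events from a raw file and inspect the resulting Dataset.
# ds = events('sample_raw.fif', stim_channel='STI 014')
# print(ds['trigger'], ds['i_start'])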
Exemple #28
0
def test_cropped_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )
    # Convert data from volts to microvolts
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # Perform forward pass to determine how many outputs per input
    n_preds_per_input = get_output_shape(model, in_chans,
                                         input_window_samples)[2]

    train_set = create_from_X_y(X[:60],
                                y[:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[60:],
                                y[60:],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    train_split = predefined_split(valid_set)

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=optim.Adam,
        train_split=train_split,
        batch_size=32,
        callbacks=['accuracy'],
    )

    clf.fit(train_set, y=None, epochs=4)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([1.455306, 1.455934, 1.210563, 1.065806]),
        rtol=1e-3,
        atol=1e-4,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([2.547288, 1.51785, 1.394036, 1.064355]),
        rtol=1e-3,
        atol=1e-3,
    )
    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array([0.5, 0.5, 0.5, 0.533333]),
        rtol=1e-3,
        atol=1e-4,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array([0.533333, 0.466667, 0.533333, 0.5]),
        rtol=1e-3,
        atol=1e-4,
    )
Exemple #29
0
def find_false_spikes(path_to_eeg, path_tables):
    file = mne.io.read_raw_eeglab(path_to_eeg, preload=True)
    spikes_delays = mne.events_from_annotations(file)
    paths = [f for f in sorted(os.listdir(path_tables))]
    paths = paths[0:5]
    fin_list, llll, all_spikes = [], 0, 0
    for g, h in enumerate(paths):
        j, ind_1 = 0, 0
        path_to_table = path_tables + '/{0}'.format(paths[g])
        datafile = pd.read_csv(path_to_table)
        tms_sham_order = datafile['TMS'].tolist()
        delays_table = datafile['SND_START'].tolist()
        ind_stim = [
            i for i, e in enumerate(tms_sham_order)
            if e == 1 and math.isnan(delays_table[i]) == 0
        ]
        new_l1 = [delays_table[index] for index in ind_stim]
        min_del = min(([
            round(new_l1[index + 1] * 2000 - new_l1[index] * 2000)
            for index in range(len(new_l1) - 1)
        ]))
        f = []
        for i in range(len(tms_sham_order)):
            f.append(int(tms_sham_order[i]))
        number_spikes = f.count(1)
        all_spikes += number_spikes
        b = llll + number_spikes
        order_table, ind_st_sham = func_c(f)
        order_eeg, eeg_delays = func_a(spikes_delays, llll, b, min_del)
        fin_list_1 = []
        if len(order_eeg) < len(order_table):
            #and g < (len(paths) - 1):
            tau, _ = func_a(spikes_delays, b + 1,
                            b + 2 + len(order_table) - len(order_eeg), min_del)
            order_eeg.extend(tau)
        #elif len(order_eeg) < len(order_table) and g == (len(paths) - 1):
        #order_eeg.append(1)
        ind_f = [y for y, e in enumerate(order_table) if e == 2]
        for i in range(len(order_table)):
            if order_eeg[i] == 1 and order_table[i - j] == 1:
                fin_list_1.append(1)
            # check this
            elif order_eeg[i] == 2 and order_table[i - j] == 1 and i == 1:
                del fin_list_1[-1:]
                fin_list_1.append(3)
                j += 2
            # new condition for false spike in group
            elif order_eeg[i] == 1 and order_table[i - j] == 2:
                hh = ind_f.index(i - j)
                if (i - j) == order_table.index(2):
                    a3 = order_table[:i - j].count(1)
                    if a3 != 0:
                        del fin_list_1[-a3:]
                    fin_list_1.extend([3] + [1] * a3)
                    j += 1
                else:
                    a1, a2 = ind_f[hh - 1], ind_f[hh]
                    del fin_list_1[-(a2 - a1 - 1):]
                    fin_list_1.extend([3] + [1] * (a2 - a1 - 1))
                    j += 1
                # if all(i < 2 * min_del - 1000 for i in delays_table[a1 + 1:a2 - 1]):
                # fin_list_1.extend([3] + [1] * (a2 - a1 - 2))
                # j += 1
                # else:
                # print('false')
            elif order_eeg[i] == 2 and order_table[i - j] == 1 and (0 == i):
                j += 1
            elif ((order_eeg[i] == 2 and order_eeg[i - 1] == 1) and
                  (i != 0)) and ((order_table[i - j] == 1) and func_b(
                      order_eeg, order_table, i, i - j)) != 0:
                del fin_list_1[-1:]
                fin_list_1.append(3)
                j += 2
            # check number of stim between two sham, its important!
            elif ((order_eeg[i] == 2 and order_table[i - j] == 1) and
                  (i != 0)) and (order_table[i - j] == 1 and func_b(
                      order_eeg, order_table, i, i - j) == 0):
                j += 1
            # as a replacement for a global variable
            ind_1 = i - j
        llll = b
        # we should add s1 instead of s!
        # should correct this statement
        if fin_list_1.count(1) != order_table.count(1):
            s = order_table.count(1) - fin_list_1.count(1)
            s1 = order_table[ind_1:].count(1) - order_eeg[ind_1 + j:].count(1)
            fin_list_1.extend([1] * s)
            llll += s1
        fin_list.extend(fin_list_1)
    ind_false_spikes = [i for i, e in enumerate(fin_list) if e == 3]
    true_start_stim = []
    ppf = spikes_delays[0][:, 0][0:]
    # h = 0
    # for i in range(1, (len(ppf)), 2):
    # if h not in ind_false_spikes:
    # true_start_stim.append(ppf[i])
    # h += 1
    return ind_false_spikes, true_start_stim
Exemple #30
0
def _read_events(events_data, event_id, raw, bids_path=None):
    """Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations.

    Parameters
    ----------
    events_data : path-like | np.ndarray | None
        If a string, a path to an events file. If an array, an MNE events array
        (shape n_events, 3). If None, events will be generated from
        ``raw.annotations``.
    event_id : dict | None
        The event id dict used to create a 'trial_type' column in events.tsv,
        mapping a description key to an integer-valued event code.
    raw : mne.io.Raw
        The data as MNE-Python Raw object.
    bids_path : BIDSPath | None
        Can be used to determine if the data is a resting-state or empty-room
        recording, and will suppress a warning about missing events in this
        case.

    Returns
    -------
    all_events : np.ndarray, shape = (n_events, 3)
        The first column contains the event time in samples and the third
        column contains the event id. The second column is ignored for now but
        typically contains the value of the trigger channel either immediately
        before the event or immediately after.
    all_dur : np.ndarray, shape (n_events,)
        The event durations in seconds.
    all_desc : dict
        A dictionary with the keys corresponding to the event descriptions and
        the values to the event IDs.

    """
    # get events from events_data
    if isinstance(events_data, np.ndarray):
        if events_data.ndim != 2:
            raise ValueError('Events must have two dimensions, '
                             f'found {events_data.ndim}')
        if events_data.shape[1] != 3:
            raise ValueError('Events must have second dimension of length 3, '
                             f'found {events_data.shape[1]}')
        events = events_data
    elif events_data is None:
        events = np.empty(shape=(0, 3), dtype=int)
    else:
        events = read_events(events_data).astype(int)

    if events.size > 0:
        # Only keep events for which we have an ID <> description mapping.
        ids_without_desc = set(events[:, 2]) - set(event_id.values())
        if ids_without_desc:
            raise ValueError(
                f'No description was specified for the following event(s): '
                f'{", ".join([str(x) for x in sorted(ids_without_desc)])}. '
                f'Please add them to the event_id dictionary, or drop them '
                f'from the events_data array.')
        del ids_without_desc
        mask = [e in list(event_id.values()) for e in events[:, 2]]
        events = events[mask]

        # Append events to raw.annotations. All event onsets are relative to
        # measurement beginning.
        id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))
        # We don't pass `first_samp`, as set_annotations() below will take
        # care of this shift automatically.
        new_annotations = mne.annotations_from_events(
            events=events,
            sfreq=raw.info['sfreq'],
            event_desc=id_to_desc_map,
            orig_time=raw.annotations.orig_time)

        raw = raw.copy()  # Don't alter the original.
        annotations = raw.annotations.copy()

        # We use `+=` here because `Annotations.__iadd__()` does the right
        # thing and also performs a sanity check on `Annotations.orig_time`.
        annotations += new_annotations
        raw.set_annotations(annotations)
        del id_to_desc_map, annotations, new_annotations

    # Now convert the Annotations to events.
    all_events, all_desc = events_from_annotations(
        raw,
        event_id=event_id,
        regexp=None  # Include `BAD_` and `EDGE_` Annotations, too.
    )
    all_dur = raw.annotations.duration

    # Warn about missing events if not rest or empty-room data
    if ((all_events.size == 0 and bids_path.task is not None)
            and (not bids_path.task.startswith('rest')
                 or not (bids_path.subject == 'emptyroom'
                         and bids_path.task == 'noise'))):
        warn('No events found or provided. Please add annotations to the raw '
             'data, or provide the events_data and event_id parameters. For '
             'resting state data, BIDS recommends naming the task using '
             'labels beginning with "rest".')

    return all_events, all_dur, all_desc
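# A minimal sketch (illustrative only) of how this private helper might be
# called; within MNE-BIDS it is invoked by the BIDS writing routines:
# all_events, all_dur, all_desc = _read_events(
#     events_data=None, event_id={'Auditory/Left': 1}, raw=raw,
#     bids_path=bids_path)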
Exemple #31
0
#
# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and
# REM sleep (R). To do so, we use the ``event_id`` parameter in
# :func:`mne.events_from_annotations` to select the events we are
# interested in and to associate an event identifier with each of them.

annotation_desc_2_event_id = {
    'Sleep stage W': 1,
    'Sleep stage 1': 2,
    'Sleep stage 2': 3,
    'Sleep stage 3': 4,
    'Sleep stage 4': 4,
    'Sleep stage R': 5
}

events_train, _ = mne.events_from_annotations(
    raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)

# create a new event_id that unifies stages 3 and 4
event_id = {
    'Sleep stage W': 1,
    'Sleep stage 1': 2,
    'Sleep stage 2': 3,
    'Sleep stage 3/4': 4,
    'Sleep stage R': 5
}

# plot events
mne.viz.plot_events(events_train,
                    event_id=event_id,
                    sfreq=raw_train.info['sfreq'])
import mne
from mne.event import define_target_events
from mne.channels import make_1020_channel_selections

print(__doc__)

###############################################################################
# Load EEGLAB example data (a small EEG dataset)
data_path = mne.datasets.testing.data_path()
fname = data_path + "/EEGLAB/test_raw.set"
montage = data_path + "/EEGLAB/test_chans.locs"

event_id = {"rt": 1, "square": 2}  # must be specified for str events
eog = {"FPz", "EOG1", "EOG2"}
raw = mne.io.read_raw_eeglab(fname, eog=eog, montage=montage)
events = mne.events_from_annotations(raw, event_id)[0]

###############################################################################
# Create Epochs

# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = .7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
                                       tmin=0., tmax=tmax, new_id=2)

epochs = mne.Epochs(raw, events=new_events, tmax=tmax + .1,
                    event_id={"square": 2})
Exemple #33
0
# Rapid Eye Movement sleep, movement (M), and Stage (?) for any non-scored
# segment.
#
# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and
# REM sleep (R). To do so, we use the ``event_id`` parameter in
# :func:`mne.events_from_annotations` to select the events we are
# interested in and to associate an event identifier with each of them.

annotation_desc_2_event_id = {'Sleep stage W': 1,
                              'Sleep stage 1': 2,
                              'Sleep stage 2': 3,
                              'Sleep stage 3': 4,
                              'Sleep stage 4': 4,
                              'Sleep stage R': 5}

events_train, _ = mne.events_from_annotations(
    raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)

# create a new event_id that unifies stages 3 and 4
event_id = {'Sleep stage W': 1,
            'Sleep stage 1': 2,
            'Sleep stage 2': 3,
            'Sleep stage 3/4': 4,
            'Sleep stage R': 5}

# plot events
mne.viz.plot_events(events_train, event_id=event_id,
                    sfreq=raw_train.info['sfreq'])

# keep the color-code for further plotting
stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14]  # motor imagery: hands vs feet

raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])

# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))

# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')

events, _ = events_from_annotations(raw)

picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

# Define a Monte Carlo cross-validation generator (to reduce variance):
def test_events_from_annot_in_raw_objects():
    """Test basic functionality of events_fron_annot for raw objects."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Visual/Smiley': 32,
        'Motor/Button': 5
    }
    event_map = {v: k for k, v in event_id.items()}
    annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],
                        duration=np.zeros(len(events)),
                        description=[event_map[vv] for vv in events[:, 2]],
                        orig_time=None)
    raw.set_annotations(annot)

    events2, event_id2 = \
        events_from_annotations(raw, event_id=event_id, regexp=None)
    assert_array_equal(events, events2)
    assert_equal(event_id, event_id2)

    events3, event_id3 = \
        events_from_annotations(raw, event_id=None, regexp=None)

    assert_array_equal(events[:, 0], events3[:, 0])
    assert set(event_id.keys()) == set(event_id3.keys())

    # ensure that these actually got sorted properly
    expected_event_id = {
        desc: idx + 1 for idx, desc in enumerate(sorted(event_id.keys()))}
    assert event_id3 == expected_event_id

    first = np.unique(events3[:, 2])
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    first = np.unique(list(event_id3.values()))
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    events4, event_id4 =\
        events_from_annotations(raw, event_id=None, regexp='.*Left')

    expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id4.keys(), expected_event_id4.keys())

    expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events4[:, 0], events4[:, 0])

    events5, event_id5 = \
        events_from_annotations(raw, event_id=event_id, regexp='.*Left')

    expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id5, expected_event_id5)

    expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events5, events5)

    with pytest.raises(ValueError, match='not find any of the events'):
        events_from_annotations(raw, regexp='not_there')

    with pytest.raises(ValueError, match='Invalid input event_id'):
        events_from_annotations(raw, event_id='wrong')

    # boundary annotations introduced by concatenation must not become BAD or EDGE events
    raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    _, event_id = events_from_annotations(raw_concat)
    assert isinstance(event_id, dict)
    assert len(event_id) > 0
    for kind in ('BAD', 'EDGE'):
        assert '%s boundary' % kind in raw_concat.annotations.description
        for key in event_id.keys():
            assert kind not in key

    # remove all events
    raw.set_annotations(None)
    events7, _ = events_from_annotations(raw)
    assert_array_equal(events7, np.empty((0, 3), dtype=int))
def test_events_from_annot_in_raw_objects():
    """Test basic functionality of events_fron_annot for raw objects."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Visual/Smiley': 32,
        'Motor/Button': 5
    }
    event_map = {v: k for k, v in event_id.items()}
    annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],
                        duration=np.zeros(len(events)),
                        description=[event_map[vv] for vv in events[:, 2]],
                        orig_time=None)
    raw.set_annotations(annot)

    events2, event_id2 = \
        events_from_annotations(raw, event_id=event_id, regexp=None)
    assert_array_equal(events, events2)
    assert_equal(event_id, event_id2)

    events3, event_id3 = \
        events_from_annotations(raw, event_id=None, regexp=None)

    assert_array_equal(events[:, 0], events3[:, 0])
    assert set(event_id.keys()) == set(event_id3.keys())

    first = np.unique(events3[:, 2])
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    first = np.unique(list(event_id3.values()))
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    events4, event_id4 =\
        events_from_annotations(raw, event_id=None, regexp='.*Left')

    expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id4.keys(), expected_event_id4.keys())

    expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events4[:, 0], events4[:, 0])

    events5, event_id5 = \
        events_from_annotations(raw, event_id=event_id, regexp='.*Left')

    expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id5, expected_event_id5)

    expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events5, events5)

    with pytest.raises(ValueError, match='not find any of the events'):
        events_from_annotations(raw, regexp='not_there')

    raw.set_annotations(None)
    events7, _ = events_from_annotations(raw)
    assert_array_equal(events7, np.empty((0, 3), dtype=int))
Exemple #37
0
    def process_raw(self, raw, dataset, return_epochs=False):
        # find the events, first check stim_channels then annotations
        stim_channels = mne.utils._get_stim_channel(
            None, raw.info, raise_error=False)
        if len(stim_channels) > 0:
            events = mne.find_events(raw, shortest_event=0, verbose=False)
        else:
            events, _ = mne.events_from_annotations(raw, verbose=False)

        channels = () if self.channels is None else self.channels

        # picks channels
        picks = mne.pick_types(raw.info, eeg=True, stim=False,
                               include=channels)

        # get event id
        event_id = self.used_events(dataset)

        # pick events, based on event_id
        try:
            events = mne.pick_events(events, include=list(event_id.values()))
        except RuntimeError:
            # skip raw if no event found
            return

        # get interval
        tmin = self.tmin + dataset.interval[0]
        if self.tmax is None:
            tmax = dataset.interval[1]
        else:
            tmax = self.tmax + dataset.interval[0]

        X = []
        for bandpass in self.filters:
            fmin, fmax = bandpass
            # filter data
            raw_f = raw.copy().filter(fmin, fmax, method='iir',
                                      picks=picks, verbose=False)
            # epoch data
            baseline = self.baseline
            if baseline is not None:
                baseline = (self.baseline[0] + dataset.interval[0],
                            self.baseline[1] + dataset.interval[0])
                bmin = baseline[0] if baseline[0] < tmin else tmin
                bmax = baseline[1] if baseline[1] > tmax else tmax
            else:
                bmin = tmin
                bmax = tmax
            epochs = mne.Epochs(raw_f, events, event_id=event_id,
                                tmin=bmin, tmax=bmax, proj=False,
                                baseline=baseline, preload=True,
                                verbose=False, picks=picks,
                                on_missing='ignore')
            if bmin < tmin or bmax > tmax:
                epochs.crop(tmin=tmin, tmax=tmax)
            if self.resample is not None:
                epochs = epochs.resample(self.resample)
            # rescale to work with uV
            if return_epochs:
                X.append(epochs)
            else:
                X.append(dataset.unit_factor * epochs.get_data())

        inv_events = {k: v for v, k in event_id.items()}
        labels = np.array([inv_events[e] for e in epochs.events[:, -1]])

        # if only one band, return a 3D array, otherwise return a 4D
        if len(self.filters) == 1:
            X = X[0]
        else:
            X = np.array(X).transpose((1, 2, 3, 0))

        metadata = pd.DataFrame(index=range(len(labels)))
        return X, labels, metadata
Exemple #38
0
    def process_raw(self, raw, dataset):
        """
        Process one raw data file.

        This function applies the preprocessing and optional epoching to the
        individual run, and returns the data, the labels and a dataframe with
        metadata.

        The metadata dataframe has as many rows as there are trials in the
        data and labels.

        Parameters
        ----------

        raw: mne.Raw instance
            the raw EEG data.

        dataset : dataset instance
            The dataset corresponding to the raw file, mainly used to access
            dataset-specific information.

        Returns
        -------
        X : np.ndarray
            the data that will be used as features for the model

        labels: np.ndarray
            the labels for training / evaluating the model

        metadata: pd.DataFrame
            A dataframe containing the metadata

        """
        # find the events, first check stim_channels then annotations
        stim_channels = mne.utils._get_stim_channel(
            None, raw.info, raise_error=False)
        if len(stim_channels) > 0:
            events = mne.find_events(raw, shortest_event=0, verbose=False)
        else:
            events, _ = mne.events_from_annotations(raw, verbose=False)

        channels = () if self.channels is None else self.channels

        # picks channels
        picks = mne.pick_types(raw.info, eeg=True, stim=False,
                               include=channels)

        # get events id
        event_id = self.used_events(dataset)

        # pick events, based on event_id
        try:
            events = mne.pick_events(events, include=list(event_id.values()))
        except RuntimeError:
            # skip raw if no event found
            return

        # get interval
        tmin = self.tmin + dataset.interval[0]
        if self.tmax is None:
            tmax = dataset.interval[1]
        else:
            tmax = self.tmax + dataset.interval[0]

        X = []
        for bandpass in self.filters:
            fmin, fmax = bandpass
            # filter data
            raw_f = raw.copy().filter(fmin, fmax, method='iir',
                                      picks=picks, verbose=False)
            # epoch data
            epochs = mne.Epochs(raw_f, events, event_id=event_id,
                                tmin=tmin, tmax=tmax, proj=False,
                                baseline=None, preload=True,
                                verbose=False, picks=picks,
                                on_missing='ignore')
            if self.resample is not None:
                epochs = epochs.resample(self.resample)
            # rescale to work with uV
            X.append(dataset.unit_factor * epochs.get_data())

        inv_events = {k: v for v, k in event_id.items()}
        labels = np.array([inv_events[e] for e in epochs.events[:, -1]])

        # if only one band, return a 3D array, otherwise return a 4D
        if len(self.filters) == 1:
            X = X[0]
        else:
            X = np.array(X).transpose((1, 2, 3, 0))

        metadata = pd.DataFrame(index=range(len(labels)))
        return X, labels, metadata, epochs, raw_f
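# A minimal usage sketch (illustrative; `paradigm`, `raw` and `dataset` would
# come from a MOABB paradigm/dataset pair and are not defined in this snippet):
# X, labels, metadata, epochs, raw_f = paradigm.process_raw(raw, dataset)
# print(X.shape, labels.shape, len(metadata))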
Exemple #39
0
def run_epochs(subject, session=None):
    """Extract epochs for one subject."""
    print("Processing subject: %s" % subject)

    raw_list = list()
    print("  Loading raw data")

    # Construct the search path for the data file. `sub` is mandatory
    subject_path = op.join('sub-{}'.format(subject))
    # `session` is optional
    if session is not None:
        subject_path = op.join(subject_path, 'ses-{}'.format(session))

    subject_path = op.join(subject_path, config.kind)

    for run_idx, run in enumerate(config.runs):

        bids_basename = make_bids_basename(subject=subject,
                                           session=session,
                                           task=config.task,
                                           acquisition=config.acq,
                                           run=run,
                                           processing=config.proc,
                                           recording=config.rec,
                                           space=config.space)
        # Prepare a name to save the data
        fpath_deriv = op.join(config.bids_root, 'derivatives',
                              config.PIPELINE_NAME, subject_path)

        raw_fname_in = \
                op.join(fpath_deriv, bids_basename + '_filt_raw.fif')

        print("Input: ", raw_fname_in)

        raw = mne.io.read_raw_fif(raw_fname_in, preload=True)
        raw_list.append(raw)

    print('  Concatenating runs')
    raw = mne.concatenate_raws(raw_list)

    events, event_id = mne.events_from_annotations(raw)
    if "eeg" in config.ch_types:
        raw.set_eeg_reference(projection=True)

    del raw_list

    meg = False
    if 'meg' in config.ch_types:
        meg = True
    elif 'grad' in config.ch_types:
        meg = 'grad'
    elif 'mag' in config.ch_types:
        meg = 'mag'

    eeg = 'eeg' in config.ch_types or config.kind == 'eeg'

    picks = mne.pick_types(raw.info,
                           meg=meg,
                           eeg=eeg,
                           stim=True,
                           eog=True,
                           exclude=())

    # Construct metadata from the epochs
    # Add here if you need to attach a pandas dataframe as metadata
    # to your epochs object:
    # https://martinos.org/mne/dev/auto_tutorials/plot_metadata_epochs.html
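    # A minimal sketch (hypothetical; `events` is defined above) of building a
    # one-row-per-event dataframe that could be passed to mne.Epochs below via
    # its `metadata` argument:
    #   import pandas as pd
    #   metadata = pd.DataFrame({'event_code': events[:, 2]})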

    # Epoch the data
    print('  Epoching')
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        config.tmin,
                        config.tmax,
                        proj=True,
                        picks=picks,
                        baseline=config.baseline,
                        preload=False,
                        decim=config.decim,
                        reject=config.reject)

    print('  Writing epochs to disk')
    bids_basename = make_bids_basename(subject=subject,
                                       session=session,
                                       task=config.task,
                                       acquisition=config.acq,
                                       run=None,
                                       processing=config.proc,
                                       recording=config.rec,
                                       space=config.space)

    epochs_fname = \
        op.join(fpath_deriv, bids_basename + '-epo.fif')
    epochs.save(epochs_fname, overwrite=True)

    if config.plot:
        epochs.plot()
        epochs.plot_image(combine='gfp',
                          group_by='type',
                          sigma=2.,
                          cmap="YlGnBu_r")
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14]  # motor imagery: hands vs feet

raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])

# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))

# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')

events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))

picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

# Define a Monte Carlo cross-validation generator (to reduce variance):
Exemple #41
0
# ica.exclude = [1, 2]  # details on how we picked these are omitted here
# ica.plot_properties(raw_data, picks=ica.exclude)

mag_channels = mne.pick_types(raw_data.info, eeg=True)
print(mag_channels)
raw_data.plot(duration=60,
              order=mag_channels,
              proj=False,
              n_channels=len(mag_channels),
              remove_dc=False)
"""bandpass filter을 통해 Data filtering"""
raw_highpass = raw_data.load_data().filter(l_freq=10, h_freq=60)
print(raw_highpass.info)
"""이벤트 찾"""
# annotation이 있는경우
events, event_id = mne.events_from_annotations(raw_data)
print(events, event_id)

# if a stim channel is present
#events = mne.find_events(raw_data, stim_channel=None)
# print(events[:5])  # show the first 5
"""
def add_arrows(axes):
    # add some arrows at 60 Hz and its harmonics
    for ax in axes:
        freqs = ax.lines[-1].get_xdata()
        psds = ax.lines[-1].get_ydata()

        idx = np.searchsorted(freqs, 60)
        # get ymax of a small region around the freq. of interest
        y = psds[(idx - 4):(idx + 5)].max()
from mne.time_frequency import tfr_multitaper
from mne.stats import permutation_cluster_1samp_test as pcluster_test
from mne.viz.utils import center_cmap


# load and preprocess data ####################################################
subject = 1  # use data from subject 1
runs = [6, 10, 14]  # use only hand and feet motor imagery runs

fnames = eegbci.load_data(subject, runs)
raws = [read_raw_edf(f, preload=True) for f in fnames]
raw = concatenate_raws(raws)

raw.rename_channels(lambda x: x.strip('.'))  # remove dots from channel names

events, _ = mne.events_from_annotations(raw)

picks = mne.pick_channels(raw.info["ch_names"], ["C3", "Cz", "C4"])

# epoch data ##################################################################
tmin, tmax = -1, 4  # define epochs around events (in s)
event_ids = dict(hands=2, feet=3)  # map event IDs to tasks

epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5,
                    picks=picks, baseline=None, preload=True)

# compute ERDS maps ###########################################################
freqs = np.arange(2, 36, 1)  # frequencies from 2-35Hz
n_cycles = freqs  # use constant t/f resolution
vmin, vmax = -1, 1.5  # set min and max ERDS values in plot
baseline = [-1, 0]  # baseline interval (in s)
Exemple #43
0
raw = mne.io.read_raw_brainvision(bids_fname, preload=True, verbose=False)
raw.info['line_freq'] = 50.

# Set montage
montage = mne.channels.make_standard_montage('easycap-M1')
raw.set_montage(montage, verbose=False)

# Set common average reference
raw.set_eeg_reference('average', projection=False, verbose=False)

# Apply bandpass filter
raw.filter(l_freq=0.1, h_freq=None, fir_design='firwin', verbose=False)

# Construct epochs
event_id = {'12hz': 255, '15hz': 155}
events, _ = mne.events_from_annotations(raw, verbose=False)
raw.info["events"] = events
tmin, tmax = -1., 20.  # in s
baseline = None
epochs = mne.Epochs(raw,
                    events=events,
                    event_id=[event_id['12hz'], event_id['15hz']],
                    tmin=tmin,
                    tmax=tmax,
                    baseline=baseline,
                    verbose=False)

###############################################################################
# Frequency analysis
# ------------------
# Now we compute the frequency spectrum of the EEG data.
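
# The original snippet is truncated here; below is a minimal sketch of one way
# this could be done (function choice and parameters are illustrative, not
# from the original source):
import numpy as np
from mne.time_frequency import psd_welch

psds, freqs = psd_welch(epochs, fmin=1., fmax=30., verbose=False)
psds_db = 10. * np.log10(psds)  # convert power to dB
print(psds_db.shape, freqs.shape)  # (n_epochs, n_channels, n_freqs), (n_freqs,)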