Example #1
def test_io_annotation_csv(dummy_annotation_csv_file,
                           dummy_broken_annotation_csv_file,
                           tmpdir_factory):
    """Test CSV input/output."""
    annot = read_annotations(str(dummy_annotation_csv_file))
    assert annot.orig_time == 1038942071.7201
    assert_array_equal(annot.onset, np.array([0., 9.], dtype=np.float32))
    assert_array_almost_equal(annot.duration, [1., 2.425])
    assert_array_equal(annot.description, ['AA', 'BB'])

    # Now test writing
    fname = str(tmpdir_factory.mktemp('data').join('annotations.csv'))
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)

    # Now without an orig_time
    annot.orig_time = None
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)

    # Test broken .csv that does not use timestamps
    with pytest.warns(RuntimeWarning, match='save your CSV as a TXT'):
        annot2 = read_annotations(str(dummy_broken_annotation_csv_file))
Example #2
def test_read_vmrk_annotations():
    """Test load brainvision annotations."""
    sfreq = 1000.0
    annotations = read_annotations(vmrk_path, sfreq=sfreq)
    assert annotations.orig_time == 1384359243.794231
    expected = np.array([0, 486., 496., 1769., 1779., 3252., 3262., 4935.,
                         4945., 5999., 6619., 6629., 7629., 7699.]) / sfreq
    description = ['New Segment/',
                   'Stimulus/S253', 'Stimulus/S255', 'Stimulus/S254',
                   'Stimulus/S255', 'Stimulus/S254', 'Stimulus/S255',
                   'Stimulus/S253', 'Stimulus/S255', 'Response/R255',
                   'Stimulus/S254', 'Stimulus/S255',
                   'SyncStatus/Sync On', 'Optic/O  1']
    assert_array_almost_equal(annotations.onset,
                              expected, decimal=7)
    assert_array_equal(annotations.description, description)

    # Test automatic detection of sfreq from header file
    annotations_auto = read_annotations(vmrk_path)
    assert_array_equal(annotations.onset, annotations_auto.onset)

    # Test vmrk file without annotations
    # delete=False is for Windows compatibility
    with open(vmrk_path) as myfile:
        head = [next(myfile) for x in range(6)]
    with NamedTemporaryFile(mode='w+', suffix='.vmrk', delete=False) as temp:
        for item in head:
            temp.write(item)
        temp.seek(0)
        annotations = read_annotations(temp.name, sfreq=sfreq)
    try:
        temp.close()
        unlink(temp.name)
    except FileNotFoundError:
        pass
Example #3
def test_read_brainstorm_annotations():
    """Test reading for Brainstorm events file."""
    fname = op.join(data_dir, 'events_sample_audvis_raw_bst.mat')
    annot = read_annotations(fname)
    assert len(annot) == 238
    assert annot.onset.min() > 40  # takes into account first_samp
    assert np.unique(annot.description).size == 5
Example #4
def test_read_annotation_txt_orig_time(
        dummy_annotation_txt_file_with_orig_time):
    """Test TXT input/output."""
    annot = read_annotations(str(dummy_annotation_txt_file_with_orig_time))
    assert annot.orig_time == 1038942071.7201
    assert_array_equal(annot.onset, [3.14, 6.28])
    assert_array_equal(annot.duration, [42., 48])
    assert_array_equal(annot.description, ['AA', 'BB'])
Example #5
def test_read_vmrk_annotations():
    """Test load brainvision annotations."""
    sfreq = 1000.0

    # Test vmrk file without annotations
    # delete=False is for Windows compatibility
    with open(vmrk_path) as myfile:
        head = [next(myfile) for x in range(6)]
    with NamedTemporaryFile(mode='w+', suffix='.vmrk', delete=False) as temp:
        for item in head:
            temp.write(item)
        temp.seek(0)
        read_annotations(temp.name, sfreq=sfreq)
    try:
        temp.close()
        unlink(temp.name)
    except FileNotFoundError:
        pass
Example #6
def test_io_annotation_csv(dummy_annotation_csv_file, tmpdir_factory):
    """Test CSV input/output."""
    annot = read_annotations(str(dummy_annotation_csv_file))
    assert annot.orig_time == 1038942071.7201
    assert_array_equal(annot.onset, np.array([0., 9.], dtype=np.float32))
    assert_array_almost_equal(annot.duration, [1., 2.425])
    assert_array_equal(annot.description, ['AA', 'BB'])

    # Now test writing
    fname = str(tmpdir_factory.mktemp('data').join('annotations.csv'))
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)

    # Now without an orig_time
    annot.orig_time = None
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)
Example #7
def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory):
    """Test TXT input/output."""
    annot = read_annotations(str(dummy_annotation_txt_file))
    assert annot.orig_time is None
    assert_array_equal(annot.onset, [3.14, 6.28])
    assert_array_equal(annot.duration, [42., 48])
    assert_array_equal(annot.description, ['AA', 'BB'])

    # Now test writing
    fname = str(tmpdir_factory.mktemp('data').join('annotations.txt'))
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)

    # Now with an orig_time
    annot.orig_time = 1038942071.7201
    annot.save(fname)
    annot2 = read_annotations(fname)
    _assert_annotations_equal(annot, annot2)
Example #8
def load_labelled_data(subjects,
                       recording=[1, 2],
                       path='/home/raphael_hotter/datasets',
                       filter=False):
    files = _fetch_data(subjects, path, recording)
    epochs = []
    for x in tqdm(files):
        # load the data
        edf_file = x[0]
        annot_file = x[1]
        raw = mne.io.read_raw_edf(edf_file, verbose='WARNING')
        annot_train = mne.read_annotations(annot_file)

        raw.set_annotations(annot_train, emit_warning=False)
        raw.set_channel_types(MAPPING)

        if filter:
            raw.load_data()
            raw.filter(None, 30., fir_design='firwin')  # low pass filter

        # extract epochs
        events_train, _ = mne.events_from_annotations(
            raw,
            event_id=annotation_desc_2_event_id,
            chunk_duration=30.,
            verbose='WARNING')

        tmax = 30. - 1. / raw.info['sfreq']  # tmax is included
        recording_epochs = mne.Epochs(raw=raw,
                                      events=events_train,
                                      event_id=event_id,
                                      tmin=0.,
                                      tmax=tmax,
                                      baseline=None,
                                      on_missing='ignore',
                                      verbose='WARNING')
        epochs.append(recording_epochs)
    print("concatenating")
    epochs = mne.concatenate_epochs(epochs)
    print("picking types")
    epochs.pick_types(eeg=True, verbose='WARNING')  # only keep EEG channels
    return epochs
Example #9
def read_raw_and_annotation_data(raw_data_name,
                                 annotation_name,
                                 mapping,
                                 should_plot=False):
    '''Returns a raw object and the annotation object.
    Input:
        - raw_data_name: string of raw data file. Name ends with PSG.edf.
        - annotation_name: string of annotation file. Name ends with Hypnogram.edf.
        - should_plot: plot the data if set to True, for debugging purposes.
    '''
    raw_train = mne.io.read_raw_edf(raw_data_name)
    annot_train = mne.read_annotations(annotation_name)

    raw_train.set_annotations(annot_train, emit_warning=False)
    raw_train.set_channel_types(mapping)

    # plot some data
    if should_plot:
        raw_train.plot(duration=60, scalings='auto')
    return raw_train, annot_train
Example #10
def test_read_ctf_annotations_smoke_test():
    """Test reading CTF marker file.

    `testdata_ctf_mc.ds` has no trials or offsets, therefore it's a plain reading
    of whatever is in the MarkerFile.mrk.
    """
    EXPECTED_ONSET = [
        0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667,
        0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667,
        1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333,
        2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57,
        3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667,
        4.57, 4.7125, 4.85583333, 4.99833333
    ]
    fname = op.join(ctf_dir, 'testdata_ctf_mc.ds')
    annot = read_annotations(fname)
    assert_allclose(annot.onset, EXPECTED_ONSET)

    raw = read_raw_ctf(fname)
    _assert_annotations_equal(raw.annotations, annot)
Example #11
def make_epochs(fif_path, annot_path, beh_path, ep_path):
    raw = read_raw_fif(fif_path)
    if annot_path.exists():
        logger.info("Loading annotations from file.")
        raw.set_annotations(read_annotations(annot_path))
    events = find_events(raw, min_duration=2 / raw.info["sfreq"])

    if beh_path:
        beh_df = pd.read_csv(beh_path, sep="\t")
        metadata = get_events_metadata(events, beh_df)
    else:
        metadata = None

    epochs = Epochs(
        raw,
        events,
        event_id=cfg.EVENTS_ID,
        metadata=metadata,
        **cfg.epochs_config
    )
    epochs.save(ep_path, overwrite=True)
Example #12
def edfplot(psg_name, ann_name):
    raw_train = mne.io.read_raw_edf(psg_name)
    annot_train = mne.read_annotations(ann_name)

    raw_train.set_annotations(annot_train, emit_warning=False)
    raw_train.set_channel_types(mapping)

    # plot some data
    raw_train.plot(duration=60, scalings='auto')

    annotation_desc_2_event_id = {'Sleep stage W': 1,
                                'Sleep stage 1': 2,
                                'Sleep stage 2': 3,
                                'Sleep stage 3': 4,
                                'Sleep stage 4': 4,
                                'Sleep stage R': 5}

    # keep last 30-min wake events before sleep and first 30-min wake events after
    # sleep and redefine annotations on raw data
    annot_train.crop(annot_train[1]['onset'] - 30 * 60,
                    annot_train[-2]['onset'] + 30 * 60)
    raw_train.set_annotations(annot_train, emit_warning=False)

    events_train, _ = mne.events_from_annotations(
        raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)

    # create a new event_id that unifies stages 3 and 4
    event_id = {'Sleep stage W': 1,
                'Sleep stage 1': 2,
                'Sleep stage 2': 3,
                'Sleep stage 3/4': 4,
                'Sleep stage R': 5}

    # plot events
    fig = mne.viz.plot_events(events_train, event_id=event_id,
                            sfreq=raw_train.info['sfreq'],
                            first_samp=events_train[0, 0])

    # keep the color-code for further plotting
    stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
Example #13
def test_read_ctf_annotations():
    """Test reading CTF marker file."""
    EXPECTED_LATENCIES = np.array([
         5640,   7950,   9990,  12253,  14171,  16557,  18896,  20846,  # noqa
        22702,  24990,  26830,  28974,  30906,  33077,  34985,  36907,  # noqa
        38922,  40760,  42881,  45222,  47457,  49618,  51802,  54227,  # noqa
        56171,  58274,  60394,  62375,  64444,  66767,  68827,  71109,  # noqa
        73499,  75807,  78146,  80415,  82554,  84508,  86403,  88426,  # noqa
        90746,  92893,  94779,  96822,  98996,  99001, 100949, 103325,  # noqa
       105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663,  # noqa
       121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210,  # noqa
       139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646,  # noqa
       156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519,  # noqa
       174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330,  # noqa
       192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409,  # noqa
       209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305,  # noqa
       226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762,  # noqa
       243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354,  # noqa
       260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197,  # noqa
       278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183,  # noqa
       295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124,  # noqa
       312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959,  # noqa
       329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034,  # noqa
       344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603,  # noqa
       361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204,  # noqa
       378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785,  # noqa
       396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686,  # noqa
       413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215,  # noqa
       429278, 431668  # noqa
    ]) - 1  # Fieldtrip has 1 sample difference with MNE

    raw = RawArray(
        data=np.empty((1, 432000), dtype=np.float64),
        info=create_info(ch_names=1, sfreq=1200.0))
    raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date'])
    raw.set_annotations(read_annotations(somato_fname))

    events, _ = events_from_annotations(raw)
    latencies = np.sort(events[:, 0])
    assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6)
Example #14
    def _load_raw(raw_fname, ann_fname, preload, load_eeg_only=True,
                  crop_wake_mins=False):
        ch_mapping = {
            'EOG horizontal': 'eog',
            'Resp oro-nasal': 'misc',
            'EMG submental': 'misc',
            'Temp rectal': 'misc',
            'Event marker': 'misc'
        }
        exclude = ch_mapping.keys() if load_eeg_only else ()

        raw = mne.io.read_raw_edf(raw_fname, preload=preload, exclude=exclude)
        annots = mne.read_annotations(ann_fname)
        raw.set_annotations(annots, emit_warning=False)

        if crop_wake_mins > 0:
            # Find first and last sleep stages
            mask = [
                x[-1] in ['1', '2', '3', '4', 'R'] for x in annots.description]
            sleep_event_inds = np.where(mask)[0]

            # Crop raw
            tmin = annots[int(sleep_event_inds[0])]['onset'] - crop_wake_mins * 60
            tmax = annots[int(sleep_event_inds[-1])]['onset'] + crop_wake_mins * 60
            raw.crop(tmin=tmin, tmax=tmax)

        # Rename EEG channels
        ch_names = {
            i: i.replace('EEG ', '') for i in raw.ch_names if 'EEG' in i}
        mne.rename_channels(raw.info, ch_names)

        if not load_eeg_only:
            raw.set_channel_types(ch_mapping)

        basename = os.path.basename(raw_fname)
        subj_nb = int(basename[3:5])
        sess_nb = int(basename[5])
        desc = pd.Series({'subject': subj_nb, 'recording': sess_nb}, name='')

        return raw, desc
Example #15
def preprocess_single_sub(raw, subj_id):
    """Performs all neccessary preprocessing steps on one single subject."""


    # set the EEG Montage. We use 64 chans from the standard 10-05 system.
    montage = mne.channels.make_standard_montage("standard_1005")
    raw.set_montage(montage)

    # Apply high pass/low pass filter
    raw.filter(l_freq = 0.1, h_freq = 120) # using firwin
    raw.notch_filter(freqs=[50])  # bandstop AC

    # mark the bad channels
    raw.load_bad_channels(op.join(BAD_CH_PATH, subj_id + "-bad_ch.csv"))

    # add the bad segments to the annotations
    annots_path = op.join(BAD_SEG_PATH, subj_id + "-annot.csv")
    annots = mne.read_annotations(annots_path)
    raw.set_annotations(annots)

    # Load and apply ICA
    ica = load_ica(subj_id)
    raw = ica.apply(raw)

    # Interpolate bad channels
    raw = raw.interpolate_bads(reset_bads=True)

    # Pick relevant channels:
    # raw.pick_channels() # TODO: define pick channel set here. Do we even need this anymore?

    # Do the surface laplacian
    #raw.plot()
    #raw.plot_psd()
    # TODO: the surface laplacian has parameter lambda and stiffness. Check which fits best
    raw = mne.preprocessing.compute_current_source_density(raw)
    #raw.plot()
    #raw.plot_psd()

    return raw
Example #16
def load_annotations(filepath):
    annotations = mne.read_annotations(filepath)

    annotation_event_id = {'Sleep stage W': 'W',
                           'Sleep stage 1': 'N1',
                           'Sleep stage 2': 'N2',
                           'Sleep stage 3': 'N3',
                           'Sleep stage 4': 'N3',
                           'Sleep stage R': 'R',
                           'Sleep stage ?': 'Sleep stage ?',
                           'Movement time': 'Movement time'}

    annotations_dict = dict()
    last = 0  # to remember the index of the sample of the last label
    for index, desc in enumerate(annotations.description):
        duration = int(
            annotations.duration[index] / 30)  # number of 30s epochs
        # for a label
        annotations_dict[(duration + last) * 3000] = annotation_event_id[desc]
        # duration * 3000 -> number of samples for the label
        last += duration
    return annotations_dict
Example #17
def get_epochs_data(raw_fname, annot_fname):
    print("Extracting Epochs from: %s" % os.path.basename(raw_fname))
    raw = mne.io.read_raw_edf(raw_fname)
    annot = mne.read_annotations(annot_fname)
    raw.set_annotations(annot, emit_warning=False)
    raw.set_channel_types(mapping)
    annotation_desc_2_event_id = {
        'Sleep stage W': 1,
        'Sleep stage 1': 2,
        'Sleep stage 2': 3,
        'Sleep stage 3': 4,
        'Sleep stage 4': 4,
        'Sleep stage R': 5
    }

    events, _ = mne.events_from_annotations(
        raw, event_id=annotation_desc_2_event_id, chunk_duration=30.)

    # create a new event_id that unifies stages 3 and 4
    event_id = {
        'Sleep stage W': 1,
        'Sleep stage 1': 2,
        'Sleep stage 2': 3,
        'Sleep stage 3/4': 4,
        'Sleep stage R': 5
    }

    tmax = 30. - 1. / raw.info['sfreq']  # tmax is included
    picks = mne.pick_types(raw.info, eeg=True, eog=True)
    epochs = mne.Epochs(raw=raw,
                        events=events,
                        picks=picks,
                        preload=True,
                        event_id=event_id,
                        tmin=0.,
                        tmax=tmax,
                        baseline=None)
    return epochs.get_data(), epochs.events[:, 2] - 1
Example #18
def get_data(file_1, file_2, data_path):

    EEG_path = data_path + '/' + file_1
    target_path = data_path + '/' + file_2
    #print(EEG_path)
    print('#########', EEG_path)
    print('#########', target_path)
    rawEEG = read_raw_edf(EEG_path)
    rawtarget = mne.read_annotations(target_path)
    data = rawEEG.to_data_frame().values.T  # all of the data
    EEG_1 = data[0, :]
    EEG_2 = data[1, :]
    onset = rawtarget.onset  # start time of each segment
    duration = rawtarget.duration  # duration of each segment
    stage = rawtarget.description  # sleep stage of each segment
    print(type(stage))
    print(stage[1])
    stage_code = []
    for i in stage:
        stage_code.append(stages[i])
    stage_code = np.array(stage_code)

    return EEG_1, EEG_2, onset, duration, stage_code
Example #19
def mark_bads(raw, subj_id, sensor_map=False,
                       block=True, **plot_kwargs):

    bad_ch_path = op.join(BAD_CH_PATH, subj_id + "-bad_ch.csv")
    bad_seg_path = op.join(BAD_SEG_PATH, subj_id + "-annot.csv")


    if op.isfile(bad_ch_path):
        print("Loading preexisting marked channels\n"
              "Additionally marked channels will be added to file.")
        raw.load_bad_channels(bad_ch_path)

    if op.isfile(bad_seg_path):
        print("Loading preexisting marked segments\n"
              "Additionally marked segments will be added to file.")
        annots = mne.read_annotations(bad_seg_path)
        annots._orig_time = None
        raw.set_annotations(annots)

    # set the EEG Montage. We use 64 chans from the standard 10-05 system.
    montage = mne.channels.make_standard_montage("standard_1005")
    raw.set_montage(montage)

    # Define template annotation labels if none exist yet
    if len(raw.annotations) == 0:
        annot_buffer = np.zeros(len(ANNOTATION_TYPES))
        raw.set_annotations(mne.Annotations(annot_buffer, annot_buffer,
                                            ANNOTATION_TYPES))

    print("Plotting data. Click on channels to mark them as bad. "
          "Type 'a' to enter Annotations mode and mark bad segments.\n"
          "Close the plot to carry on with preprocessing.")

    # plot the data
    if sensor_map:
        raw.plot_sensors(kind='3d', ch_type='eeg', ch_groups='position')
    return raw.plot(block=block, **plot_kwargs)
Example #20
def load_data():
    fld = Path(__file__).parent.parent / "daten"

    # Import the BrainVision data into an MNE Raw object
    raw = mne.io.read_raw_brainvision(fld / "00_rest_pre.vhdr", preload=True)

    # Read in the event information as MNE annotations
    annot = mne.read_annotations(fld / "00_rest_pre.vmrk")

    # Add the annotations to our raw object so we can use them with the data
    raw.set_annotations(annot)

    # Reconstruct the original events from our Raw object
    events, event_ids = mne.events_from_annotations(raw)
    event_labels = {v: k for k, v in event_ids.items()}
    for event in events:
        print("Sample {0:<10} with {1}".format(event[0],
                                               event_labels[event[-1]]))
    data = raw.get_data()
    channel_labels = raw.ch_names
    print(channel_labels)
    print(data.shape)

    return raw
Example #21
###############################################################################
# Step 2: Formatting as BIDS
# --------------------------
#
# Let's start by formatting a single subject. We are reading the data using
# MNE-Python's io module and the `read_raw_edf` function. Note that we must
# use `preload=False`, the default in MNE-Python. It prevents the data from
# being loaded and modified when converting to BIDS.
edf_path = eegbci.load_data(subject=1, runs=2)[0]
raw = mne.io.read_raw_edf(edf_path, preload=False, stim_channel=None)

###############################################################################
# The annotations stored in the file must be read in separately and converted
# into a 2D numpy array of events that is compatible with MNE.
annot = mne.read_annotations(edf_path)
raw.set_annotations(annot)
events, event_id = mne.events_from_annotations(raw)

print(raw)

###############################################################################
# With this step, we have everything to start a new BIDS directory using
# our data. To do that, we can use the function `write_raw_bids`.
# Generally, `write_raw_bids` tries to extract as much
# metadata as possible from the raw data and then formats it in a
# BIDS-compatible way. `write_raw_bids` takes a bunch of inputs, most of
# which are optional. The required inputs are:
#
# * raw
# * bids_basename
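###############################################################################
# A hedged sketch of that call (not part of the original snippet). The exact
# interface depends on the mne-bids version: older releases took a
# ``bids_basename`` string plus an output folder, newer ones take a
# ``BIDSPath``. The subject/task labels and ``bids_root`` below are assumed
# values for illustration only.
from mne_bids import BIDSPath, write_raw_bids

bids_root = './eegbci_bids'  # hypothetical output directory
bids_path = BIDSPath(subject='001', task='rest', root=bids_root)
write_raw_bids(raw, bids_path, overwrite=True)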
Example #22
    def get_data(self):
        """
        Create an iterator fetching data ('EEG Fpz-Cz' and 'EEG Pz-Oz' time-series).

        :return iterator generating dictionaries with the following data:
            - PSG_name: string, name of the PSG file
            - hypnogram_name: string, name of the hypnogram file
            - id: int, patient's id
            - night: int
            - ann_id: int
            - data: array with shape (n_epochs, 2, n_time_steps), epochs of 30s extracted from EEG Fpz-Cz and EEG Pz-Oz
            - times: array with shape (n_epochs,), starting time of each epoch
            - labels: array with shape (n_epochs,), labels of the epochs
        """
        annotation_desc_2_event_id = {
            'Sleep stage W': 1,
            'Sleep stage 1': 2,
            'Sleep stage 2': 3,
            'Sleep stage 3': 4,
            'Sleep stage 4': 4,
            'Sleep stage R': 5
        }

        # create a new event_id that unifies stages 3 and 4
        event_id = {
            'Sleep stage W': 1,
            'Sleep stage 1': 2,
            'Sleep stage 2': 3,
            'Sleep stage 3/4': 4,
            'Sleep stage R': 5
        }

        for patient in range(self.nb_patients):
            raw_file = mne.io.read_raw_edf(
                os.path.join(self.path, self.files[2 * patient]))
            annotations = mne.read_annotations(
                os.path.join(self.path, self.files[2 * patient + 1]))
            raw_data = raw_file.get_data()

            raw_file.set_annotations(annotations, emit_warning=False)

            annotations.crop(annotations[1]['onset'] - 30 * 60,
                             annotations[-2]['onset'] + 30 * 60)
            raw_file.set_annotations(annotations, emit_warning=False)

            a, _ = mne.events_from_annotations(
                raw_file,
                event_id=annotation_desc_2_event_id,
                chunk_duration=30.)

            tmax = 30. - 1. / raw_file.info['sfreq']  # tmax is included

            epochs = mne.Epochs(raw=raw_file,
                                events=a,
                                event_id=event_id,
                                tmin=0.,
                                tmax=tmax,
                                baseline=None)

            # build dictionary
            resu = dict()
            resu['PSG_name'] = self.files[2 * patient]
            resu['hypnogram_name'] = self.files[2 * patient + 1]
            resu['id'] = int(self.files[2 * patient][3:5])
            resu['night'] = int(self.files[2 * patient][5])
            resu['ann_id'] = int(self.files[2 * patient][7])
            resu['data'] = epochs.get_data(picks=['EEG Fpz-Cz', 'EEG Pz-Oz'])
            resu['times'] = epochs.events[:, 0]
            resu['labels'] = epochs.events[:, 2]

            yield resu
Example #23
def loaddata_sleep_edf(opt,
                       filedir,
                       filenum,
                       signal_name,
                       BID='median',
                       filter=True):
    filenames = os.listdir(filedir)
    for filename in filenames:
        if str(filenum) in filename and 'Hypnogram' in filename:
            f_stage_name = filename
        if str(filenum) in filename and 'PSG' in filename:
            f_signal_name = filename
    # print(f_stage_name)

    raw_data = mne.io.read_raw_edf(os.path.join(filedir, f_signal_name),
                                   preload=True)
    raw_annot = mne.read_annotations(os.path.join(filedir, f_stage_name))
    eeg = raw_data.pick_channels([signal_name]).to_data_frame().values.T
    eeg = eeg.reshape(-1)

    raw_data.set_annotations(raw_annot, emit_warning=False)
    event_id = {
        'Sleep stage 4': 0,
        'Sleep stage 3': 0,
        'Sleep stage 2': 1,
        'Sleep stage 1': 2,
        'Sleep stage R': 3,
        'Sleep stage W': 4,
        'Sleep stage ?': 5,
        'Movement time': 5
    }
    events, _ = mne.events_from_annotations(raw_data,
                                            event_id=event_id,
                                            chunk_duration=30.)

    stages = []
    signals = []
    for i in range(len(events) - 1):
        stages.append(events[i][2])
        signals.append(eeg[events[i][0]:events[i][0] + 3000])
    stages = np.array(stages)
    signals = np.array(signals)
    if BID == 'median':
        signals = signals * 13 / np.median(np.abs(signals))

    # #select sleep time
    if opt.select_sleep_time:
        if 'SC' in f_signal_name:
            signals = signals[
                np.clip(int(raw_annot[0]['duration']) // 30 -
                        60, 0, 9999999):int(raw_annot[-2]['onset']) // 30 + 60]
            stages = stages[
                np.clip(int(raw_annot[0]['duration']) // 30 -
                        60, 0, 9999999):int(raw_annot[-2]['onset']) // 30 + 60]

    stages_copy = stages.copy()
    cnt = 0
    for i in range(len(stages_copy)):
        if stages_copy[i] == 5:
            signals = np.delete(signals, i - cnt, axis=0)
            stages = np.delete(stages, i - cnt, axis=0)
            cnt += 1
    print('shape:', signals.shape, stages.shape)

    return signals.astype(np.int16), stages.astype(np.int16)
Example #24
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset,
                            duration,
                            description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    assert_and_remove_boundary_annot(raw_concat)
    # Ensure annotations survive the round-trip crop->concat
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr, ))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    assert_and_remove_boundary_annot(raw)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration', 'orig_time'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
Example #25
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

ALICE, BOB = 0, 1

[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])

mapping = {
    'EOG horizontal': 'eog',
    'Resp oro-nasal': 'resp',
    'EMG submental': 'emg',
    'Temp rectal': 'misc',
    'Event marker': 'misc'
}

raw_train = mne.io.read_raw_edf(alice_files[0])
annot_train = mne.read_annotations(alice_files[1])

raw_train.set_annotations(annot_train, emit_warning=False)
raw_train.set_channel_types(mapping)

# plot some data
# scalings were chosen manually to allow for simultaneous visualization of
# different channel types in this specific dataset
raw_train.plot(start=60,
               duration=60,
               scalings=dict(eeg=1e-4, resp=1e3, eog=1e-4, emg=1e-7,
                             misc=1e-1))

##############################################################################
# Extract 30s events from annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
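# The original snippet is cut off after this heading; below is a hedged sketch
# of the step it announces, following the pattern the other sleep-staging
# examples on this page use (the event_id mapping is an assumption, not taken
# from this snippet's source).
annotation_desc_2_event_id = {'Sleep stage W': 1,
                              'Sleep stage 1': 2,
                              'Sleep stage 2': 3,
                              'Sleep stage 3': 4,
                              'Sleep stage 4': 4,
                              'Sleep stage R': 5}

events_train, _ = mne.events_from_annotations(
    raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)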
Example #26
tasks=['AVLearn','AVLearn']
days=[100,200]

for subject_id in Ids:
    
    subject = group_name+"%d" % subject_id
    print("processing subject: %s" % subject)
    
    for task,day in zip(tasks,days): 

        fname=op.join(MEG_data_path,subject,task+'_%d'%(day+subject_id)+'_tsss_mc.fif')
        RAW=mne.io.read_raw_fif(fname,preload=True)
        
        # noisy data segments related to movements should be manually annotated
        if op.isfile(fname.replace("tsss_mc", "annot")):
            RAW.set_annotations(mne.read_annotations(fname.replace("tsss_mc", "annot")))
            print("Annotation loaded!")
        
        RAW.filter(l_freq=1, h_freq=40.0,fir_design='firwin',n_jobs=-1)  # band-pass filter data      
        
        # cut the continuous MEG data into 1 s epochs
        epochs=mne.Epochs(RAW, events=mne.make_fixed_length_events(RAW),tmin=0, tmax=1, baseline=None,reject=None,picks=mne.pick_types(RAW.info))
        
        ICA_reject_threshold = dict(grad=1500e-13, mag=4.00e-12)
        
        # after excluding the bad-movement artifacts, raise the threshold until
        # less than 5% of the data are marked bad, to select a rejection
        # threshold for ICA
        while mne.Epochs(RAW, events=mne.make_fixed_length_events(RAW), tmin=0, tmax=1, baseline=None,reject=ICA_reject_threshold).drop_bad().drop_log_stats(ignore=('BAD_move','BAD_ACQ_SKIP')) >=5:        
            ICA_reject_threshold['mag']=ICA_reject_threshold['mag']+0.25e-12
            
        print(ICA_reject_threshold)
        # see which channels are noisy
Example #27
sleep_stages = {
    'Sleep stage W' : 0,
    'Sleep stage 1' : 1,
    'Sleep stage 2' : 2,
    'Sleep stage 3' : 3,
    'Sleep stage 4' : 3,
    'Sleep stage R' : 4,
    'Sleep stage ?' : 5
}

for no_subject in subjects:
    # Returns RawGDF and RawEDF
    raw_data = mne.io.read_raw_edf(files[no_subject][0], preload=True)
    # Returns a mne.Annotations object
    annot_data = mne.read_annotations(files[no_subject][1])

    raw_data.set_annotations(annot_data, emit_warning=False)
    raw_data.set_channel_types(mapping)

    df = pd.DataFrame(np.array(raw_data._data).transpose())

    # # keep only the EEG signals data
    subject_data = df.drop([2,3,4,5,6],axis=1)

    nb_entries = subject_data.shape[0]

    classifications = ['Sleep stage W']*nb_entries
    
    # add the classification for every row
    for i, sleep_stage in enumerate(annot_data.description):
Example #28
                                      exclude = (), preload = False, verbose=None)
        # read raw data from file, each channel separately, for flexibility later on
        sampling_rate = data.info['sfreq']
        times = data[0][1] * 100

        raw_ch_fpz = pd.DataFrame(data[0][0].T, columns=['raw'])
        raw_ch_pz = pd.DataFrame(data[1][0].T, columns=['raw'])
        raw_ch_eog = pd.DataFrame(data[2][0].T, columns=['raw'])

        data.set_channel_types(mapping)
        data_orig_time = data.annotations.orig_time

        # find and read annotation file
        for fname in os.listdir(data_dir):
            if (keyword in fname) and (fn[:6] in fname):
                annot = mn.read_annotations(os.path.join(data_dir, fname))
        # generate labels and remove indices
        remove_idx = []
        labels = []
        label_idx = []
        for i in range(len(annot)):
            onset = annot[i]["onset"]
            duration = annot[i]["duration"]
            label = annot[i]["description"]
            label = ann2label[label]
            # detect data which is wrongly labeled or unlabeled
            if label != UNKNOWN:
                if duration % EPOCH_SEC_SIZE != 0:
                    raise Exception("something wrong with epoch length")
                duration_epoch = int(duration / EPOCH_SEC_SIZE)
                label_epoch = np.ones(int(
Example #29
def test_automatic_vmrk_sfreq_recovery():
    """Test proper sfreq inference by checking the onsets."""
    assert_array_equal(read_annotations(vmrk_path, sfreq='auto'),
                       read_annotations(vmrk_path, sfreq=1000.0))
Example #30
    "ATT_24", "ATT_25", "ATT_26", "ATT_27", "ATT_28", "ATT_29"
]
subjs = ["ATT_10"]
runs = [str(x + 1) for x in range(5)]
#runs = ["2"]
filelist = listdir(annot_dir)

for sub in subjs:
    for run in runs:
        raw = mne.io.Raw("{dir}nc_{sub}_{run}-raw.fif".format(dir=proc_dir,
                                                              sub=sub,
                                                              run=run))
        if "nc_{sub}_{run}-annot.fif".format(sub=sub, run=run) in filelist:
            print("Applying annotations...")
            annot = mne.read_annotations(
                "{dir}nc_{sub}_{run}-annot.fif".format(dir=annot_dir,
                                                       sub=sub,
                                                       run=run))
            raw.set_annotations(annot)
        bad_chan_txt = "nc_{sub}_{run}-badchans.txt".format(sub=sub, run=run)
        if bad_chan_txt in filelist:
            print("Applying bad channels...")
            with open(annot_dir + bad_chan_txt, "r") as f:
                bad_chans = f.readlines()
            bad_chans = [x.strip() for x in bad_chans]
            raw.info["bads"] = bad_chans
        raw.info["bads"] += ["A51", "A188", "A71"]  # bad data channels
        raw.info["bads"] += ["MRyA", "MRyaA"]  # bad reference channels
        raw.save("{dir}nc_{sub}_{run}_hand-raw.fif".format(dir=proc_dir,
                                                           sub=sub,
                                                           run=run),
                 overwrite=True)
Example #31
# later_annot WILL be changed, because we're modifying the first element of
# later_annot.onset directly:
later_annot.onset[0] = 99

# later_annot WILL NOT be changed, because later_annot[0] returns a copy
# before the 'onset' field is changed:
later_annot[0]['onset'] = 77

print(later_annot[0]['onset'])

###############################################################################
# Reading and writing Annotations to/from a file
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# `~mne.Annotations` objects have a :meth:`~mne.Annotations.save` method
# which can write :file:`.fif`, :file:`.csv`, and :file:`.txt` formats (the
# format to write is inferred from the file extension in the filename you
# provide). There is a corresponding :func:`~mne.read_annotations` function to
# load them from disk:

raw.annotations.save('saved-annotations.csv')
annot_from_file = mne.read_annotations('saved-annotations.csv')
print(annot_from_file)
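# As a brief added illustration (not part of the original tutorial), the same
# round-trip works for the FIF format, since the writer infers the format from
# the file extension:
raw.annotations.save('saved-annotations.fif')
print(mne.read_annotations('saved-annotations.fif'))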

###############################################################################
# .. LINKS
#
# .. _`POSIX timestamp`: https://en.wikipedia.org/wiki/Unix_time
# .. _`ISO 8601`: https://en.wikipedia.org/wiki/ISO_8601
Example #32
def sleep_physionet_converter(src_path, trg_path, duration=30):
    # Physionet Sleep Dataset Converter (Sleep-EDF expanded-1.0.0)
    # We used EEG Fpz-Cz channels
    # * Input  : Physionet Sleep Dataset (.edf)
    # * Output : Converted Dataset (.npy)

    psg_fnames = glob.glob(os.path.join(src_path, '*PSG.edf'))
    ann_fnames = glob.glob(os.path.join(src_path, '*Hypnogram.edf'))
    psg_fnames.sort()
    ann_fnames.sort()

    annotation_desc_2_event_id = {
        'Sleep stage W': 0,
        'Sleep stage 1': 1,
        'Sleep stage 2': 2,
        'Sleep stage 3': 3,
        'Sleep stage 4': 3,
        'Sleep stage R': 4
    }

    for psg_fname, ann_fname in zip(psg_fnames, ann_fnames):
        total_x, total_y = [], []

        raw = mne.io.read_raw_edf(psg_fname, preload=True)
        ann = mne.read_annotations(ann_fname)
        raw.set_annotations(ann, emit_warning=True)
        raw.set_channel_types(
            mapping={
                'EOG horizontal': 'eog',
                'Resp oro-nasal': 'misc',
                'EMG submental': 'misc',
                'Temp rectal': 'misc',
                'Event marker': 'misc'
            })

        raw.pick_channels(
            ch_names=['EEG Fpz-Cz', 'EOG horizontal', 'EMG submental'])
        event, _ = mne.events_from_annotations(
            raw=raw,
            event_id=annotation_desc_2_event_id,
            chunk_duration=duration)

        t_max = 30. - 1. / raw.info['sfreq']  # t_max is included
        try:
            epochs = mne.Epochs(raw=raw,
                                events=event,
                                event_id=event_id,
                                tmin=0.,
                                tmax=t_max,
                                baseline=None)
        except ValueError:
            continue

        for epoch, event in zip(epochs, epochs.events):
            total_x.append(epoch)
            total_y.append(event[-1])

        total_x = np.array(total_x)
        total_y = np.array(total_y)

        # Saving Numpy Array
        name = os.path.basename(psg_fname).split('-')[0].lower()
        np_path = os.path.join(trg_path, name)
        np.savez(np_path, x=total_x, y=total_y)
Example #33
        for Complex Physiologic Signals. Circulation 101(23):e215-e2203

"""

import numpy as np
import matplotlib.pyplot as plt
import os

import mne
from mne.time_frequency import psd_welch

# %% download the data and create file path variables

# create a folder for the sleep data
path_sleep_data = mne.datasets.sleep_physionet.age.data_path()
if not os.path.isdir(path_sleep_data):
    os.mkdir(path_sleep_data)

link = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/'
print(
    f'\nPlease download the first two files from\n\t{link}\n to the folder\n\t{path_sleep_data}'
)

fp_raw = os.path.join(path_sleep_data, 'SC4001E0-PSG.edf')
fp_annot = os.path.join(path_sleep_data, 'SC4001EC-Hypnogram.edf')

# %% load the data

raw = mne.io.read_raw_edf(fp_raw)
annot = mne.read_annotations(fp_annot)
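# The original snippet ends here; what follows is a hedged sketch of a
# plausible continuation, given the psd_welch import above (the frequency
# band and n_fft value are assumptions):
raw.set_annotations(annot, emit_warning=False)
raw.load_data()
psds, freqs = psd_welch(raw, fmin=0.5, fmax=30., n_fft=2048)
print(psds.shape, freqs.shape)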
Example #34
#
# Read the PSG data and Hypnograms to create a raw object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

ALICE, BOB = 0, 1

[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])

mapping = {'EOG horizontal': 'eog',
           'Resp oro-nasal': 'misc',
           'EMG submental': 'misc',
           'Temp rectal': 'misc',
           'Event marker': 'misc'}

raw_train = mne.io.read_raw_edf(alice_files[0])
annot_train = mne.read_annotations(alice_files[1])

raw_train.set_annotations(annot_train, emit_warning=False)
raw_train.set_channel_types(mapping)

# plot some data
raw_train.plot(duration=60, scalings='auto')

##############################################################################
# Extract 30s events from annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The Sleep Physionet dataset is annotated using `8 labels <physionet_labels>`:
# Wake (W), Stage 1, Stage 2, Stage 3, Stage 4 corresponding to the range from
# light sleep to deep sleep, REM sleep (R) where REM is the abbreviation for
# Rapid Eye Movement sleep, movement (M), and Stage (?) for any non-scored
# segment.
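# A hedged sketch of the step that typically follows here: keep only ~30 min
# of wake time before and after sleep, then re-apply the cropped annotations
# (this continuation is an assumption, mirroring the cropping used in other
# examples on this page).
annot_train.crop(annot_train[1]['onset'] - 30 * 60,
                 annot_train[-2]['onset'] + 30 * 60)
raw_train.set_annotations(annot_train, emit_warning=False)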
Example #35
ann_fnames = []

for i in range(len(psg_files)):
    psg_fnames.append(
        'D:/Project/Dataset/Sleep EDF Database Expanded/sleep-cassette/' +
        psg_files[i] + '.edf')
    ann_fnames.append(
        'D:/Project/Dataset/Sleep EDF Database Expanded/sleep-cassette/' +
        ann_files[i] + '.edf')

for i in range(len(psg_fnames)):
    raw = read_raw_edf(psg_fnames[i], preload=True, stim_channel=None)
    sampling_rate = raw.info['sfreq']
    raw_ch_df = raw.to_data_frame(scaling_time=sampling_rate)[select_chs]
    raw_ch_df.set_index(np.arange(len(raw_ch_df)))
    ann = read_annotations(ann_fnames[i])
    # Generate labels and remove indices
    remove_idx = []
    labels = []
    label_idx = []
    for x in range(len(ann.description)):
        onset_sec = ann.onset[x]
        duration_sec = ann.duration[x]
        ann_str = ann.description[x]
        label = ann2label[ann_str]
        if label != UNKNOWN:
            if duration_sec % EPOCH_SEC_SIZE != 0:
                raise Exception("Something wrong")
            duration_epoch = int(duration_sec / EPOCH_SEC_SIZE)
            label_epoch = np.ones(duration_epoch, dtype=np.int) * label
            labels.append(label_epoch)
Example #36
 
# Initialize path and read in data to MNE
print("\n\n=== Converting '{0}'... ===\n".format(filepath))
bids_path = BIDSPath(subject=study_id, task=taskname, root=bids_root)
dat = mne.io.read_raw_edf(filepath, preload=False)

# Fix channel types and round numbers before export
dat.set_channel_types(channel_type_overrides)
dat.info['sfreq'] = round(dat.info['sfreq'], 8)
dat.info['lowpass'] = round(dat.info['lowpass'], 8)
dat.info['highpass'] = round(dat.info['highpass'], 8)
dat.info['line_freq'] = 60

# Extract trigger event data from EEG annotations
try:
    annot = mne.read_annotations(filepath)
    if len(annot) > 10:
        dat.set_annotations(annot)
        events, e_id = mne.events_from_annotations(dat, event_id=event_map)
        orig_time = dat.annotations.orig_time
    else:
        events = mne.find_events(dat, shortest_event=1, mask=65280,
                                 mask_type="not_and")
        orig_time = dat.info['meas_date']
    events = mne.pick_events(events, include=list(event_map.values()))
    annot_new = mne.annotations_from_events(
        events=events, sfreq=dat.info['sfreq'], orig_time=orig_time,
        event_desc=event_name_map, verbose=False
    )
    dat.set_annotations(annot_new)
except (ValueError, RuntimeError):
Example #37
cut_pulse = [-2 / 1000, 18 / 1000]

# Load
raw = mne.io.read_raw_brainvision(eeg_dir + raw_name,
                                  eog=['EOG_aux1_vert', 'EOG_aux2_horz'],
                                  preload=False)

# Montage
import sys
sys.path.append(function_dir)
from my_mne_functions import import_dig_electrodes
montage = import_dig_electrodes(elec_dir + elec_file, raw, plot=plot_steps)
raw.set_montage(montage)

# Events
events = mne.read_annotations(eeg_dir + raw_name[0:-4] + 'vmrk')
raw.set_annotations(events)
events, event_ids = mne.events_from_annotations(raw)

# Interpolate Pulse
data, times = raw[:]
count_trials = 0
for event in events:
    if event[2] == 13:
        count_trials += 1
        pulse_start = int(event[0] + cut_pulse[0] * raw.info['sfreq'])
        pulse_end = int(event[0] + cut_pulse[1] * raw.info['sfreq'])
        for chan in range(0, len(data)):
            #interpolate (slope=dy/dx, then y=i+xb)
            dy = (data[chan][pulse_end] - data[chan][pulse_start])
            dx = times[pulse_end] - times[pulse_start]
Example #38
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset, duration, description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    # Get rid of the boundary events
    raw_concat.annotations.delete(-1)
    raw_concat.annotations.delete(-1)
    # Ensure annotations survive the round-trip crop->concat
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr,))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration', 'orig_time'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
Example #39
def test_automatic_vmrk_sfreq_recovery():
    """Test proper sfreq inference by checking the onsets."""
    assert_array_equal(read_annotations(vmrk_path, sfreq='auto'),
                       read_annotations(vmrk_path, sfreq=1000.0))
Example #40
def load_precomputed_badData(bids_root, subject_id):
    fn = _get_filepath(bids_root, subject_id)
    annotations = mne.read_annotations(fn + 'badSegments.csv')
    badChannels = np.loadtxt(fn + 'badChannels.tsv', delimiter='\t')
    return annotations, badChannels