Example No. 1
def test_gdf_data():
    """Test reading raw GDF 1.x files."""
    with warnings.catch_warnings(record=True):  # interpolate / overlap events
        raw = read_raw_edf(gdf1_path + '.gdf', eog=None,
                           misc=None, preload=True, stim_channel='auto')
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, _ = raw[picks]

    # this .npy was generated using the official biosig python package
    raw_biosig = np.load(gdf1_path + '_biosig.npy')
    raw_biosig = raw_biosig * 1e-6  # data are stored in microvolts
    data_biosig = raw_biosig[picks]

    # Assert data are almost equal
    assert_array_almost_equal(data, data_biosig, 8)

    # Test for stim channel
    events = find_events(raw, shortest_event=1)
    # The events are overlapping.
    assert_array_equal(events[:, 0], raw._raw_extras[0]['events'][1][::2])

    # Test events are encoded to stim channel.
    events = find_events(raw)
    evs = raw.find_edf_events()
    assert_true(all([event in evs[1] for event in events[:, 0]]))
Example No. 2
def test_gdf_data():
    """Test reading raw GDF 1.x files."""
    with pytest.warns(RuntimeWarning, match='Overlapping events'):
        raw = read_raw_edf(gdf1_path + '.gdf', eog=None,
                           misc=None, preload=True, stim_channel='auto')
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, _ = raw[picks]

    # this .npy was generated using the official biosig python package
    raw_biosig = np.load(gdf1_path + '_biosig.npy')
    raw_biosig = raw_biosig * 1e-6  # data are stored in microvolts
    data_biosig = raw_biosig[picks]

    # Assert data are almost equal
    assert_array_almost_equal(data, data_biosig, 8)

    # Test for stim channel
    events = find_events(raw, shortest_event=1)
    # The events are overlapping.
    assert_array_equal(events[:, 0], raw._raw_extras[0]['events'][1][::2])

    # Test events are encoded to stim channel.
    events = find_events(raw)
    evs = raw.find_edf_events()
    assert (all([event in evs[1] for event in events[:, 0]]))

    # gh-5604
    assert raw.info['meas_date'] == DATE_NONE
    with pytest.warns(RuntimeWarning, match='Overlapping events'):
        _test_raw_reader(read_raw_edf, input_fname=gdf1_path + '.gdf',
                         eog=None, misc=None, stim_channel='auto')
Example No. 3
def test_kit2fiff_model():
    """Test CombineMarkersModel Traits Model"""
    from mne.gui._kit2fiff_gui import Kit2FiffModel

    model = Kit2FiffModel()
    assert_false(model.can_save)
    model.markers.mrk1.file = mrk_pre_path
    model.markers.mrk2.file = mrk_post_path
    model.sqd_file = sqd_path
    model.hsp_file = hsp_path
    assert_false(model.can_save)
    model.fid_file = fid_path

    # export raw
    assert_true(model.can_save)
    raw_out = model.get_raw()
    raw_out.save(tgt_fname)
    raw = Raw(tgt_fname)

    # Compare exported raw with the original binary conversion
    raw_bin = Raw(fif_path)
    trans_bin = raw.info['dev_head_t']['trans']
    assert_equal(raw_bin.info.keys(), raw.info.keys())
    trans_transform = raw_bin.info['dev_head_t']['trans']
    assert_allclose(trans_transform, trans_bin, 0.1)

    # Averaging markers
    model.markers.mrk3.method = "Average"
    trans_avg = model.dev_head_trans
    assert_false(np.all(trans_avg == trans_transform))
    assert_allclose(trans_avg, trans_bin, 0.1)

    # Test exclusion of one marker
    model.markers.mrk3.method = "Transform"
    model.use_mrk = [1, 2, 3, 4]
    assert_false(np.all(model.dev_head_trans == trans_transform))
    assert_false(np.all(model.dev_head_trans == trans_avg))
    assert_false(np.all(model.dev_head_trans == np.eye(4)))

    # test setting stim channels
    model.stim_slope = '+'
    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')

    model.stim_chs = '<'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)

    events_rev = events_bin.copy()
    events_rev[:, 2] = 1
    model.stim_chs = '>'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_rev)

    model.stim_chs = 'man'
    model.stim_chs_manual = list(range(167, 159, -1))
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)
Example No. 4
def test_raw_events():
    """Test creating stim channel from raw SQD file."""
    def evts(a, b, c, d, e, f=None):
        out = [[269, a, b], [281, b, c], [1552, c, d], [1564, d, e]]
        if f is not None:
            out.append([2000, e, f])
        return out

    raw = read_raw_kit(sqd_path)
    assert_array_equal(find_events(raw, output='step', consecutive=True),
                       evts(255, 254, 255, 254, 255, 0))

    raw = read_raw_kit(sqd_path, slope='+')
    assert_array_equal(find_events(raw, output='step', consecutive=True),
                       evts(0, 1, 0, 1, 0))

    raw = read_raw_kit(sqd_path, stim='<', slope='+')
    assert_array_equal(find_events(raw, output='step', consecutive=True),
                       evts(0, 128, 0, 128, 0))

    raw = read_raw_kit(sqd_path, stim='<', slope='+', stim_code='channel')
    assert_array_equal(find_events(raw, output='step', consecutive=True),
                       evts(0, 160, 0, 160, 0))

    raw = read_raw_kit(sqd_path, stim=range(160, 162), slope='+',
                       stim_code='channel')
    assert_array_equal(find_events(raw, output='step', consecutive=True),
                       evts(0, 160, 0, 160, 0))
Example No. 5
def _assert_iter_sim(raw_sim, raw_new, new_event_id):
    events = find_events(raw_sim, initial_event=True)
    events_tuple = find_events(raw_new, initial_event=True)
    assert_array_equal(events_tuple[:, :2], events[:, :2])
    assert_array_equal(events_tuple[:, 2], new_event_id)
    data_sim = raw_sim[:-1][0]
    data_new = raw_new[:-1][0]
    assert_array_equal(data_new, data_sim)
Example No. 6
def test_fix_stim():
    """Test fixing stim STI016 for Neuromag."""
    raw = read_raw_fif(raw_fname, preload=True)
    # 32768 (016) + 3 (002+001) bits gets incorrectly coded during acquisition
    raw._data[raw.ch_names.index('STI 014'), :3] = [0, -32765, 0]
    with pytest.warns(RuntimeWarning, match='STI016'):
        events = find_events(raw, 'STI 014')
    assert_array_equal(events[0], [raw.first_samp + 1, 0, 32765])
    events = find_events(raw, 'STI 014', uint_cast=True)
    assert_array_equal(events[0], [raw.first_samp + 1, 0, 32771])
Example No. 7
def test_fix_stim():
    """Test fixing stim STI016 for Neuromag."""
    raw = read_raw_fif(raw_fname, preload=True)
    # 32768 (016) + 3 (002+001) bits gets incorrectly coded during acquisition
    raw._data[raw.ch_names.index('STI 014'), :3] = [0, -32765, 0]
    with warnings.catch_warnings(record=True) as w:
        events = find_events(raw, 'STI 014')
    assert_true(len(w) >= 1)
    assert_true(any('STI016' in str(ww.message) for ww in w))
    assert_array_equal(events[0], [raw.first_samp + 1, 0, 32765])
    events = find_events(raw, 'STI 014', uint_cast=True)
    assert_array_equal(events[0], [raw.first_samp + 1, 0, 32771])
Example No. 8
def test_events():
    """Test reading and modifying events"""
    tempdir = _TempDir()
    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True)

    # check that events are read and stim channel is synthesized correctly
    events = raw.get_brainvision_events()
    assert_array_equal(
        events,
        [
            [487, 1, 253],
            [497, 1, 255],
            [1770, 1, 254],
            [1780, 1, 255],
            [3253, 1, 254],
            [3263, 1, 255],
            [4936, 1, 253],
            [4946, 1, 255],
            [6620, 1, 254],
            [6630, 1, 255],
        ],
    )

    mne_events = mne.find_events(raw, stim_channel="STI 014")
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # modify events and check that stim channel is updated
    index = events[:, 2] == 255
    events = events[index]
    raw.set_brainvision_events(events)
    mne_events = mne.find_events(raw, stim_channel="STI 014")
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # remove events
    nchan = raw.info["nchan"]
    ch_name = raw.info["chs"][-2]["ch_name"]
    events = np.empty((0, 3))
    raw.set_brainvision_events(events)
    assert_equal(raw.info["nchan"], nchan - 1)
    assert_equal(len(raw._data), nchan - 1)
    assert_equal(raw.info["chs"][-1]["ch_name"], ch_name)
    fname = op.join(tempdir, "evt_raw.fif")
    raw.save(fname)

    # add events back in
    events = [[10, 1, 2]]
    raw.set_brainvision_events(events)
    assert_equal(raw.info["nchan"], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info["chs"][-1]["ch_name"], "STI 014")
Example No. 9
def test_utils():
    """Test utils."""

    event_id = {'Visual/Left': 3}
    tmin, tmax = -0.2, 0.5
    events = mne.find_events(raw)
    picks = mne.pick_channels(raw.info['ch_names'],
                              ['MEG 2443', 'MEG 2442', 'MEG 2441'])
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        picks=picks, baseline=(None, 0),
                        reject=None, preload=True)

    this_epoch = epochs.copy()
    epochs_clean = clean_by_interp(this_epoch)
    assert_array_equal(this_epoch.get_data(), epochs.get_data())
    assert_raises(AssertionError, assert_array_equal, epochs_clean.get_data(),
                  this_epoch.get_data())

    picks_meg = mne.pick_types(evoked.info, meg='grad', eeg=False, exclude=[])
    picks_eeg = mne.pick_types(evoked.info, meg=False, eeg=True, exclude=[])
    picks_bad_meg = mne.pick_channels(evoked.ch_names, include=['MEG 2443'])
    picks_bad_eeg = mne.pick_channels(evoked.ch_names, include=['EEG 053'])
    evoked_orig = evoked.copy()
    for picks, picks_bad in zip([picks_meg, picks_eeg],
                                [picks_bad_meg, picks_bad_eeg]):
        evoked_autoreject = interpolate_bads(evoked, picks=picks,
                                             reset_bads=False)
        evoked.interpolate_bads(reset_bads=False)
        assert_array_equal(evoked.data[picks_bad],
                           evoked_autoreject.data[picks_bad])
        assert_raises(AssertionError, assert_array_equal,
                      evoked_orig.data[picks_bad], evoked.data[picks_bad])
Example No. 10
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        with warnings.catch_warnings(record=True) as w:
            compute_covariance([epochs, epochs_2])
            assert_equal(len(w), 0)
            if kind == 'shift':
                epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
            else:  # None
                epochs_2.info['dev_head_t'] = None
            assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='ignore')
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
            assert_raises(ValueError, compute_covariance, epochs,
                          on_mismatch='x')
        assert_true(any('transform mismatch' in str(ww.message) for ww in w))
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
Example No. 11
    def crop_file(self):
        # get the breaks
        breaks = self._get_breaks()
        # read the bdf_file
        raw = mne.io.read_raw_edf(self.bdf_fname, preload=True,
                                  stim_channel='Status')
        # set the counters for each block type
        pv_counter = 0
        wm_counter = 0

        # loop through the break points
        for b in range(2, 6):
            # crop the file
            this_raw = raw.crop(tmin=breaks[b-1], tmax=breaks[b], copy=True)
            # get the events
            eve = mne.find_events(this_raw, mask=255)

            # assert that there are 48 (passive view) or 144 (working memory) events
            assert np.in1d(len(eve), [48, 144]).all()

            # construct the filename to save to
            if len(eve) == 48:  # passive view block
                fname = data_dir + '/RAWFIF/%d_PV%d-raw.fif' % \
                                   (self.participant_id, pv_counter)
                pv_counter += 1
            else:  # working memory block
                fname = data_dir + '/RAWFIF/%d_WM%d-raw.fif' % \
                                   (self.participant_id, wm_counter)
                wm_counter += 1
            # save the data
            this_raw.save(fname, proj=False, overwrite=True)
Example No. 12
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch."""
    raw = read_raw_fif(raw_fname, add_eeg_ref=False).crop(0, 5).load_data()
    events = find_events(raw, stim_channel="STI 014")
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0.0, preload=True, add_eeg_ref=False)
    for kind in ("shift", "None"):
        epochs_2 = epochs.copy()
        # This should be fine
        with warnings.catch_warnings(record=True) as w:
            compute_covariance([epochs, epochs_2])
            assert_equal(len(w), 0)
            if kind == "shift":
                epochs_2.info["dev_head_t"]["trans"][:3, 3] += 0.001
            else:  # None
                epochs_2.info["dev_head_t"] = None
            assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch="ignore")
            assert_equal(len(w), 0)
            compute_covariance([epochs, epochs_2], on_mismatch="warn")
            assert_raises(ValueError, compute_covariance, epochs, on_mismatch="x")
        assert_true(any("transform mismatch" in str(ww.message) for ww in w))
    # This should work
    epochs.info["dev_head_t"] = None
    epochs_2.info["dev_head_t"] = None
    compute_covariance([epochs, epochs_2], method=None)
Example No. 13
def test_io_egi_mff():
    """Test importing EGI MFF simple binary files."""
    egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi.mff')
    raw = read_raw_egi(egi_fname_mff, include=None)
    assert ('RawMff' in repr(raw))
    include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7']
    raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname_mff,
                           include=include, channel_naming='EEG %03d')

    assert_equal('eeg' in raw, True)
    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
    assert_equal(len(eeg_chan), 129)
    picks = pick_types(raw.info, eeg=True)
    assert_equal(len(picks), 129)
    assert_equal('STI 014' in raw.ch_names, True)

    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 8)
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert (np.unique(events[:, 0])[0] != 0)
    assert (np.unique(events[:, 2])[0] != 0)

    pytest.raises(ValueError, read_raw_egi, egi_fname_mff, include=['Foo'],
                  preload=False)
    pytest.raises(ValueError, read_raw_egi, egi_fname_mff, exclude=['Bar'],
                  preload=False)
    for ii, k in enumerate(include, 1):
        assert (k in raw.event_id)
        assert (raw.event_id[k] == ii)
Example No. 14
def make_events_files(sub_id, session):
    """ This function read in a fiff file and write out a event file
    """

    # SETUP AND LOAD FILES ####
    # name with subject id & session
    fname = "sub_%d_%s" % (sub_id, session)

    # load the raw fif
    print('\nLoading raw file')
    raw = fiff.Raw(fname + "_tsss_mc_autobad.fif", preload=False)

    # EPOCHS ####
    events = mne.find_events(raw, stim_channel="STI101")
    events_classic = []
    events_interupt = []
    for i in range(len(events)):
        if i > 0:
            if events[i, 2] == 1 and events[i - 1, 2] == 1:
                events_classic.append(i)
            elif events[i, 2] == 1 and events[i - 1, 2] == 2:
                events_interupt.append(i)
   
    if len(events_classic) > 0:
        outname = "sub_%d_%s.eve" % (sub_id, session)
        mne.write_events(outname, events[events_classic])

    if len(events_interupt) != 0:
        outname_classic = "sub_%d_%s_%s.eve" % (sub_id, session, "classic")
        outname_interrupt = "sub_%d_%s_%s.eve" % (sub_id, session, "interrupt")

        mne.write_events(outname_classic, events[events_classic])
        mne.write_events(outname_interrupt, events[events_interupt])
Example No. 15
    def load_raw(self, filename=None, mark_bads=True):

        if filename is None:
            filename = op.join(self.ptp_dir, '%s_%s-raw.fif' %(self.subject, self.experiment))
        print "Loading raw data from file %s" %filename
        raw = mne.io.read_raw_fif(filename, preload=True, verbose=False)
        event_check = mne.find_events(raw=raw, stim_channel='STI 014', min_duration=0.002, verbose=False)
        exp_event_check = event_check[np.in1d(event_check[:, 2], list(self.trigger_scheme.values()))]
        print(len(event_check), "total events found.")
        print(len(exp_event_check), "events found with experiment-relevant triggers.")

        if mark_bads:
            bad_file = glob.glob(self.processed_files + '*bad_channels.txt')
            if len(bad_file) == 0:
                print "No bad channels file found."
            elif len(bad_file) > 1:
                raise RuntimeError("Two or more bad channels files were found")
            else:
                bads = [i.strip() for i in open(bad_file[0]).readlines()]
                raw.info['bads'] += bads
                print "Marking the following channels as bad: " + str(bads)

        self.add_preprocessing_notes("Raw data loaded from file %s" %filename)
        for bad in raw.info['bads']:
            self.add_preprocessing_notes("%s marked as bad channel." %bad)

        self.raw_unfiltered = raw

        return raw
Example No. 16
    def load_raw_from_blocknums(self,block_ns=['1']):
        """
        block_ns:       list of strings, corresponding to the numbers of the raw files to combine
        """

        raws = []

        # loads each of the raw blocks - assumes file is called _01_raw.fif, _02_raw.fif...
        for blockn in block_ns:
            raw_oneblock = self.ptp_dir + self.subj_code + '_0%s_raw.fif' % blockn
            raw = mne.io.Raw(raw_oneblock, preload=True, verbose=False)

            # make sure all have the same name of the trigger line, and same bad channels marked (i.e., none)
            raw.info['ch_names'][0] = 'STI 014'
            raw.info['bads'] = []

            # check number of events in each raw
            event_check = mne.find_events(raw=raw, stim_channel='STI 014', min_duration=0.002, verbose=False)
            print(len(event_check), "events found for raw_0%s" % blockn)

            self.add_preprocessing_notes( "%s events found for raw_0%s" % (str(len(event_check)), str(blockn)) )

            # append raws together
            raws.append(raw)

        # put all the raws together into one object
        for number in list(map(int, block_ns))[:-1]:
            raws[0].append(raws[number])
            raw = raws[0]

        # save the unfiltered raw to the class
        self.raw_unfiltered = raw

        return self.raw_unfiltered
Example No. 17
def test_find_events():
    """Test find events in raw file
    """
    events = mne.read_events(fname)
    raw = mne.fiff.Raw(raw_fname)
    events2 = mne.find_events(raw)
    assert_array_almost_equal(events, events2)
Example No. 18
def test_tfr_with_inverse_operator():
    """Test time freq with MNE inverse computation"""

    tmin, tmax, event_id = -0.2, 0.5, 1

    # Setup for reading the raw data
    raw = io.Raw(fname_data)
    events = find_events(raw, stim_channel='STI 014')
    inverse_operator = read_inverse_operator(fname_inv)
    inv = prepare_inverse_operator(inverse_operator, nave=1,
                                   lambda2=1. / 9., method="dSPM")

    raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg=True, eeg=False, eog=True,
                       stim=False, exclude='bads')

    # Load condition 1
    event_id = 1
    events3 = events[:3]  # take 3 events to keep the computation time low
    epochs = Epochs(raw, events3, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
                    preload=True)

    # Compute a source estimate per frequency band
    bands = dict(alpha=[10, 10])
    label = read_label(fname_label)

    stcs = source_band_induced_power(epochs, inv, bands,
                                     n_cycles=2, use_fft=False, pca=True,
                                     label=label, prepared=True)

    stc = stcs['alpha']
    assert_true(len(stcs) == len(list(bands.keys())))
    assert_true(np.all(stc.data > 0))
    assert_array_almost_equal(stc.times, epochs.times)

    stcs_no_pca = source_band_induced_power(epochs, inv, bands,
                                            n_cycles=2, use_fft=False,
                                            pca=False, label=label,
                                            prepared=True)

    assert_array_almost_equal(stcs['alpha'].data, stcs_no_pca['alpha'].data)

    # Compute a source estimate per frequency band
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
                    preload=True)

    frequencies = np.arange(7, 30, 2)  # define frequencies of interest
    power, phase_lock = source_induced_power(epochs, inv,
                                             frequencies, label,
                                             baseline=(-0.1, 0),
                                             baseline_mode='percent',
                                             n_cycles=2, n_jobs=1,
                                             prepared=True)
    assert_true(np.all(phase_lock > 0))
    assert_true(np.all(phase_lock <= 1))
    assert_true(np.max(power) > 10)
Example No. 19
def fiff_events(source_path=None, name=None):
    """
    Returns a dataset containing events from a raw fiff file. Use
    :func:`fiff_epochs` to load MEG data corresponding to those events.
    
    source_path : str (path)
        the location of the raw file (if ``None``, a file dialog will be 
        displayed).
    
    name : str
        A name for the dataset.
    """
    if source_path is None:
        source_path = ui.ask_file("Pick a Fiff File", "Pick a Fiff File",
                                  ext=[('fif', 'Fiff')])
    
    if name is None:
        name = os.path.basename(source_path)
    
    raw = mne.fiff.Raw(source_path)
    events = mne.find_events(raw)
    if any(events[:,1] != 0):
        raise NotImplementedError("Events starting with ID other than 0")
        # this was the case in the raw-eve file, which contained all event 
        # offsets, but not in the raw file created by kit2fiff. For handling
        # see :func:`fiff_event_file`
    istart = _data.var(events[:,0], name='i_start')
    event = _data.var(events[:,2], name='eventID')
    info = {'source': source_path}
    return _data.dataset(event, istart, name=name, info=info)
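A minimal usage sketch of the function above; the file path and dataset name are hypothetical, and passing source_path=None would open a file dialog instead:

# Load the events of one raw FIFF file into a dataset; the returned dataset
# holds the sample index ('i_start') and trigger value ('eventID') of every
# event, ready to be combined with MEG data via fiff_epochs.
ds = fiff_events('/data/sub01_raw.fif', name='sub01')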
Example No. 20
def test_gdf2_data():
    """Test reading raw GDF 2.x files."""
    raw = read_raw_edf(gdf2_path + '.gdf', eog=None, misc=None, preload=True,
                       stim_channel='STATUS')

    nchan = raw.info['nchan']
    ch_names = raw.ch_names  # Renamed STATUS -> STI 014.
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, _ = raw[picks]

    # This .mat was generated using the official biosig matlab package
    mat = sio.loadmat(gdf2_path + '_biosig.mat')
    data_biosig = mat['dat'] * 1e-6  # data are stored in microvolts
    data_biosig = data_biosig[picks]

    # Assert data are almost equal
    assert_array_almost_equal(data, data_biosig, 8)

    # Find events
    events = find_events(raw, verbose=1)
    events[:, 2] >>= 8  # last 8 bits are system events in biosemi files
    assert_equal(events.shape[0], 2)  # 2 events in file
    assert_array_equal(events[:, 2], [20, 28])

    with warnings.catch_warnings(record=True) as w:
        # header contains no events
        raw = read_raw_edf(gdf2_path + '.gdf', stim_channel='auto')
        assert_equal(len(w), 1)
        assert_true(str(w[0].message).startswith('No events found.'))
    assert_equal(nchan, raw.info['nchan'])  # stim channel not constructed
    assert_array_equal(ch_names[1:], raw.ch_names[1:])
Example No. 21
def run_evoked(subject_id):
    subject = "sub%03d" % subject_id
    print("processing subject: %s" % subject)
    data_path = op.join(meg_dir, subject)
    for run in range(1, 7):
        run_fname = op.join(data_path, 'run_%02d_filt_sss_raw.fif' % run)
        if not os.path.exists(run_fname):
            continue

        raw = mne.io.Raw(run_fname)

        events = mne.find_events(raw, stim_channel='STI101', consecutive='increasing',
                                 min_duration=0.003, verbose=True)
        for key in all_events_id:
            events = mne.merge_events(events, all_events_id[key], events_id[key])

        mask = (events[:, 2] == 1) | (events[:, 2] == 2) | (events[:, 2] == 3)
        events = events[mask]

        # df = pd.read_csv(data_path + '/Trials/run_%02d_trldef.txt' % run, sep='\t', header=None)
        # ev = np.c_[df[1], np.zeros_like(df[0]), le.transform(df[3])]
        # ev[:, 0] = np.round(ev[:, 0] / 3.)  # decimation by 3
        # ev[:, 0] += raw.first_samp
        # ev[:, 0] -= 452
        # ev[ev[:, 2] == 3, 2] = 4
        # ev[ev[:, 2] == 2, 2] = 3
        # ev[ev[:, 2] == 4, 2] = 2

        # print events - ev
        print "S %s - R %s" % (subject, run)
        # print (events - ev)[:, 2]
        # assert not np.any((events - ev)[:, 1:])
        # assert np.max(np.abs((events - ev)[:, 0])) == 1

        mne.write_events(op.join(data_path, 'run_%02d_filt_sss-eve.fif' % run), events)
Example No. 22
    def generate_beat_events(self, verbose=None):

        assert self.filtered is True
        assert self.downsampled is False
        raw = self.raw

        ## generate beat events and epochs before downsampling
        # read trial events

        if hasattr(self, 'trial_events'):
            trial_events = self.trial_events
        else:
            trial_events = mne.find_events(raw, stim_channel='STI 014', shortest_event=0)

        # generate simple beat events with same ID (10000)
        beat_events = generate_beat_events(trial_events,
                                           version=self.stimuli_version,
                                           beat_event_id_generator=simple_beat_event_id_generator,
                                           verbose=verbose)

        # FIXME: read from settings
        picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True, stim=True, exclude=[])
        event_id = None # any
        tmin = -0.2  # start of each epoch (200ms before the trigger)
        tmax = 0.8  # end of each epoch (800ms after the trigger) - longest beat is 0.57s long
        detrend = 0 # remove dc
        # reject = dict(eog=250e-6) # TODO: optionally reject epochs
        beat_epochs = mne.Epochs(raw, beat_events, event_id,
                                      tmin, tmax, preload=True,
                                      proj=False, picks=picks, verbose=False)
        print(beat_epochs)

        self.beat_epochs = beat_epochs
Example No. 23
def hilbert_process(raw, bands, return_evoked=False):
    """Make hilbert transform of raw data and epoch it.

    Parameters
    ----------

    raw : instance of mne.io.Raw
        The raw data to be transformed.
    bands : dict
        Dictionary of frequency bands to compute, in the form
        {"band": [low, high]}.
    return_evoked : bool
        If True, evoked data sets are returned; if False, epochs are
        returned.

    Returns
    -------
    dict
        Keyed by band name. If return_evoked is False, each value is an
        Epochs object whose data are complex numbers. If return_evoked is
        True, each value is a list of evoked objects containing only the
        envelope.
    """
    tmin, tmax = -2, 2
    event_id = {'voluntary': 243, 'involuntary': 219}
    picks = mne.pick_types(
        raw.info, meg=False, eeg=True, stim=False, exclude='bads')
    events = mne.find_events(raw)
    results_dict = {}

    for band in bands.keys():
        raw_tmp = raw.copy()
        raw_tmp.filter(bands[band][0], bands[band][1])

        if return_evoked:
            evokeds = []
            raw_tmp.apply_hilbert(picks=picks, envelope=True)
            epochs = mne.Epochs(
                raw_tmp,
                events,
                event_id,
                tmin,
                tmax,
                picks=picks,
                baseline=(None, -1.8),
                reject=reject)
            for cond in epochs.event_id.keys():
                evokeds.append(epochs[cond].average())
            results_dict[band] = evokeds
        else:
            raw_tmp.apply_hilbert(picks=picks, envelope=False)
            epochs = mne.Epochs(
                raw_tmp,
                events,
                event_id,
                tmin,
                tmax,
                picks=picks,
                baseline=None,
                reject=reject)
            results_dict[band] = epochs

    return results_dict
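A minimal usage sketch for the helper above; the file name and band limits are hypothetical, and the module-level `reject` dictionary the function refers to must already be defined:

import mne

raw = mne.io.read_raw_fif('sub01_raw.fif', preload=True)
bands = {'alpha': [8, 12], 'beta': [13, 30]}

# Complex-valued epochs per band (analytic signal).
epochs_per_band = hilbert_process(raw, bands)

# Envelope-only evoked responses per band and condition.
evokeds_per_band = hilbert_process(raw, bands, return_evoked=True)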
Example No. 24
def test_ransac():
    """Some basic tests for ransac."""

    event_id = {'Visual/Left': 3}
    tmin, tmax = -0.2, 0.5

    events = mne.find_events(raw)
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=(None, 0), decim=8,
                        reject=None, preload=True)
    # normal case
    picks = mne.pick_types(epochs.info, meg='mag', eeg=False, stim=False,
                           eog=False, exclude=[])

    ransac = Ransac(picks=picks)
    epochs_clean = ransac.fit_transform(epochs)
    assert_true(len(epochs_clean) == len(epochs))
    # Pass numpy instead of epochs
    X = epochs.get_data()
    assert_raises(AttributeError, ransac.fit, X)
    #
    # should not contain both channel types
    picks = mne.pick_types(epochs.info, meg=True, eeg=False, stim=False,
                           eog=False, exclude=[])
    ransac = Ransac(picks=picks)
    assert_raises(ValueError, ransac.fit, epochs)
    #
    # should not contain other channel types.
    picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=True,
                           eog=False, exclude=[])
    ransac = Ransac(picks=picks)
    assert_raises(ValueError, ransac.fit, epochs)
Example No. 25
def test_epochs_vector_inverse():
    """Test vector inverse consistency between evoked and epochs."""
    raw = read_raw_fif(fname_raw)
    events = find_events(raw, stim_channel='STI 014')[:2]
    reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6)

    epochs = Epochs(raw, events, None, 0, 0.01, baseline=None,
                    reject=reject, preload=True)

    assert_equal(len(epochs), 2)

    evoked = epochs.average(picks=range(len(epochs.ch_names)))

    inv = read_inverse_operator(fname_inv)

    method = "MNE"
    snr = 3.
    lambda2 = 1. / snr ** 2

    stcs_epo = apply_inverse_epochs(epochs, inv, lambda2, method=method,
                                    pick_ori='vector', return_generator=False)
    stc_epo = np.mean(stcs_epo)

    stc_evo = apply_inverse(evoked, inv, lambda2, method=method,
                            pick_ori='vector')

    assert_allclose(stc_epo.data, stc_evo.data, rtol=1e-9, atol=0)
Example No. 26
def merge_trial_and_audio_onsets(raw, use_audio_onsets=True, inplace=True, stim_channel='STI 014', verbose=None):
    events = mne.find_events(raw, stim_channel='STI 014', shortest_event=0)

    merged = list()
    last_trial_event = None
    for i, event in enumerate(events):
        etype = event[2]
        if etype < 1000 or etype == 1111: # trial or noise onset
            if use_audio_onsets and events[i+1][2] == 1000: # followed by audio onset
                onset = events[i+1][0]
                merged.append([onset, 0, etype])
                if verbose:
                    log.debug('merged {} + {} = {}'.format(event, events[i+1], merged[-1]))
            else:
                # either we are not interested in audio onsets or there is none
                merged.append(event)
                if verbose:
                    log.debug('kept {}'.format(merged[-1]))
        # audio onsets (etype == 1000) are not copied
        if etype > 1111: # other events (keystrokes)
            merged.append(event)
            if verbose:
                log.debug('kept other {}'.format(merged[-1]))

    merged = np.asarray(merged, dtype=int)

    if inplace:
        stim_id = raw.ch_names.index(stim_channel)
        raw._data[stim_id,:].fill(0)     # delete data in stim channel
        raw.add_events(merged)

    return merged
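A minimal usage sketch (hypothetical file path). With inplace=True the stim channel of the raw object is cleared and refilled with the merged events; the merged array is also returned:

import mne

raw = mne.io.read_raw_fif('sub01_raw.fif', preload=True)
merged = merge_trial_and_audio_onsets(raw, use_audio_onsets=True, inplace=True)
print(merged[:5])  # first few merged events (MNE events-array rows)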
Example No. 27
def get_epochs_and_cov(X, y, window=500):
    """return epochs from array."""
    raw_train = toMNE(X, y)
    picks = range(len(getChannelNames()))

    events = list()
    events_id = dict()
    for j, eid in enumerate(getEventNames()):
        tmp = find_events(raw_train, stim_channel=eid, verbose=False)
        tmp[:, -1] = j + 1
        events.append(tmp)
        events_id[eid] = j + 1

    # concatenate and sort events
    events = np.concatenate(events, axis=0)
    order_ev = np.argsort(events[:, 0])
    events = events[order_ev]

    epochs = Epochs(raw_train, events, events_id,
                    tmin=-(window / 500.0) + 1 / 500.0 + 0.150,
                    tmax=0.150, proj=False, picks=picks, baseline=None,
                    preload=True, add_eeg_ref=False, verbose=False)

    cov_signal = compute_raw_data_covariance(raw_train, verbose=False)
    return epochs, cov_signal
Example No. 28
def test_gdf2_data():
    """Test reading raw GDF 2.x files."""
    raw = read_raw_edf(gdf2_path + '.gdf', eog=None, misc=None, preload=True)

    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, _ = raw[picks]

    # This .mat was generated using the official biosig matlab package
    mat = sio.loadmat(gdf2_path + '_biosig.mat')
    data_biosig = mat['dat'] * 1e-6  # data are stored in microvolts
    data_biosig = data_biosig[picks]

    # Assert data are almost equal
    assert_array_almost_equal(data, data_biosig, 8)

    # Find events
    events = find_events(raw, verbose=1)
    events[:, 2] >>= 8  # last 8 bits are system events in biosemi files
    assert_equal(events.shape[0], 2)  # 2 events in file
    assert_array_equal(events[:, 2], [20, 28])

    # gh-5604
    assert raw.info['meas_date'] == DATE_NONE
    _test_raw_reader(read_raw_edf, input_fname=gdf2_path + '.gdf',
                     eog=None, misc=None)
Example No. 29
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers
    """
    events = find_events(raw)
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_mne = read_cov(cov_km_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
            / linalg.norm(cov.data, ord='fro')) < 0.005)

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro')
            / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
              baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]

    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    cov_mne = read_cov(cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
            / linalg.norm(cov.data, ord='fro')) < 0.005)

    # test IO when computation done in Python
    cov.save('test-cov.fif')  # test saving
    cov_read = read_cov('test-cov.fif')
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro')
            / linalg.norm(cov.data, ord='fro')) < 1e-5)

    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
              baseline=(-0.2, -0.1), proj=True, reject=reject),
              Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
              baseline=(-0.2, -0.1), proj=False, reject=reject)]
    # these should fail
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    cov = compute_covariance(epochs, projs=[])
Example No. 30
def test_io_egi():
    """Test importing EGI simple binary files"""
    # test default
    tempdir = _TempDir()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', category=RuntimeWarning)
        raw = read_raw_egi(egi_fname, include=None)
        assert_true('RawEGI' in repr(raw))
        raw.load_data()  # currently does nothing
        assert_equal(len(w), 1)
        assert_true(w[0].category == RuntimeWarning)
        msg = 'Did not find any event code with more than one event.'
        assert_true(msg in '%s' % w[0].message)

    include = ['TRSP', 'XXX1']
    raw = read_raw_egi(egi_fname, include=include)
    repr(raw)
    repr(raw.info)

    assert_equal('eeg' in raw, True)
    out_fname = op.join(tempdir, 'test_egi_raw.fif')
    raw.save(out_fname)

    raw2 = Raw(out_fname, preload=True)
    data1, times1 = raw[:10, :]
    data2, times2 = raw2[:10, :]
    assert_array_almost_equal(data1, data2, 9)
    assert_array_almost_equal(times1, times2)

    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
    assert_equal(len(eeg_chan), 256)
    picks = pick_types(raw.info, eeg=True)
    assert_equal(len(picks), 256)
    assert_equal('STI 014' in raw.ch_names, True)

    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 2)  # ground truth
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert_true(np.unique(events[:, 0])[0] != 0)
    assert_true(np.unique(events[:, 2])[0] != 0)
    triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]])

    # test trigger functionality
    assert_raises(RuntimeError, _combine_triggers, triggers, None)
    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
    events_ids = [12, 24]
    new_trigger = _combine_triggers(triggers, events_ids)
    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))

    assert_raises(ValueError, read_raw_egi, egi_fname,
                  include=['Foo'])
    assert_raises(ValueError, read_raw_egi, egi_fname,
                  exclude=['Bar'])
    for ii, k in enumerate(include, 1):
        assert_true(k in raw.event_id)
        assert_true(raw.event_id[k] == ii)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw.copy(), raw])
    assert_equal(raw_concat.n_times, 2 * raw.n_times)
Example No. 31
# - MEG channel selection
# - 1-30 Hz band-pass filter
# - epoching -0.2 to 0.5 seconds with respect to events
# - rejection based on peak-to-peak amplitude

data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data()
raw.filter(1, 30, fir_design='firwin')

# peak-to-peak amplitude rejection parameters
reject = dict(grad=4000e-13, mag=4e-12)
# longer + more epochs for more artifact exposure
events = mne.find_events(raw, stim_channel='STI 014')
epochs = mne.Epochs(raw,
                    events,
                    event_id=None,
                    tmin=-0.2,
                    tmax=0.5,
                    reject=reject)

###############################################################################
# Fit ICA model using the FastICA algorithm, detect and plot components
# explaining ECG artifacts.

ica = ICA(n_components=0.95, method='fastica').fit(epochs)

ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto')
Example No. 32
def importbdf(bdfname,
              nchans=34,
              refchans=['EXG1', 'EXG2'],
              hptsname=None,
              mask=255,
              extrachans=[],
              exclude=[],
              verbose=None):
    """Wrapper around mne-python to import BDF files

    Parameters
    ----------

    bdfname - Name of the biosemi .bdf filename with full path

    nchans -  Number of EEG channels (including references)
              (Optional) By default, 34 (32 + 2 references)
    refchans - list of strings with reference channel names
               (Optional) By default ['EXG1','EXG2'].
               Use None for average reference.
    hptsname - Name of the electrode position file in .hpts format with path
               (Optional) By default a 32 channel Biosemi layout is used. If
               the nchans is >= 64 and < 96, a 64 channel Biosemi layout is
               used. If nchans >= 96, a 96 channel biosemi layout is used.
               Formats other than .hpts will also likely work, but behavior
               may vary.
    mask - Integer mask to use for trigger channel (Default is 255).
    extrachans - Additional channels other than EEG and EXG that may be in the
                 bdf file. These will be marked as MISC in mne-python.
                 Specify as list of names.
    exclude - List of channel names to exclude from importing
    verbose - bool, str, int, or None (Optional)
        The verbosity of messages to print. If a str, it can be either DEBUG,
        INFO, WARNING, ERROR, or CRITICAL.

    Returns
    -------
    raw - MNE raw data object with re-referenced and preloaded data

    eves - Event list (3 column array as required by mne.Epochs)

    Requires
    --------
    mne-python module > release 0.7
    """

    # Default HPTS file
    if (hptsname is None):
        anlffr_root = os.path.dirname(sys.modules['anlffr'].__file__)
        if nchans >= 64 and nchans < 96:
            logger.info('Number of channels is greater than 64.'
                        ' Hence loading a 64 channel montage.')
            hptspath = os.path.join(anlffr_root, 'helper/sysfiles/')
            hptsname = 'biosemi64.hpts'
            montage = read_dig_hpts(hptspath + hptsname)
            misc = ['EXG3', 'EXG4', 'EXG5', 'EXG6', 'EXG7', 'EXG8']
        else:
            if nchans >= 96:
                logger.info('Number of channels is greater than 96.'
                            ' Hence loading a 96 channel montage.')
                hptspath = os.path.join(anlffr_root, 'helper/sysfiles/')
                hptsname = 'biosemi96.hpts'
                montage = read_dig_hpts(hptspath + hptsname)
                misc = ['EXG3', 'EXG4', 'EXG5', 'EXG6', 'EXG7', 'EXG8']
            else:
                if nchans == 2:
                    logger.info('Number of channels is 2.'
                                'Guessing ABR montage or saccades.')
                    montage = None
                    misc = []
                else:
                    logger.info('Loading a default 32 channel montage.')
                    hptspath = os.path.join(anlffr_root, 'helper/sysfiles/')
                    hptsname = 'biosemi32.hpts'
                    montage = read_dig_hpts(hptspath + hptsname)
                    misc = ['EXG3', 'EXG4', 'EXG5', 'EXG6', 'EXG7', 'EXG8']
    else:
        montage = read_dig_hpts(hptsname)  # User-supplied
        misc = ['EXG3', 'EXG4', 'EXG5', 'EXG6', 'EXG7', 'EXG8']

    misc += extrachans
    raw = read_raw_bdf(bdfname,
                       preload=True,
                       misc=misc,
                       exclude=exclude,
                       stim_channel='auto')
    raw.set_montage(montage, on_missing='warn')

    # Rereference
    if refchans is not None:
        sys.stdout.write('Re-referencing data to: ' + ' '.join(refchans))
        (raw, ref_data) = set_eeg_reference(raw, refchans, copy=False)
        raw.info['bads'] += refchans
    else:
        # Add average reference operator for possible use later
        ave_ref_operator = make_eeg_average_ref_proj(raw.info, activate=False)
        raw = raw.add_proj(ave_ref_operator)

    eves = find_events(raw, shortest_event=1, mask=mask)

    return (raw, eves)
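A minimal usage sketch (hypothetical file name and trigger code), relying on the default 32-channel Biosemi montage described in the docstring:

import mne

raw, eves = importbdf('sub01.bdf', nchans=34, refchans=['EXG1', 'EXG2'],
                      mask=255)
# Epoch around a hypothetical trigger code of 1.
epochs = mne.Epochs(raw, eves, event_id=1, tmin=-0.1, tmax=0.5,
                    baseline=(None, 0), preload=True)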
Example No. 33
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'

###############################################################################
# Load and filter data, set up epochs

raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'

raw = fiff.Raw(raw_fname % 1, preload=True) # Take first run


picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')

events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces":1, "scrambled":2}

tmin, tmax = -0.2, 0.6
baseline = None  # no baseline as high-pass is applied
reject = dict(mag=1.5e-12)

epochs = mne.Epochs(raw, events, event_ids, tmin, tmax,  picks=picks,
                    baseline=baseline, preload=True, reject=reject)

# Fit ICA, find and remove major artifacts

ica = ICA(None, 50).decompose_epochs(epochs, decim=2)

for ch_name in ['MRT51-2908', 'MLF14-2908']:  # ECG, EOG contaminated chs
    scores = ica.find_sources_epochs(epochs, ch_name, 'pearsonr')
Example No. 34
def test_io_set():
    """Test importing EEGLAB .set files"""
    from scipy import io
    with warnings.catch_warnings(record=True) as w1:
        warnings.simplefilter('always')
        # main tests, and test missing event_id
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname,
                         montage=montage)
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname_onefile,
                         montage=montage)
        assert_equal(len(w1), 20)
        # f3 or preload_false and a lot for dropping events
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # test finding events in continuous data
        event_id = {'rt': 1, 'square': 2}
        raw0 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=True)
        raw1 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=False)
        raw2 = read_raw_eeglab(input_fname=raw_fname_onefile,
                               montage=montage,
                               event_id=event_id)
        raw3 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id)
        raw4 = read_raw_eeglab(input_fname=raw_fname, montage=montage)
        Epochs(raw0, find_events(raw0), event_id, add_eeg_ref=False)
        epochs = Epochs(raw1, find_events(raw1), event_id, add_eeg_ref=False)
        assert_equal(len(find_events(raw4)), 0)  # no events without event_id
        assert_equal(epochs["square"].average().nave, 80)  # 80 with
        assert_array_equal(raw0[:][0], raw1[:][0], raw2[:][0], raw3[:][0])
        assert_array_equal(raw0[:][-1], raw1[:][-1], raw2[:][-1], raw3[:][-1])
        assert_equal(len(w), 4)
        # 1 for preload=False / str with fname_onefile, 3 for dropped events
        raw0.filter(1,
                    None,
                    l_trans_bandwidth='auto',
                    filter_length='auto',
                    phase='zero')  # test that preloading works

    # test that using uint16_codec does not break stuff
    raw0 = read_raw_eeglab(input_fname=raw_fname,
                           montage=montage,
                           event_id=event_id,
                           preload=False,
                           uint16_codec='ascii')

    # test old EEGLAB version event import
    eeg = io.loadmat(raw_fname, struct_as_record=False, squeeze_me=True)['EEG']
    for event in eeg.event:  # old version allows integer events
        event.type = 1
    assert_equal(_read_eeglab_events(eeg)[-1, -1], 1)
    eeg.event = eeg.event[0]  # single event
    assert_equal(_read_eeglab_events(eeg)[-1, -1], 1)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        epochs = read_epochs_eeglab(epochs_fname)
        epochs2 = read_epochs_eeglab(epochs_fname_onefile)
    # one warning for each read_epochs_eeglab because both files have epochs
    # associated with multiple events
    assert_equal(len(w), 2)
    assert_array_equal(epochs.get_data(), epochs2.get_data())

    # test different combinations of events and event_ids
    temp_dir = _TempDir()
    out_fname = op.join(temp_dir, 'test-eve.fif')
    write_events(out_fname, epochs.events)
    event_id = {'S255/S8': 1, 'S8': 2, 'S255/S9': 3}

    epochs = read_epochs_eeglab(epochs_fname, epochs.events, event_id)
    assert_equal(len(epochs.events), 4)
    epochs = read_epochs_eeglab(epochs_fname, out_fname, event_id)
    assert_raises(ValueError, read_epochs_eeglab, epochs_fname, None, event_id)
    assert_raises(ValueError, read_epochs_eeglab, epochs_fname, epochs.events,
                  None)

    # test reading file with one event
    eeg = io.loadmat(raw_fname, struct_as_record=False, squeeze_me=True)['EEG']
    one_event_fname = op.join(temp_dir, 'test_one_event.set')
    io.savemat(
        one_event_fname, {
            'EEG': {
                'trials': eeg.trials,
                'srate': eeg.srate,
                'nbchan': eeg.nbchan,
                'data': 'test_one_event.fdt',
                'epoch': eeg.epoch,
                'event': eeg.event[0],
                'chanlocs': eeg.chanlocs,
                'pnts': eeg.pnts
            }
        })
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    op.join(temp_dir, 'test_one_event.fdt'))
    event_id = {eeg.event[0].type: 1}
    read_raw_eeglab(input_fname=one_event_fname,
                    montage=montage,
                    event_id=event_id,
                    preload=True)

    # test reading file with one channel
    one_chan_fname = op.join(temp_dir, 'test_one_channel.set')
    io.savemat(
        one_chan_fname, {
            'EEG': {
                'trials': eeg.trials,
                'srate': eeg.srate,
                'nbchan': 1,
                'data': np.random.random((1, 3)),
                'epoch': eeg.epoch,
                'event': eeg.epoch,
                'chanlocs': {
                    'labels': 'E1',
                    'Y': -6.6069,
                    'X': 6.3023,
                    'Z': -2.9423
                },
                'times': eeg.times[:3],
                'pnts': 3
            }
        })
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        read_raw_eeglab(input_fname=one_chan_fname, preload=True)
    # no warning for 'no events found'
    assert_equal(len(w), 0)

    # test if .dat file raises an error
    eeg = io.loadmat(epochs_fname, struct_as_record=False,
                     squeeze_me=True)['EEG']
    eeg.data = 'epochs_fname.dat'
    bad_epochs_fname = op.join(temp_dir, 'test_epochs.set')
    io.savemat(
        bad_epochs_fname, {
            'EEG': {
                'trials': eeg.trials,
                'srate': eeg.srate,
                'nbchan': eeg.nbchan,
                'data': eeg.data,
                'epoch': eeg.epoch,
                'event': eeg.event,
                'chanlocs': eeg.chanlocs
            }
        })
    shutil.copyfile(op.join(base_dir, 'test_epochs.fdt'),
                    op.join(temp_dir, 'test_epochs.dat'))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        assert_raises(NotImplementedError, read_epochs_eeglab,
                      bad_epochs_fname)
    assert_equal(len(w), 1)
Example No. 35
                task = "RS"
            else:
                task = "gradCPT"
            bids_basename = "sub-{}_ses-{}_task-{}_run-{}".format(
                subject, session, task, run)
            bidspath = BIDSPath(
                subject=subject,
                session=session,
                task=task,
                run=run,
                suffix="meg",
                extension=".ds",
                root=BIDS_PATH,
            )
            if not op.isdir(bidspath):
                raw_fname = op.join(ACQ_PATH, rec_date, file)
                raw = mne.io.read_raw_ctf(raw_fname, preload=False)
                if task == "gradCPT":
                    events = mne.find_events(
                        raw, min_duration=2 / raw.info["sfreq"]
                    )  # redo the trick with 1 vs 2 samples
                    write_raw_bids(
                        raw,
                        bidspath,
                        events_data=events,
                        event_id=EVENT_ID,
                        overwrite=True,
                    )
                else:
                    write_raw_bids(raw, bidspath, overwrite=True)
Example No. 36
def _compute_evoked(subject, kind):

    fname = op.join(cfg.camcan_meg_raw_path, subject, kind,
                    '%s_raw.fif' % kind)

    raw = mne.io.read_raw_fif(fname)
    mne.channels.fix_mag_coil_types(raw.info)
    raw = _run_maxfilter(raw, subject, kind)
    if DEBUG:
        raw.crop(0, 60)
    raw.filter(1, 30)
    _compute_add_ssp_exg(raw)

    out = {}
    for ii, event_id in enumerate(task_info[kind]['event_id']):
        epochs_params = task_info[kind]['epochs_params'][ii]
        lock = task_info[kind]['lock'][ii]
        events = mne.find_events(raw,
                                 uint_cast=True,
                                 min_duration=2. / raw.info['sfreq'])

        if kind == 'task' and lock == 'resp':
            event_map = np.array([(k, v)
                                  for k, v in Counter(events[:, 2]).items()])
            button_press = event_map[:, 0][np.argmax(event_map[:, 1])]
            if event_map[:, 1][np.argmax(event_map[:, 1])] >= 50:
                events[events[:, 2] == button_press, 2] = 8192
            else:
                raise RuntimeError('Could not guess button press')

        reject = _get_global_reject_epochs(raw,
                                           events=events,
                                           event_id=event_id,
                                           epochs_params=epochs_params)

        epochs = mne.Epochs(raw,
                            events=events,
                            event_id=event_id,
                            reject=reject,
                            preload=True,
                            **epochs_params)

        # noise_cov = mne.compute_covariance(
        #     epochs, tmax=0, method='oas')

        evokeds = list()
        for kk in event_id:
            evoked = epochs[kk].average()
            evoked.comment = kk
            evokeds.append(evoked)

        out_path = op.join(cfg.derivative_path, subject)

        if not op.exists(out_path):
            os.makedirs(out_path)

        out_fname = op.join(out_path, '%s_%s_sensors-ave.fif' % (kind, lock))

        mne.write_evokeds(out_fname, evokeds)
        out.update({lock: (kind, epochs.average().nave)})

    return out
Ejemplo n.º 37
0
from mne.datasets.megsim import load_data

condition = 'visual'  # or 'auditory' or 'somatosensory'

# Load experimental RAW files for the visual condition
raw_fnames = load_data(condition=condition,
                       data_format='raw',
                       data_type='experimental')

# Load simulation evoked files for the visual condition
evoked_fnames = load_data(condition=condition,
                          data_format='evoked',
                          data_type='simulation')

raw = Raw(raw_fnames[0])
events = find_events(raw, stim_channel="STI 014", shortest_event=1)

# Visualize raw file
raw.plot()

# Make an evoked file from the experimental data
picks = pick_types(raw.info, meg=True, eog=True, exclude='bads')

# Read epochs
event_id, tmin, tmax = 9, -0.2, 0.5
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                baseline=(None, 0),
Ejemplo n.º 38
0
from mne.report import Report
import numpy as np
from autoreject import AutoReject, compute_thresholds
from autoreject import get_rejection_threshold

# data description
raw = io.read_raw_fif(file_name, preload=True)
print(raw)
raw.info

#filter
raw.notch_filter(np.arange(50, 150, 200), n_jobs=1, fir_design='firwin')
raw = raw.copy().filter(0.1, 40., fir_design='firwin')

#events
events = mne.find_events(raw, stim_channel='STI101', shortest_event=1)
print(events)
events_id = {'visual1': 37, 'visual2': 77, 'audio1': 117, 'audio2': 157}

#epochs
epochs = mne.Epochs(raw,
                    events,
                    event_id=events_id,
                    tmin=-0.2,
                    tmax=0.5,
                    baseline=(None, 0),
                    reject_by_annotation=True,
                    verbose=True,
                    preload=True)
print(epochs)
Ejemplo n.º 39
0
                          Vectorizer, CSP)

data_path = sample.data_path()

subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3}  # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)

# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here low-pass at 20 so we can more heavily decimate, and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')

# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more

# Read epochs
epochs = mne.Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=('grad', 'eog'),
                    baseline=(None, 0.),
                    preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6),
Ejemplo n.º 40
0
            pick_channels = raw_solo.ch_names[0:32] + [raw_solo.ch_names[-1]]
        elif subject == 2:
            pick_channels = raw_solo.ch_names[32:-1] + [raw_solo.ch_names[-1]]        
        raw_solo.pick_channels(pick_channels)
        raw_cola = sessions['collaborative']['run_1']
        raw_cola = raw_cola.copy().pick_channels(pick_channels)

        for condition, raw in zip(['solo', 'cola'], [raw_solo, raw_cola]):        

            # filter data and resample
            fmin = 1
            fmax = 20
            raw.filter(fmin, fmax, verbose=False)            

            # detect the events and cut the signal into epochs
            events = mne.find_events(raw=raw, shortest_event=1, verbose=False)
            event_id = {'NonTarget': 1, 'Target': 2}
            epochs = mne.Epochs(raw, events, event_id, tmin=0.0, tmax=0.8, baseline=None, verbose=False, preload=True)
            epochs.pick_types(eeg=True)

            # get trials and labels
            X = epochs.get_data()
            y = epochs.events[:,-1]
            y = y - 1

            # cross validation
            skf = StratifiedKFold(n_splits=5)
            clf = make_pipeline(ERPCovariances(estimator='lwf', classes=[1]), MDM())
            scr = cross_val_score(clf, X, y, cv=skf, scoring = 'roc_auc').mean()
            scores[pair][subject][condition] = scr
Ejemplo n.º 41
0
###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
data_path = bst_phantom_elekta.data_path(verbose=True)

raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)

###############################################################################
# The data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:

events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG2421']

###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
# to save memory:

raw.plot_psd(tmax=60., average=False)

###############################################################################
# Let's use Maxwell filtering to clean the data a bit.
# Ideally we would have the fine calibration and cross-talk information
# for the site of interest, but we don't, so we just do:
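#
# A minimal sketch of this step (an assumption, not the original code),
# assuming ``import mne``; with no fine-calibration or cross-talk files,
# ``maxwell_filter`` is simply called with its defaults:

import mne

mne.channels.fix_mag_coil_types(raw.info)  # standardize coil definitions before SSS
raw_sss = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
raw_sss.plot_psd(tmax=60., average=False)  # inspect the cleaned recording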
Ejemplo n.º 42
0
def test_crop():
    """Test cropping with annotations."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset,
                            duration,
                            description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    # Get rid of the boundary events
    raw_concat.annotations.delete(-1)
    raw_concat.annotations.delete(-1)
    # Ensure annotations survive the round-trip crop->concat
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr, ))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration', 'orig_time'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416
Ejemplo n.º 43
0
def test_events_from_annot_in_raw_objects():
    """Test basic functionality of events_fron_annot for raw objects."""
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    event_id = {
        'Auditory/Left': 1,
        'Auditory/Right': 2,
        'Visual/Left': 3,
        'Visual/Right': 4,
        'Visual/Smiley': 32,
        'Motor/Button': 5
    }
    event_map = {v: k for k, v in event_id.items()}
    annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],
                        duration=np.zeros(len(events)),
                        description=[event_map[vv] for vv in events[:, 2]],
                        orig_time=None)
    raw.set_annotations(annot)

    events2, event_id2 = \
        events_from_annotations(raw, event_id=event_id, regexp=None)
    assert_array_equal(events, events2)
    assert_equal(event_id, event_id2)

    events3, event_id3 = \
        events_from_annotations(raw, event_id=None, regexp=None)

    assert_array_equal(events[:, 0], events3[:, 0])
    assert set(event_id.keys()) == set(event_id3.keys())

    first = np.unique(events3[:, 2])
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    first = np.unique(list(event_id3.values()))
    second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)
    assert_array_equal(first, second)

    events4, event_id4 =\
        events_from_annotations(raw, event_id=None, regexp='.*Left')

    expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id4.keys(), expected_event_id4.keys())

    expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events4[:, 0], events4[:, 0])

    events5, event_id5 = \
        events_from_annotations(raw, event_id=event_id, regexp='.*Left')

    expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}
    assert_equal(event_id5, expected_event_id5)

    expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]
    assert_array_equal(expected_events5, events5)

    with pytest.raises(ValueError, match='not find any of the events'):
        events_from_annotations(raw, regexp='not_there')

    raw.set_annotations(None)
    events7, _ = events_from_annotations(raw)
    assert_array_equal(events7, np.empty((0, 3), dtype=int))
Ejemplo n.º 44
0
                   int(date[5:].split('-')[1]))

# add modified subject info to dataset
raw.info['subject_info'] = dict(id=subject,
                                sex=int(sex),
                                birthday=approx_birthday)

# frequency of power line
raw.info['line_freq'] = 50.0
raw.info['lowpass'] = raw.info['sfreq'] / 2

###############################################################################
# 4) Create events info
# extract events
events = find_events(raw,
                     stim_channel='Status',
                     output='onset',
                     min_duration=0.002)

###############################################################################
# 5) Extract events from the status channel and save them as file annotations
# events to data frame
events = pd.DataFrame(events,
                      columns=['onset', 'duration', 'description'])
# onset to seconds
events['onset_in_s'] = events['onset'] / raw.info['sfreq']
# sort by onset
events = events.sort_values(by=['onset_in_s'])
# only keep relevant events
events = events.loc[(events['description'] <= 245)]

# create annotations object
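# A hedged sketch of the announced step (not part of the original snippet),
# assuming ``from mne import Annotations`` and the data-frame columns created above:
annotations = Annotations(onset=events['onset_in_s'].values,
                          duration=[0.0] * len(events),
                          description=events['description'].astype(str).values,
                          orig_time=raw.info['meas_date'])
raw.set_annotations(annotations)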
Ejemplo n.º 45
0
###############################################################################
# Load EEG and extract covariance matrices for SSVEP
# --------------------------------------------------

frequencies = [13, 17, 21]
freq_band = 0.1
events_id = {'13 Hz': 2, '17 Hz': 4, '21 Hz': 3, 'resting-state': 1}

duration = 2.5    # duration of epochs
interval = 0.25   # interval between successive epochs for online processing

# Subject 12: first 4 sessions for training, last session for test

# Training set
raw = Raw(download_data(subject=12, session=1), preload=True, verbose=False)
events = find_events(raw, shortest_event=0, verbose=False)
raw = raw.pick_types(eeg=True)
ch_count = len(raw.info['ch_names'])
raw_ext = extend_signal(raw, frequencies, freq_band)
epochs = Epochs(
    raw_ext, events, events_id, tmin=2, tmax=5, baseline=None, verbose=False)
x_train = BlockCovariances(
    estimator='lwf', block_size=ch_count).transform(epochs.get_data())
y_train = events[:, 2]

# Testing set
raw = Raw(download_data(subject=12, session=4), preload=True, verbose=False)
raw = raw.pick_types(eeg=True)
raw_ext = extend_signal(raw, frequencies, freq_band)
epochs = make_fixed_length_epochs(
    raw_ext, duration=duration, overlap=duration - interval, verbose=False)
Ejemplo n.º 46
0
def test_compute_covariance_auto_reg():
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:10]

    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))

    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical'):

            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

            # but the rest is the same
            assert_array_equal(cov_a['data'][off_diag_mask],
                               cov_b['data'][off_diag_mask])

        else:
            # and here we have shrinkage everywhere.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])

    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order

    methods = ['empirical', 'factor_analysis', 'ledoit_wolf', 'oas', 'pca',
               'shrunk', 'shrinkage']
    cov3 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=True)
    method_names = [cov['method'] for cov in cov3]
    for method in ['factor_analysis', 'ledoit_wolf', 'oas', 'pca',
                   'shrinkage']:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert -55 < this_lik < -45
    this_lik = cov3[method_names.index('empirical')]['loglik']
    assert -110 < this_lik < -100
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert -45 < this_lik < -35

    assert_equal(set([c['method'] for c in cov3]), set(methods))

    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False)
    assert cov3[0]['method'] == cov4['method']  # ordering

    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')

    # invalid scalings
    pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
Ejemplo n.º 47
0
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with pytest.warns(RuntimeWarning, match='events masked'):
        events22 = read_events(fname, mask=3, mask_type='not_and')
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
    pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='not_and'),
        [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='not_and'),
        [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='not_and'),
        [[4, 0, 4]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='not_and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=1, mask_type='and'),
        [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=2, mask_type='and'),
        [[2, 0, 2]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=3, mask_type='and'),
        [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(
        find_events(raw, shortest_event=1, mask=4, mask_type='and'),
        [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, consecutive=True),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5], [20, 5, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True),
        [[19, 6, 5], [29, 5, 6], [31, 0, 5], [40, 0, 6], [14399, 0, 9]])
    pytest.raises(ValueError,
                  find_events,
                  raw,
                  output='step',
                  consecutive=True)
    assert_array_equal(
        find_events(raw, output='step', consecutive=True, shortest_event=1),
        [[10, 0, 5], [20, 5, 6], [30, 6, 5], [32, 5, 0], [40, 0, 6],
         [41, 6, 0], [14399, 0, 9], [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5], [31, 0, 6], [40, 0, 6], [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5], [20, 5, 6], [30, 6, 5]])
    assert_array_equal(
        find_events(raw,
                    output='offset',
                    consecutive=False,
                    min_duration=0.002), [[31, 0, 5]])
    assert_array_equal(
        find_events(raw, output='offset', consecutive=True,
                    min_duration=0.002), [[19, 6, 5], [29, 5, 6], [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5], [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(
        find_stim_steps(raw, pad_start=0, merge=0, stim_channel=stim_channel),
        [[0, 0, 1], [1, 1, 0], [10, 0, 4], [11, 4, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=-1, stim_channel=stim_channel),
        [[1, 1, 0], [10, 0, 5], [20, 5, 0]])
    assert_array_equal(
        find_stim_steps(raw, merge=1, stim_channel=stim_channel),
        [[1, 1, 0], [11, 0, 5], [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

    # Test with list of stim channels
    raw._data[stim_channel_idx, 1:101] = np.zeros(100)
    raw._data[stim_channel_idx, 10:11] = 1
    raw._data[stim_channel_idx, 30:31] = 3
    stim_channel2 = 'STI 015'
    stim_channel2_idx = pick_channels(raw.info['ch_names'],
                                      include=[stim_channel2])
    raw._data[stim_channel2_idx, :] = 0
    raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
    events1 = find_events(raw, stim_channel='STI 014')
    events2 = events1.copy()
    events2[:, 0] -= 5
    events = find_events(raw, stim_channel=['STI 014', stim_channel2])
    assert_array_equal(events[::2], events2)
    assert_array_equal(events[1::2], events1)

    # test initial_event argument
    info = create_info(['MYSTI'], 1000, 'stim')
    data = np.zeros((1, 1000))
    raw = RawArray(data, info)
    data[0, :10] = 100
    data[0, 30:40] = 200
    assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
    assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
                       [[0, 0, 100], [30, 0, 200]])

    # test error message for raw without stim channels
    raw = read_raw_fif(raw_fname, preload=True)
    raw.pick_types(meg=True, stim=False)
    # raw does not have annotations
    with pytest.raises(ValueError, match="'stim_channel'"):
        find_events(raw)
    # if raw has annotations, we show a different error message
    raw.set_annotations(Annotations(0, 2, "test"))
    with pytest.raises(ValueError, match="mne.events_from_annotations"):
        find_events(raw)
Ejemplo n.º 48
0
def load_raw(subj_dir,
             filenames,
             ica_filter_filename='ica_filter-ica.fif',
             index=None,
             m_files=None,
             load_error_trials=True):
    """
    Load a raw data file
    
    :param subj_dir: Directory with the subject data
    :param filenames: Name of a raw file (no path), or a list of such names
    :param ica_filter_filename: Name of an ICA .fif file (path relative to "subdir"). If the file exists, it should contain
                                an ICA filter (obtained from dpm.filtering.find_ecg_eog_components), and this filter
                                will be immediately applied to the loaded raw data.
    :param index: A "dpm.Index" object - index of the relevant data files per MEG file
    :param m_files: Read up to this number of files (for debugging)
    :param load_error_trials: Load the error trials or not. This affects only the STIMULUS events, not the RESPONSE events.
    :return: dpm.files.Data
    """

    #subj_dir = dpm.subj_path['cc']
    # filenames = data_filenames
    subdir = 'sss'  # previously this was an argument to the function. For now, keep it here

    if index is None:
        index = dpm.Index(subj_dir)

    if isinstance(filenames, str):
        filenames = [filenames]

    index.load_trigger_mapping()

    for filename in filenames:
        if index.get_entry_for_sss(filename) is None:
            raise Exception(
                "%s does not have trigger mapping defined in index.csv" %
                filename)
    if len(
            set([
                index.get_entry_for_sss(filename)['trigger_mapping_fn']
                for filename in filenames
            ])) > 1:
        raise Exception(
            "The files provided do not rely on the same trigger mapping. Check out the index.csv to see that"
        )

    if m_files is not None and m_files < len(filenames):
        filenames = filenames[:m_files]

    #-- Load data
    raws = [
        mne.io.read_raw_fif(subj_dir + "/" + subdir + "/" + filename,
                            preload=True) for filename in filenames
    ]
    raw = mne.concatenate_raws(raws)

    #-- Remove ECG/EOG components via ICA
    ica_file = subj_dir + "/" + subdir + "/" + ica_filter_filename
    if os.path.exists(ica_file):
        ica = mne.preprocessing.read_ica(ica_file)
        raw = ica.apply(raw, exclude=ica.exclude)

    #-- Get events
    stimulus_events = mne.find_events(raw,
                                      stim_channel='STI101',
                                      consecutive='increasing',
                                      min_duration=0.002,
                                      mask=0x000000FF)

    _remap_triggers(filenames, index, stimulus_events)

    #-- Load behavioral results and create metadata accordingly
    behavioral_results = load_bresults_multiple_files([
        index.sss_fn_to_behavior_file_path(filename) for filename in filenames
    ])
    stim_metadata = create_stim_metadata(stimulus_events, behavioral_results,
                                         subj_dir)

    response_events, response_metadata, stimulus_events, stim_metadata = \
        _events_by_responses(stimulus_events, stim_metadata, behavioral_results, load_error_trials)

    return Data(filenames, raw, stimulus_events, response_events,
                stim_metadata, response_metadata)
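
# A hypothetical usage sketch (the subject directory and file names below are
# placeholders, not taken from the original project); the returned
# ``dpm.files.Data`` bundles the concatenated raw, the stimulus/response events
# and their metadata:
data = load_raw('/data/meg/subj01',
                ['subj01_block1_sss.fif', 'subj01_block2_sss.fif'],
                load_error_trials=False)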
Ejemplo n.º 49
0
def load_and_preprocess_raw(
    subject,
    onsets='audio',
    interpolate_bad_channels=True,
    reference_mastoids=True,
    l_freq=0.5,
    h_freq=30,
    sfreq=None,
    ica_cleaning=True,
    ica_name='100p_64c',
    l_freq2=None,
    h_freq2=None,
    verbose=None,
    n_jobs=4,
    mne_data_root=None,
    ica_data_root=None,
):

    # load the imported fif data, use the specified onsets
    raw = load_raw(
        subject,
        onsets=onsets,
        interpolate_bad_channels=interpolate_bad_channels,
        reference_mastoids=reference_mastoids,
        verbose=verbose,
        mne_data_root=mne_data_root,
    )

    # apply bandpass filter, use 4 processes to speed things up
    log.info('Applying filter: low_cut_freq={} high_cut_freq={}'.format(
        l_freq, h_freq))
    eeg_picks = mne.pick_types(raw.info,
                               meg=False,
                               eeg=True,
                               eog=False,
                               stim=False)
    raw.filter(l_freq=l_freq,
               h_freq=h_freq,
               picks=eeg_picks,
               filter_length='10s',
               l_trans_bandwidth=0.1,
               h_trans_bandwidth=0.5,
               method='fft',
               n_jobs=n_jobs,
               verbose=verbose)

    # extract events
    # this comprises 240 trials, 60 noise events (1111) and 60 feedback events (2000=No, 2001=Yes)
    trial_events = mne.find_events(raw,
                                   stim_channel='STI 014',
                                   shortest_event=0,
                                   verbose=verbose)
    if verbose:
        log.debug('trial events: {}'.format(trial_events.shape))

    # resample data and events
    if sfreq is not None:
        orig_sfreq = raw.info['sfreq']
        fast_resample_mne(raw,
                          sfreq,
                          res_type='sinc_fastest',
                          preserve_events=True,
                          verbose=False)

        # IMPORTANT: extracted events have to be resampled, too - otherwise misalignment
        trial_events = resample_mne_events(trial_events, orig_sfreq, sfreq)

    if ica_cleaning:
        # load ica
        ica = load_ica(subject,
                       description=ica_name,
                       ica_data_root=ica_data_root)
        if verbose:
            log.info('Applying ICA: {}'.format(ica))
        log.info('Excluding ICA components: {}'.format(ica.exclude))
        raw = ica.apply(raw, exclude=ica.exclude)

    if l_freq2 is not None or h_freq2 is not None:
        log.info(
            'Applying additional filter: low_cut_freq={} high_cut_freq={}'.
            format(l_freq2, h_freq2))
        raw.filter(l_freq=l_freq2,
                   h_freq=h_freq2,
                   picks=eeg_picks,
                   filter_length='10s',
                   l_trans_bandwidth=0.1,
                   h_trans_bandwidth=0.5,
                   method='fft',
                   n_jobs=n_jobs,
                   verbose=verbose)

    return raw, trial_events
Ejemplo n.º 50
0
def run_events_concatenate(list_ica_files, subject):
    '''
    The events are extracted from stim channel 'STI101'. The events are saved
    to the Node directory.
    For each subject, the different runs are concatenated into a single raw file
    and saved in the Node directory. We take the different runs from the
    preprocessing workflow directory, i.e. the cleaned raw data.
    '''

    print(subject, list_ica_files)
    import os
    import mne

    # could be added in a node to come
    mask = 4096 + 256  # mask for excluding high order bits
    delay_item = 0.0345
    min_duration = 0.015

    print("processing subject: %s" % subject)

    raw_list = list()
    events_list = list()
    fname_events_files = []

    print("  Loading raw data")
    for i, run_fname in enumerate(list_ica_files):
        run = i + 1

        raw = mne.io.read_raw_fif(run_fname, preload=True)
        events = mne.find_events(raw,
                                 stim_channel='STI101',
                                 consecutive='increasing',
                                 mask=mask,
                                 mask_type='not_and',
                                 min_duration=min_duration)

        print("  S %s - R %s" % (subject, run))

        fname_events = os.path.abspath('run_%02d-eve.fif' % run)
        mne.write_events(fname_events, events)
        fname_events_files.append(fname_events)

        delay = int(round(delay_item * raw.info['sfreq']))
        events[:, 0] = events[:, 0] + delay
        events_list.append(events)

        raw_list.append(raw)

    raw, events = mne.concatenate_raws(raw_list, events_list=events_list)
    raw.set_eeg_reference(projection=True)
    raw_file = os.path.abspath('{}_sss_filt_dsamp_ica-raw.fif'.format(subject))
    print(raw_file)

    raw.save(raw_file, overwrite=True)

    event_file = os.path.abspath(
        '{}_sss_filt_dsamp_ica-raw-eve.fif'.format(subject))
    mne.write_events(event_file, events)

    del raw_list
    del raw

    return raw_file, event_file, fname_events_files
Ejemplo n.º 51
0
def test_kit2fiff_model():
    """Test Kit2Fiff model."""
    from mne.gui._kit2fiff_gui import Kit2FiffModel
    tempdir = _TempDir()
    tgt_fname = os.path.join(tempdir, 'test-raw.fif')

    model = Kit2FiffModel()
    assert not model.can_save
    assert model.misc_chs_desc == "No SQD file selected..."
    assert model.stim_chs_comment == ""
    model.markers.mrk1.file = mrk_pre_path
    model.markers.mrk2.file = mrk_post_path
    model.sqd_file = sqd_path
    assert model.misc_chs_desc == "160:192"
    model.hsp_file = hsp_path
    assert not model.can_save
    model.fid_file = fid_path
    assert model.can_save

    # events
    model.stim_slope = '+'
    assert model.get_event_info() == {1: 2}
    model.stim_slope = '-'
    assert model.get_event_info() == {254: 2, 255: 2}

    # stim channels
    model.stim_chs = "181:184, 186"
    assert_array_equal(model.stim_chs_array, [181, 182, 183, 186])
    assert model.stim_chs_ok
    assert model.get_event_info() == {}
    model.stim_chs = "181:184, bad"
    assert not model.stim_chs_ok
    assert not model.can_save
    model.stim_chs = ""
    assert model.can_save

    # export raw
    raw_out = model.get_raw()
    raw_out.save(tgt_fname)
    raw = read_raw_fif(tgt_fname)

    # Compare exported raw with the original binary conversion
    raw_bin = read_raw_fif(fif_path)
    trans_bin = raw.info['dev_head_t']['trans']
    want_keys = list(raw_bin.info.keys())
    assert sorted(want_keys) == sorted(list(raw.info.keys()))
    trans_transform = raw_bin.info['dev_head_t']['trans']
    assert_allclose(trans_transform, trans_bin, 0.1)

    # Averaging markers
    model.markers.mrk3.method = "Average"
    trans_avg = model.dev_head_trans
    assert not np.all(trans_avg == trans_transform)
    assert_allclose(trans_avg, trans_bin, 0.1)

    # Test exclusion of one marker
    model.markers.mrk3.method = "Transform"
    model.use_mrk = [1, 2, 3, 4]
    assert not np.all(model.dev_head_trans == trans_transform)
    assert not np.all(model.dev_head_trans == trans_avg)
    assert not np.all(model.dev_head_trans == np.eye(4))

    # test setting stim channels
    model.stim_slope = '+'
    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')

    model.stim_coding = '<'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)

    events_rev = events_bin.copy()
    events_rev[:, 2] = 1
    model.stim_coding = '>'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_rev)

    model.stim_coding = 'channel'
    model.stim_chs = "160:161"
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin + [0, 0, 32])

    # test reset
    model.clear_all()
    assert model.use_mrk == [0, 1, 2, 3, 4]
    assert model.sqd_file == ""
Ejemplo n.º 52
0
def make_ecr_events(raw_file, data_file, out_file, pattern=False):

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load behavioral file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    data = read_csv(data_file)
    n_events, _ = data.shape
    n_trials, _ = data[data.Condition != 0].shape  # <--- Excludes rest trials.
    n_responses = (~np.isnan(data[data.Condition != 0].ResponseOnset)).sum()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load raw file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    raw = Raw(raw_file, preload=False, verbose=False)
    stim_onsets = find_events(raw,
                              stim_channel='STI001',
                              output='onset',
                              verbose=False)
    response_onsets = find_events(raw,
                                  stim_channel='STI002',
                                  output='onset',
                                  verbose=False)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Error catching.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

    if n_events != stim_onsets.shape[0]:
        raise ValueError('Number of trial onsets in %s and %s do not match!' %
                         (data_file, raw_file))
    elif n_responses != response_onsets.shape[0]:
        raise ValueError('Number of responses in %s and %s do not match!' %
                         (data_file, raw_file))
    else:
        pass

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Make events file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

    # Amend Conflict and ResponseAccuracy categories.
    data['Conflict'] = np.where(data.Conflict == 0, 1,
                                data.Conflict)  # No Conflict: [0,1] --> 1
    data['ResponseAccuracy'] = np.where(
        data.ResponseAccuracy == 0, 2,
        data.ResponseAccuracy)  # Accuracy: 0 --> 2
    data['ResponseAccuracy'] = np.where(
        data.ResponseAccuracy == 99, 3,
        data.ResponseAccuracy)  # Accuracy:99 --> 3

    # Append Word Valence category.
    data['WordValence'] = np.where(  # Con + Angry Face = Angry Word
        (data.Condition == 1) & (data.Valence == 1),
        1,
        np.where(
            # Con + Happy Face = Happy Word
            (data.Condition == 1) & (data.Valence == 2),
            2,
            np.where(
                # Incon + Angry Face = Happy Word
                (data.Condition == 2) & (data.Valence == 1),
                2,
                np.where(
                    # Incon + Happy Face = Angry Word
                    (data.Condition == 2) & (data.Valence == 2),
                    1,
                    99))))

    # Make unique identifiers.
    data['StimIDs'] = '1' + data.Condition.map(str) + data.Conflict.map(str) + data.Valence.map(str) +\
                       data.WordValence.map(str) + data.ResponseAccuracy.map(str)
    data['RespIDs'] = '2' + data.Condition.map(str) + data.Conflict.map(str) + data.Valence.map(str) +\
                       data.WordValence.map(str) + data.ResponseAccuracy.map(str)

    # Add identifiers to onset arrays.
    stim_onsets = stim_onsets[np.where(data.Condition == 0, False, True), :]
    stim_onsets[:, 2] = data.StimIDs[data.Condition != 0].astype(int)
    response_onsets[:, 2] = data.RespIDs[data.ResponseKey != 99].astype(int)

    # Merge and sort.
    events = np.concatenate([stim_onsets, response_onsets])
    events = events[events[:, 0].argsort(), :]

    # Reduce to pattern.
    if pattern:
        p = re.compile(pattern)
        idx, = np.where([
            True if re.findall(p, event.astype(str)) else False
            for event in events[:, 2]
        ])
        events = events[idx, :]

    # Insert first sample.
    events = np.insert(events, 0, [raw.first_samp, 0, 0], 0)

    # Write to fif file.
    write_events(out_file, events)

    # Write to text file.
    if out_file.endswith('.fif'): out_file = out_file[:-4] + '.txt'
    else: out_file = out_file + '.txt'
    for n, p in enumerate(pattern):
        events[:, 2] = np.where([
            True if re.findall(p, event.astype(str)) else False
            for event in events[:, 2]
        ], n + 1, events[:, 2])
    events = np.insert(events, 1, raw.index_as_time(events[:, 0]), 1)
    np.savetxt(out_file,
               events,
               fmt='%s',
               header='pattern = %s' % ' | '.join(pattern))
Ejemplo n.º 53
0
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers."""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    _assert_cov(cov, read_cov(cov_km_fname))

    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
              baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='factor_analysis')

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    assert_equal(len(w), 2)

    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
                    proj=True, reject=reject, preload=True)
    with warnings.catch_warnings(record=True):  # samples
        compute_covariance(epochs)

        # projs checking
        compute_covariance(epochs, projs=[])
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
Ejemplo n.º 54
0
            ('feedback', 0): 70,
            ('rest_delay', 0): 80
        }

        mapping = dict((v, k) for k, v in mapping.items())
        #    I created the key_map to give a number to each trial.

        #I get metadata and timing of the raw data.
        print('get metadata from raw data')
        meta, timing = meg.preprocessing.get_meta(raw, mapping, trial_pins,
                                                  150, 151, other_pins)
        index = meta.block_start

        # I separate the raw data by blocks
        # times where the blocks start
        events = mne.find_events(raw, 'UPPT001', shortest_event=1)
        index_b_start = np.where(
            events[:, 2] == 100)  #block start on every trigger 100
        time_block = []
        time_block2 = []
        for i in index_b_start[0]:
            time_block.append(events[i][0])
        if len(meta.groupby('block_start')) == len(time_block):
            pass
        else:
            for i in range(len(time_block) - 1):
                ii = timing[
                    timing['baseline_start_time'] > time_block[i]].index[2]
                jj = timing[
                    timing['baseline_start_time'] > time_block[i + 1]].index[2]
                if meta['block_start'][ii] == meta['block_start'][jj]:
Ejemplo n.º 55
0
def epoching(subj, task, run, slowVSfast=False, FD='class', window=None):
    preproc_name, preproc_path = get_pareidolia_bids(FOLDERPATH,
                                                     subj,
                                                     task,
                                                     run,
                                                     stage='preproc')
    preproc = mne.io.read_raw_fif(preproc_path)
    events = mne.find_events(preproc,
                             shortest_event=1)  #stim_channel= 'STI 014'
    ##Get Fractal dimension info from behavioral data
    if task == 'pareidolia':
        behav_name, behav_path = get_pareidolia_bids(FOLDERPATH,
                                                     subj,
                                                     task,
                                                     '1',
                                                     stage='behav')
        behav = pd.read_csv(behav_path)
        behav = arrange_dataframe(behav)
        if run == '1':
            behav = behav.loc[behav['bloc'] == 1]
            FDlist = list(np.array(behav['FD']))
        if run == '2':
            behav = behav.loc[behav['bloc'] == 2]
            FDlist = list(np.array(behav['FD']))
        if run == '3':
            behav = behav.loc[behav['bloc'] == 3]
            FDlist = list(np.array(behav['FD']))
        if run == '4':
            behav = behav.loc[behav['bloc'] == 4]
            FDlist = list(np.array(behav['FD']))
    if task == 'RS':
        FDlist = None
    ##Fixing bugs of event ids
    if subj == '01':
        if task == 'pareidolia':
            if run == '1':
                events[:, 2] = np.where(events[:, 2] == 3, 1, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 4, 2, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 5, 3, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 6, 4, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 7, 5, events[:, 2])
    if subj == '02':
        if task == 'pareidolia':
            if run == '4':
                events[:, 2] = np.where(events[:, 2] == 2, 1, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 3, 2, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 4, 3, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 6, 4, events[:, 2])
                events[:, 2] = np.where(events[:, 2] == 5, 5, events[:, 2])
    #print(events)
    #print(FDlist)
    # This line uses the function 'reformat_events', which you can find in EEG_pareidolia_utils, to add information about FD and slowVSfast pareidolia to the
    # event ids.
    events, medianRT = reformat_events(events, FDlist, RT_thresh, task, run,
                                       slowVSfast, FD)
    print(events[:50])
    # Here is a CRUCIAL part of the function, which determines which part of the signal is used for each epoch, and for the baseline.
    tmin, tmax = -1.5, 8  # Here we define the amount of time we want to keep before (tmin) and after (tmax) the event.
    baseline = (-1.5, 0)
    #Identification of channels of interest
    EOG_chs = ['E1', 'E8', 'E25', 'E32', 'E126', 'E127']
    Unwanted = [
        'E43', 'E48', 'E49', 'E128', 'E113', 'E120', 'E125', 'E119', 'E129'
    ]
    All_chs = preproc.info['ch_names'][0:129]
    EEG_chs = [ele for ele in All_chs if ele not in Unwanted]
    EEG_chs = [ele for ele in EEG_chs if ele not in EOG_chs]

    ##This whole section determines which event_id to choose depending on the task, and the values set for slowVSfast and FD.
    if task == 'RS':
        if run == '1':
            event_id = {'RS10': 100}
        if run == '2':
            event_id = {'RS20': 200}

    if task == 'pareidolia':
        if slowVSfast == False:
            event_id = {'Image_on_nopar': 7, 'Image_on_par': 77}
            if FD == 'all':
                event_id = {
                    'Image_on_nopar_08': 78,
                    'Image_on_par_08': 778,
                    'Image_on_nopar_09': 79,
                    'Image_on_par_09': 779,
                    'Image_on_nopar_10': 710,
                    'Image_on_par_10': 7710,
                    'Image_on_nopar_11': 711,
                    'Image_on_par_11': 7711,
                    'Image_on_nopar_12': 712,
                    'Image_on_par_12': 7712,
                    'Image_on_nopar_13': 713,
                    'Image_on_par_13': 7713,
                    'Image_on_nopar_14': 714,
                    'Image_on_par_14': 7714,
                    'Image_on_nopar_15': 715,
                    'Image_on_par_15': 7715,
                    'Image_on_nopar_16': 716,
                    'Image_on_par_16': 7716,
                    'Image_on_nopar_17': 717,
                    'Image_on_par_17': 7717,
                    'Image_on_nopar_18': 718,
                    'Image_on_par_18': 7718,
                    'Image_on_nopar_19': 719,
                    'Image_on_par_19': 7719,
                    'Image_on_nopar_20': 720,
                    'Image_on_par_20': 7720,
                }
            if FD == 'class':
                event_id = {
                    'Image_on_nopar_low': 70,
                    'Image_on_par_low': 770,
                    'Image_on_nopar_mid': 71,
                    'Image_on_par_mid': 771,
                    'Image_on_nopar_high': 72,
                    'Image_on_par_high': 772
                }
        if slowVSfast == True:
            event_id = {
                'Image_on_nopar': 7,
                'Image_on_par_fast': 77,
                'Image_on_par_slow': 777
            }
            if FD == 'class':
                event_id = {
                    'nopar_low': 70,
                    'par_low_fast': 770,
                    'par_low_slow': 7770,
                    'nopar_mid': 71,
                    'par_mid_fast': 771,
                    'par_mid_slow': 7771,
                    'nopar_high': 72,
                    'par_high_fast': 772,
                    'par_high_slow': 7772
                }
            if window == 'RT':
                for e in range(len(events)):
                    if (events[e][2] == 7 or events[e][2] == 70
                            or events[e][2] == 71 or events[e][2]
                            == 72):  # and events[e+1][2] != 4):
                        events[e][0] = events[e][0] + medianRT
                event_id = {
                    'early_low': 40,
                    'early_mid': 41,
                    'early_high': 42,
                    'late_low': 440,
                    'late_mid': 441,
                    'late_high': 442,
                    'nopar_low': 70,
                    'nopar_mid': 71,
                    'nopar_high': 72
                }
    #Here we call the function that generates the epochs, using all the necessary information created earlier
    print(event_id)

    epochs = mne.Epochs(preproc,
                        events=events,
                        event_id=event_id,
                        tmin=tmin,
                        tmax=tmax,
                        baseline=baseline,
                        reject=None,
                        preload=True,
                        picks=EEG_chs)
    # You can get rid of these two lines (which perform autorejection of bad epochs) if your computer has difficulties
    print(epochs)
    #ar = AutoReject()
    #epochs= ar.fit_transform(epochs)
    return epochs
Ejemplo n.º 56
0
def test_io_set_raw(fnames, tmpdir):
    """Test importing EEGLAB .set files."""
    tmpdir = str(tmpdir)
    raw_fname, raw_fname_onefile = fnames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # main tests, and test missing event_id
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname,
                         montage=montage)
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname_onefile,
                         montage=montage)
    for want in ('Events like', 'consist entirely', 'could not be mapped',
                 'string preload is not supported'):
        assert (any(want in str(ww.message) for ww in w))
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # test finding events in continuous data
        event_id = {'rt': 1, 'square': 2}
        raw0 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=True)
        raw1 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=False)
        raw2 = read_raw_eeglab(input_fname=raw_fname_onefile,
                               montage=montage,
                               event_id=event_id)
        raw3 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id)
        raw4 = read_raw_eeglab(input_fname=raw_fname, montage=montage)
        Epochs(raw0, find_events(raw0), event_id)
        epochs = Epochs(raw1, find_events(raw1), event_id)
        assert_equal(len(find_events(raw4)), 0)  # no events without event_id
        assert_equal(epochs["square"].average().nave, 80)  # 80 with
        assert_array_equal(raw0[:][0], raw1[:][0], raw2[:][0], raw3[:][0])
        assert_array_equal(raw0[:][-1], raw1[:][-1], raw2[:][-1], raw3[:][-1])
        assert_equal(len(w), 4)
        # 1 for preload=False / str with fname_onefile, 3 for dropped events
        raw0.filter(1,
                    None,
                    l_trans_bandwidth='auto',
                    filter_length='auto',
                    phase='zero')  # test that preloading works

    # test that using uint16_codec does not break stuff
    raw0 = read_raw_eeglab(input_fname=raw_fname,
                           montage=montage,
                           event_id=event_id,
                           preload=False,
                           uint16_codec='ascii')

    # test old EEGLAB version event import (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    for event in eeg.event:  # old version allows integer events
        event.type = 1
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)
    eeg.event = eeg.event[0]  # single event
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)

    # test reading file with one event (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    one_event_fname = op.join(tmpdir, 'test_one_event.set')
    io.savemat(one_event_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_one_event.fdt',
            'epoch': eeg.epoch,
            'event': eeg.event[0],
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False)
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    one_event_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    test_raw = read_raw_eeglab(input_fname=one_event_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=True)

    # test that sample indices are read python-wise (zero-based)
    assert find_events(test_raw)[0, 0] == round(eeg.event[0].latency) - 1
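    # (i.e. an EEGLAB latency of 1 corresponds to sample index 0 in MNE)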

    # test negative event latencies
    negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
    evnts = deepcopy(eeg.event[0])
    evnts.latency = 0
    io.savemat(negative_latency_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_one_event.fdt',
            'epoch': eeg.epoch,
            'event': evnts,
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False)
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    negative_latency_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    pytest.raises(ValueError,
                  read_raw_eeglab,
                  montage=montage,
                  preload=True,
                  event_id=event_id,
                  input_fname=negative_latency_fname)

    # test overlapping events
    overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
    io.savemat(overlap_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_overlap_event.fdt',
            'epoch': eeg.epoch,
            'event': [eeg.event[0], eeg.event[0]],
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False)
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    overlap_fname.replace('.set', '.fdt'))
    event_id = {'rt': 1, 'square': 2}
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_eeglab(input_fname=overlap_fname,
                              montage=montage,
                              event_id=event_id,
                              preload=True)
    assert_equal(len(w), 1)  # one warning for the dropped event
    events_stimchan = find_events(raw)
    events_read_events_eeglab = read_events_eeglab(overlap_fname, event_id)
    assert (len(events_stimchan) == 1)
    assert (len(events_read_events_eeglab) == 2)
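    # find_events reads the synthesized stim channel, which can encode only one
    # of two simultaneous events (hence the dropped-event warning), whereas
    # read_events_eeglab parses the EEG.event structure directly and keeps both.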

    # test reading file with one channel
    one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
    io.savemat(one_chan_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 1,
            'data': np.random.random((1, 3)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': {
                'labels': 'E1',
                'Y': -6.6069,
                'X': 6.3023,
                'Z': -2.9423
            },
            'times': eeg.times[:3],
            'pnts': 3
        }
    },
               appendmat=False)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        read_raw_eeglab(input_fname=one_chan_fname, preload=True)
    # no warning for 'no events found'
    assert_equal(len(w), 0)

    # test reading file with 3 channels - one without position information
    # first, create chanlocs structured array
    ch_names = ['F3', 'unknown', 'FPz']
    x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
    dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
    chanlocs = np.zeros((3, ), dtype=dt)
    for ind, vals in enumerate(zip(ch_names, x, y, z)):
        for fld in range(4):
            chanlocs[ind][dt[fld][0]] = vals[fld]

    if LooseVersion(np.__version__) == '1.14.0':
        # There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
        # this write to fail!
        raise SkipTest('Need to fix bug in NumPy 1.14.0!')

    # save set file
    one_chanpos_fname = op.join(tmpdir, 'test_chanpos.set')
    io.savemat(one_chanpos_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 3,
            'data': np.random.random((3, 3)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': chanlocs,
            'times': eeg.times[:3],
            'pnts': 3
        }
    },
               appendmat=False)
    # load it
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
    # one warning because some channels are not found in Montage
    assert_equal(len(w), 1)
    # position should be present for first two channels
    for i in range(2):
        assert_array_equal(
            raw.info['chs'][i]['loc'][:3],
            np.array([-chanlocs[i]['Y'], chanlocs[i]['X'], chanlocs[i]['Z']]))
    # position of the last channel should be NaN (no position available)
    assert_array_equal(raw.info['chs'][-1]['loc'][:3], [np.nan] * 3)

    # test reading channel names from set and positions from montage
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_eeglab(input_fname=one_chanpos_fname,
                              preload=True,
                              montage=montage)
    # one warning because some channels are not found in Montage
    assert_equal(len(w), 1)

    # when montage was passed - channel positions should be taken from there
    correct_pos = [[-0.56705965, 0.67706631, 0.46906776], [np.nan] * 3,
                   [0., 0.99977915, -0.02101571]]
    for ch_ind in range(3):
        assert_array_almost_equal(raw.info['chs'][ch_ind]['loc'][:3],
                                  np.array(correct_pos[ch_ind]))

    # test reading channel names but not positions when there is no X (only Z)
    # field in the EEG.chanlocs structure
    nopos_chanlocs = chanlocs[['labels', 'Z']]
    nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
    io.savemat(nopos_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 3,
            'data': np.random.random((3, 2)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': nopos_chanlocs,
            'times': eeg.times[:2],
            'pnts': 2
        }
    },
               appendmat=False)
    # load the file
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
    # test that channel names have been loaded but not channel positions
    for i in range(3):
        assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
        assert_array_equal(raw.info['chs'][i]['loc'][:3],
                           np.array([np.nan, np.nan, np.nan]))
Ejemplo n.º 57
0
def test_events():
    """Test reading and modifying events."""
    tempdir = _TempDir()
    # Note: BrainVision event offsets are 1-based, mne offsets are 0-based.
    # So in all tests below, the "onset" is 1 less than what's in the file
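    # (e.g. a marker at position 487 in the .vmrk file appears as sample 486
    # in the expected arrays below)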

    # check that events are read and stim channel is synthesized correctly
    raw = read_raw_brainvision(vhdr_path, eog=eog, event_id=event_id)
    events = raw._get_brainvision_events()
    events = events[events[:, 2] != event_id['Sync On']]
    assert_array_equal(events, [[486, 0, 253],
                                [496, 1, 255],
                                [1769, 1, 254],
                                [1779, 1, 255],
                                [3252, 1, 254],
                                [3262, 1, 255],
                                [4935, 1, 253],
                                [4945, 1, 255],
                                [5999, 1, 255],
                                [6619, 1, 254],
                                [6629, 1, 255],
                                [7699, 1, 1]])

    # check that events are read and stim channel is synthesized correctly and
    # response triggers are shifted using the deprecated response_trig_shift.
    with pytest.warns(DeprecationWarning):
        raw = read_raw_brainvision(vhdr_path, eog=eog,
                                   response_trig_shift=1000, event_id=event_id)
    events = raw._get_brainvision_events()
    assert_array_equal(events, [[486, 0, 253],
                                [496, 1, 255],
                                [1769, 1, 254],
                                [1779, 1, 255],
                                [3252, 1, 254],
                                [3262, 1, 255],
                                [4935, 1, 253],
                                [4945, 1, 255],
                                [5999, 1, 1255],
                                [6619, 1, 254],
                                [6629, 1, 255],
                                [7629, 1, 5],
                                [7699, 1, 1]])

    # check that trig_shift_by_type works as well
    raw = read_raw_brainvision(vhdr_path, eog=eog,
                               trig_shift_by_type={'response': 1000,
                                                   'Optic': 2000},
                               event_id=event_id)
    events = raw._get_brainvision_events()
    assert_array_equal(events, [[486, 0, 253],
                                [496, 1, 255],
                                [1769, 1, 254],
                                [1779, 1, 255],
                                [3252, 1, 254],
                                [3262, 1, 255],
                                [4935, 1, 253],
                                [4945, 1, 255],
                                [5999, 1, 1255],
                                [6619, 1, 254],
                                [6629, 1, 255],
                                [7629, 1, 5],
                                [7699, 1, 2001]])

    # Check that we warn if a trigger is dropped
    with pytest.warns(RuntimeWarning, match='to parse triggers'):
        raw = read_raw_brainvision(vhdr_path)
    # check that events are read and stim channel is synthesized correctly and
    # response triggers are ignored.
    raw = read_raw_brainvision(vhdr_path, eog=eog, event_id=event_id,
                               trig_shift_by_type={'response': None})
    events = raw._get_brainvision_events()
    events = events[events[:, 2] != event_id['Sync On']]
    assert_array_equal(events, [[486, 0, 253],
                                [496, 1, 255],
                                [1769, 1, 254],
                                [1779, 1, 255],
                                [3252, 1, 254],
                                [3262, 1, 255],
                                [4935, 1, 253],
                                [4945, 1, 255],
                                [6619, 1, 254],
                                [6629, 1, 255],
                                [7699, 1, 1]])

    # Error handling of trig_shift_by_type
    pytest.raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, trig_shift_by_type=1)
    pytest.raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, trig_shift_by_type={'response': 0.1})
    pytest.raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, trig_shift_by_type={'response': np.nan})
    pytest.raises(ValueError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, trig_shift_by_type={'response': 1000,
                                                    'Response': 1001})
    with pytest.warns(DeprecationWarning):
        pytest.raises(ValueError, read_raw_brainvision, vhdr_path, eog=eog,
                      preload=True, trig_shift_by_type={'response': 1000},
                      response_trig_shift=1001)

    # Check that events of type "Comment" are read if they contain square
    # brackets (which usually signify a new section within a BrainVision file)
    # If no event_id specified, skip the marker and continue as planned
    with pytest.warns(RuntimeWarning, match='channel types to misc'):
        raw = read_raw_brainvision(vhdr_v2_path)
    events = raw._get_brainvision_events()
    assert events.shape == (11, 3)  # shape of events without comment

    # with event_id specified, get that comment and assert it's there
    tmp_event_id = {'comment using [square] brackets': 999}
    with pytest.warns(RuntimeWarning, match='channel types to misc'):
        raw = read_raw_brainvision(vhdr_v2_path, event_id=tmp_event_id)
    events = raw._get_brainvision_events()
    assert 999 in events[:, -1]
    assert events.shape == (12, 3)  # shape of events with comment

    # check that events are read properly when event_id is specified for
    # auxiliary events
    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
                               trig_shift_by_type={'response': None},
                               event_id=event_id)
    events = raw._get_brainvision_events()
    assert_array_equal(events, [[486, 0, 253],
                                [496, 1, 255],
                                [1769, 1, 254],
                                [1779, 1, 255],
                                [3252, 1, 254],
                                [3262, 1, 255],
                                [4935, 1, 253],
                                [4945, 1, 255],
                                [6619, 1, 254],
                                [6629, 1, 255],
                                [7629, 1, 5],
                                [7699, 1, 1]])

    # expected events after stim channel (re)construction, which enforces a
    # minimum event duration of 1 sample
    events = np.array([[486, 1, 253],
                       [496, 1, 255],
                       [1769, 1, 254],
                       [1779, 1, 255],
                       [3252, 1, 254],
                       [3262, 1, 255],
                       [4935, 1, 253],
                       [4945, 1, 255],
                       [6619, 1, 254],
                       [6629, 1, 255],
                       [7629, 1, 5],
                       [7699, 1, 1]])

    # Test that both trig_shift_by_type and event_id can be set
    read_raw_brainvision(vhdr_path, eog=eog, preload=False,
                         trig_shift_by_type={'response': 100},
                         event_id=event_id)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # modify events and check that stim channel is updated
    index = events[:, 2] == 255
    events = events[index]
    raw._set_brainvision_events(events)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])

    # remove events
    nchan = raw.info['nchan']
    ch_name = raw.info['chs'][-2]['ch_name']
    events = np.empty((0, 3))
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
    assert_equal(len(find_events(raw, 'STI 014')), 0)
    assert_allclose(raw[-1][0], 0.)
    fname = op.join(tempdir, 'evt_raw.fif')
    raw.save(fname)

    # add events back in
    events = [[10, 1, 2]]
    raw._set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
Ejemplo n.º 58
0
src = setup_source_space(subject, subjects_dir=subjects_dir,
                         spacing='oct5', add_dist=False)

# Setup a volume source space
# set pos=10.0 for speed, not very accurate; we recommend something smaller
# like 5.0 in actual analyses:
vol_src = setup_volume_source_space(
    subject, mri=fname_aseg, pos=10.0, bem=fname_model,
    add_interpolator=False,  # just for speed, usually use True
    volume_label=labels_vol, subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src

# Load data
raw = read_raw_fif(fname_raw, preload=True)
events = mne.find_events(raw)
raw.pick_types(meg=True, eeg=False, eog=True)
noise_cov = mne.read_cov(fname_cov)

# compute the fwd matrix
fwd = make_forward_solution(raw.info, fname_trans, src, fname_bem,
                            mindist=5.0)  # ignore sources<=5mm from innerskull
del src

# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    reject=reject, preload=False)
del raw
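
# Hedged continuation sketch (not part of the original snippet): with the
# forward solution and epochs above, a typical next step is a minimum-norm
# inverse. The calls below are standard MNE-Python API; the orientation and
# regularization settings are illustrative only.
from mne.minimum_norm import apply_inverse, make_inverse_operator

evoked = epochs.average()  # evoked response for the left-auditory condition
inv = make_inverse_operator(evoked.info, fwd, noise_cov, loose=1.0,
                            depth=None)  # free orientation for volume sources
stc = apply_inverse(evoked, inv, lambda2=1.0 / 9.0, method='dSPM')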
Ejemplo n.º 59
0
def test_array_raw():
    """Test creating raw from array
    """
    tempdir = _TempDir()
    # creating
    raw = Raw(fif_fname).crop(2, 5, copy=False)
    data, times = raw[:, :]
    sfreq = raw.info['sfreq']
    ch_names = [(ch[4:] if 'STI' not in ch else ch)
                for ch in raw.info['ch_names']]  # change them, why not
    #del raw
    types = list()
    for ci in range(102):
        types.extend(('grad', 'grad', 'mag'))
    types.extend(['stim'] * 9)
    types.extend(['eeg'] * 60)
    # wrong length
    assert_raises(ValueError, create_info, ch_names, sfreq, types)
    # bad entry
    types.append('foo')
    assert_raises(KeyError, create_info, ch_names, sfreq, types)
    types[-1] = 'eog'
    # default type
    info = create_info(ch_names, sfreq)
    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
    # use real types
    info = create_info(ch_names, sfreq, types)
    raw2 = RawArray(data, info)
    data2, times2 = raw2[:, :]
    assert_allclose(data, data2)
    assert_allclose(times, times2)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw2.copy(), raw2])
    assert_equal(raw_concat.n_times, 2 * raw2.n_times)

    # saving
    temp_fname = op.join(tempdir, 'raw.fif')
    raw2.save(temp_fname)
    raw3 = Raw(temp_fname)
    data3, times3 = raw3[:, :]
    assert_allclose(data, data3)
    assert_allclose(times, times3)

    # filtering
    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
    assert_equal(len(picks), 4)
    raw_lp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
    raw_hp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
    raw_bp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
    raw_bs = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
    data, _ = raw2[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    sig_dec = 11
    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)

    # plotting
    import matplotlib
    matplotlib.use('Agg')  # for testing don't use X server
    raw2.plot()
    raw2.plot_psds()

    # epoching
    events = find_events(raw2, stim_channel='STI 014')
    events[:, 2] = 1
    assert_true(len(events) > 2)
    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
    epochs.plot_drop_log(return_fig=True)
    epochs.plot()
    evoked = epochs.average()
    evoked.plot()
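
# Minimal standalone sketch (not part of the test above): the create_info /
# RawArray pattern the test exercises, with assumed channel names and random
# data standing in for a real recording.
import numpy as np
from mne import create_info
from mne.io import RawArray

sfreq = 100.0
data = np.random.randn(3, int(10 * sfreq))  # 3 channels, 10 seconds of noise
info = create_info(['EEG 001', 'EEG 002', 'EOG 001'], sfreq,
                   ['eeg', 'eeg', 'eog'])
raw_sim = RawArray(data, info)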
Ejemplo n.º 60
0
    conditions.append('/'.join(map(str, cond_tags)))
print(conditions[:10])

##############################################################################
# Let's make the event_id dictionary
event_id = dict(zip(conditions, conds.trigger + 1))
event_id['0/human bodypart/human/not-face/animal/natural']
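# Because the condition names are '/'-separated tags, epochs can later be
# selected with any single tag, e.g. epochs['face'] or epochs['not-face'].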

##############################################################################
# Read MEG data
n_runs = 4  # 4 for full data (use less to speed up computations)
fname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')
raws = [read_raw_fif(fname % block) for block in range(n_runs)]
raw = concatenate_raws(raws)

events = mne.find_events(raw, min_duration=.002)

events = events[events[:, 2] <= max_trigger]

##############################################################################
# Epoch data
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,
                    picks=picks, tmin=-.1, tmax=.500, preload=True)

##############################################################################
# Let's plot some conditions
epochs['face'].average().plot()
epochs['not-face'].average().plot()

##############################################################################