def _read_events(p, subj, ridx, raw, picker=None):
    """Read, restrict, and concatenate event files for one subject.

    Loads one event file per run index in ``ridx``, concatenates the
    per-run events across the raw segments, optionally restricts or
    picks events, and applies the trigger-timing correction from
    ``p.t_adjust``.  Raises ``RuntimeError`` if the concatenated events
    contain duplicate sample numbers.
    """
    ridx = np.array(ridx)
    assert ridx.ndim == 1
    include_ids = None
    if picker == 'restrict':
        # Only read the events that will actually be processed; the
        # restriction replaces any later picking, so clear ``picker``.
        include_ids = p.in_numbers
        picker = None
        print(' Events restricted to those in params.in_numbers')
    event_list = []
    for event_fname in get_event_fnames(p, subj, ridx):
        # An empty event file (e.g., resting state) would break
        # read_events, so substitute an empty (0, 3) array instead.
        with open(event_fname, 'r') as fid:
            has_content = bool(fid.read().strip())
        if has_content:
            run_events = read_events(event_fname, include=include_ids)
        else:
            run_events = np.empty((0, 3), int)
        event_list.append(run_events)
    if len(event_list) == 1 and len(raw._first_samps) > 1:
        # Split raw: one event file but multiple raw segments, so
        # collapse the segment bounds to a single overall span.
        first_samps = raw._first_samps[:1]
        last_samps = raw._last_samps[-1:]
    else:
        first_samps = raw._first_samps
        last_samps = raw._last_samps
    events = concatenate_events(event_list, first_samps, last_samps)
    if picker:
        events = _pick_events(events, picker)
    if len(np.unique(events[:, 0])) != len(events):
        raise RuntimeError('Non-unique event samples found after '
                           'concatenation')
    # Shift event samples to compensate for the trigger timing offset.
    sample_shift = int(np.round(-p.t_adjust * raw.info['sfreq']))
    events[:, 0] += sample_shift
    return events
def _read_events(p, subj, ridx, raw):
    """Load per-run event files for a subject and merge them.

    Reads one event file per run index in ``ridx``, checks each file for
    duplicate event samples, concatenates the runs across the raw
    segments, re-checks uniqueness, and applies the trigger-timing
    correction from ``p.t_adjust``.
    """
    ridx = np.array(ridx)
    assert ridx.ndim == 1
    per_run = []
    for event_fname in get_event_fnames(p, subj, ridx):
        run_events = read_events(event_fname)
        if len(np.unique(run_events[:, 0])) != len(run_events):
            raise RuntimeError('Non-unique event samples found in %s'
                               % (event_fname,))
        per_run.append(run_events)
    if len(per_run) == 1 and len(raw._first_samps) > 1:
        # Split raw: a single event file spans the whole recording, so
        # collapse the multiple segment bounds into one overall span.
        first_samps = raw._first_samps[:1]
        last_samps = raw._last_samps[-1:]
    else:
        first_samps = raw._first_samps
        last_samps = raw._last_samps
    events = concatenate_events(per_run, first_samps, last_samps)
    if len(np.unique(events[:, 0])) != len(events):
        raise RuntimeError('Non-unique event samples found after '
                           'concatenation')
    # do time adjustment
    events[:, 0] += int(np.round(-p.t_adjust * raw.info['sfreq']))
    return events
elif events[cnt, 2] == 5: filtered_events.append(np.array([events[cnt, 0], 0, 5])) event_order.append('STB') cnt += 1 filtered_events = np.array(filtered_events) # we need to keep all events at this point because we'll need them # in the correct order in order to match with behavior # filtering raw to remove breathing artifacts and stuff we won't need # for evoked analysis. Do it here because mne_process_raw wipes out # events channel raw.filter(1, 100) if f > 0: all_events = mne.concatenate_events([all_events, filtered_events], [all_raw.first_samp, raw.first_samp], [all_raw.last_samp, raw.last_samp]) all_raw = mne.concatenate_raws([all_raw, raw]) else: all_raw = raw all_events = filtered_events event_id = {'STG': 1, 'STI': 3, 'STB': 5} picks = mne.pick_types(raw.info, meg=True, ref_meg=True) epochs = mne.Epochs(all_raw, all_events, event_id, tmin, tmax, baseline=(None, 0), proj=False, preload=True, picks=picks) print subj print epochs # checking that we have at least 8 blocks of data if np.sum(epochs.events[:, 2] == 1) < 352:
def test_multiple_files():
    """Test loading multiple files simultaneously

    Splits a raw file on disk, re-reads and concatenates the pieces, and
    checks that data, events, sample bookkeeping, and the various preload
    modes (in-memory, lazy, memmap) all match the unsplit original.
    """
    # split file
    tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 10)
    raw.load_data()
    raw.load_data()  # test no operation
    split_size = 3.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # split boundaries: computed in samples, then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    assert_equal(raw.n_times, len(raw.times))
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = Raw(fname)
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    # a mismatched events_list length must raise
    assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
    all_raw_1, events1 = concatenate_raws(raws, preload=False,
                                          events_list=events)
    assert_equal(raw.first_samp, all_raw_1.first_samp)
    assert_equal(raw.last_samp, all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    # NOTE(review): raws[0] appears to be modified by the concatenation
    # above, hence the re-read — confirm against concatenate_raws docs
    raws[0] = Raw(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events2 = concatenate_events(events, first_samps, last_samps)
    events3 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events1, events2)
    assert_array_equal(events1, events3)
    # test various methods of combining files
    raw = Raw(fif_fname, preload=True)
    n_times = raw.n_times
    # make sure that all our data match
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
    _compare_combo(raw, raw_combo0, times, n_times)
    raw_combo = Raw([fif_fname, fif_fname], preload=False)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # incompatible files must not load together
    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
    # with all data preloaded, result should be preloaded
    raw_combo = Raw(fif_fname, preload=True)
    raw_combo.append(Raw(fif_fname, preload=True))
    assert_true(raw_combo.preload is True)
    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
    _compare_combo(raw, raw_combo, times, n_times)
    # with any data not preloaded, don't set result as preloaded
    raw_combo = concatenate_raws(
        [Raw(fif_fname, preload=True), Raw(fif_fname, preload=False)])
    assert_true(raw_combo.preload is False)
    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                       find_events(raw_combo0, stim_channel='STI 014'))
    _compare_combo(raw, raw_combo, times, n_times)
    # user should be able to force data to be preloaded upon concat
    raw_combo = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=True)],
        preload=True)
    assert_true(raw_combo.preload is True)
    _compare_combo(raw, raw_combo, times, n_times)
    # memmap-backed preload variants
    raw_combo = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=True)],
        preload='memmap3.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws(
        [Raw(fif_fname, preload=True), Raw(fif_fname, preload=True)],
        preload='memmap4.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=False)],
        preload='memmap5.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combo0, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # check out the len method
    assert_equal(len(raw), raw.n_times)
    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
def test_multiple_files():
    """Test loading multiple files simultaneously

    Splits a raw file on disk, re-reads and concatenates the pieces, and
    checks that data, events, and the various preload modes (in-memory,
    lazy, memmap) all behave like the unsplit original.
    """
    # split file
    raw = Raw(fif_fname, preload=True)
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # split boundaries: computed in samples, then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = Raw(fname)
    events = [find_events(r) for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    assert_true(raw.first_samp == all_raw_1.first_samp)
    assert_true(raw.last_samp == all_raw_1.last_samp)
    assert_array_almost_equal(raw[:, :][0], all_raw_1[:, :][0])
    # NOTE(review): raws[0] appears to be modified by the concatenation
    # above, hence the re-read — confirm against concatenate_raws docs
    raws[0] = Raw(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_array_almost_equal(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(all_raw_2)
    assert_array_equal(events, events2)
    # test various methods of combining files
    n_combos = 9
    raw_combos = [None] * n_combos
    raw = Raw(fif_fname, preload=True)
    raw_combos[0] = Raw([fif_fname, fif_fname], preload=True)
    raw_combos[1] = Raw([fif_fname, fif_fname], preload=False)
    raw_combos[2] = Raw([fif_fname, fif_fname], preload='memmap8.dat')
    # incompatible files must not load together
    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
    n_times = len(raw._times)
    assert_true(raw[:, :][0].shape[1] * 2 == raw_combos[0][:, :][0].shape[1])
    assert_true(raw_combos[0][:, :][0].shape[1] == len(raw_combos[0]._times))
    # with all data preloaded, result should be preloaded
    raw_combos[3] = Raw(fif_fname, preload=True)
    raw_combos[3].append(Raw(fif_fname, preload=True))
    # was asserting on raw_combos[0] (copy-paste slip); the combo under
    # test here is raw_combos[3]
    assert_true(raw_combos[3]._preloaded is True)
    # with any data not preloaded, don't set result as preloaded
    raw_combos[4] = concatenate_raws([Raw(fif_fname, preload=True),
                                      Raw(fif_fname, preload=False)])
    assert_true(raw_combos[4]._preloaded is False)
    assert_array_equal(find_events(raw_combos[4]), find_events(raw_combos[0]))
    # user should be able to force data to be preloaded upon concat
    raw_combos[5] = concatenate_raws([Raw(fif_fname, preload=False),
                                      Raw(fif_fname, preload=True)],
                                     preload=True)
    assert_true(raw_combos[5]._preloaded is True)
    raw_combos[6] = concatenate_raws([Raw(fif_fname, preload=False),
                                      Raw(fif_fname, preload=True)],
                                     preload='memmap3.dat')
    raw_combos[7] = concatenate_raws([Raw(fif_fname, preload=True),
                                      Raw(fif_fname, preload=True)],
                                     preload='memmap4.dat')
    raw_combos[8] = concatenate_raws([Raw(fif_fname, preload=False),
                                      Raw(fif_fname, preload=False)],
                                     preload='memmap5.dat')
    # make sure that all our data match
    # list() needed on Python 3, where range() has no extend()
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    for ti in times:  # let's do a subset of points for speed
        orig = raw[:, ti % n_times][0]
        for raw_combo in raw_combos:
            # these are almost_equals because of possible dtype differences
            assert_array_almost_equal(orig, raw_combo[:, ti][0])
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
    # now test event treatment for concatenated raw files
    events = [find_events(raw), find_events(raw)]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combos[0])
    assert_array_equal(events, events2)
    # check out the len method
    assert_true(len(raw) == raw.n_times)
    assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
def test_multiple_files():
    """Test loading multiple files simultaneously

    Splits a raw file on disk, re-reads and concatenates the pieces, and
    checks that data, events, sample bookkeeping, and the various preload
    modes (in-memory, lazy, memmap) all match the unsplit original.
    """
    # split file
    raw = Raw(fif_fname, preload=True).crop(0, 10)
    split_size = 3.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # split boundaries: computed in samples, then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    assert_equal(raw.n_times, len(raw._times))
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = Raw(fname)
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    assert_true(raw.first_samp == all_raw_1.first_samp)
    assert_true(raw.last_samp == all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    # NOTE(review): raws[0] appears to be modified by the concatenation
    # above, hence the re-read — confirm against concatenate_raws docs
    raws[0] = Raw(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # test various methods of combining files
    raw = Raw(fif_fname, preload=True)
    n_times = len(raw._times)
    # make sure that all our data match
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
    _compare_combo(raw, raw_combo0, times, n_times)
    raw_combo = Raw([fif_fname, fif_fname], preload=False)
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # incompatible files must not load together
    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
    assert_true(raw[:, :][0].shape[1] * 2 == raw_combo0[:, :][0].shape[1])
    assert_true(raw_combo0[:, :][0].shape[1] == len(raw_combo0._times))
    # with all data preloaded, result should be preloaded
    raw_combo = Raw(fif_fname, preload=True)
    raw_combo.append(Raw(fif_fname, preload=True))
    assert_true(raw_combo._preloaded is True)
    assert_true(len(raw_combo._times) == raw_combo._data.shape[1])
    _compare_combo(raw, raw_combo, times, n_times)
    # with any data not preloaded, don't set result as preloaded
    raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
                                  Raw(fif_fname, preload=False)])
    assert_true(raw_combo._preloaded is False)
    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                       find_events(raw_combo0, stim_channel='STI 014'))
    _compare_combo(raw, raw_combo, times, n_times)
    # user should be able to force data to be preloaded upon concat
    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
                                  Raw(fif_fname, preload=True)],
                                 preload=True)
    assert_true(raw_combo._preloaded is True)
    _compare_combo(raw, raw_combo, times, n_times)
    # memmap-backed preload variants
    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
                                  Raw(fif_fname, preload=True)],
                                 preload='memmap3.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
                                  Raw(fif_fname, preload=True)],
                                 preload='memmap4.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
                                  Raw(fif_fname, preload=False)],
                                 preload='memmap5.dat')
    _compare_combo(raw, raw_combo, times, n_times)
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combo0, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # check out the len method
    assert_true(len(raw) == raw.n_times)
    assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
def test_multiple_files():
    """Test loading multiple files simultaneously

    Splits a raw file on disk, re-reads and concatenates the pieces, and
    checks that data, events, and the various preload modes (in-memory,
    lazy, memmap) all behave like the unsplit original.
    """
    # split file
    raw = Raw(fif_fname, preload=True)
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp)
    # split boundaries: computed in samples, then converted to seconds
    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
    tmaxs /= sfreq
    tmins /= sfreq
    # going in reverse order so the last fname is the first file (need later)
    raws = [None] * len(tmins)
    for ri in range(len(tmins) - 1, -1, -1):
        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
        raws[ri] = Raw(fname)
    events = [find_events(r, stim_channel='STI 014') for r in raws]
    last_samps = [r.last_samp for r in raws]
    first_samps = [r.first_samp for r in raws]
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=False)
    assert_true(raw.first_samp == all_raw_1.first_samp)
    assert_true(raw.last_samp == all_raw_1.last_samp)
    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
    # NOTE(review): raws[0] appears to be modified by the concatenation
    # above, hence the re-read — confirm against concatenate_raws docs
    raws[0] = Raw(fname)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
    # test proper event treatment for split files
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(all_raw_2, stim_channel='STI 014')
    assert_array_equal(events, events2)
    # test various methods of combining files
    n_combos = 9
    raw_combos = [None] * n_combos
    raw = Raw(fif_fname, preload=True)
    raw_combos[0] = Raw([fif_fname, fif_fname], preload=True)
    raw_combos[1] = Raw([fif_fname, fif_fname], preload=False)
    raw_combos[2] = Raw([fif_fname, fif_fname], preload='memmap8.dat')
    # incompatible files must not load together
    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
    n_times = len(raw._times)
    assert_true(raw[:, :][0].shape[1] * 2 == raw_combos[0][:, :][0].shape[1])
    assert_true(raw_combos[0][:, :][0].shape[1] == len(raw_combos[0]._times))
    # with all data preloaded, result should be preloaded
    raw_combos[3] = Raw(fif_fname, preload=True)
    raw_combos[3].append(Raw(fif_fname, preload=True))
    # was asserting on raw_combos[0] (copy-paste slip); the combo under
    # test here is raw_combos[3]
    assert_true(raw_combos[3]._preloaded is True)
    assert_true(len(raw_combos[3]._times) == raw_combos[3]._data.shape[1])
    # with any data not preloaded, don't set result as preloaded
    raw_combos[4] = concatenate_raws(
        [Raw(fif_fname, preload=True), Raw(fif_fname, preload=False)])
    assert_true(raw_combos[4]._preloaded is False)
    assert_array_equal(find_events(raw_combos[4], stim_channel='STI 014'),
                       find_events(raw_combos[0], stim_channel='STI 014'))
    # user should be able to force data to be preloaded upon concat
    raw_combos[5] = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=True)],
        preload=True)
    assert_true(raw_combos[5]._preloaded is True)
    raw_combos[6] = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=True)],
        preload='memmap3.dat')
    raw_combos[7] = concatenate_raws(
        [Raw(fif_fname, preload=True), Raw(fif_fname, preload=True)],
        preload='memmap4.dat')
    raw_combos[8] = concatenate_raws(
        [Raw(fif_fname, preload=False), Raw(fif_fname, preload=False)],
        preload='memmap5.dat')
    # make sure that all our data match
    # list() needed on Python 3, where range() has no extend()
    times = list(range(0, 2 * n_times, 999))
    # add potentially problematic points
    times.extend([n_times - 1, n_times, 2 * n_times - 1])
    for ti in times:  # let's do a subset of points for speed
        orig = raw[:, ti % n_times][0]
        for raw_combo in raw_combos:
            # allclose because of possible dtype differences
            assert_allclose(orig, raw_combo[:, ti][0])
    # verify that combining raws with different projectors throws an exception
    raw.add_proj([], remove_existing=True)
    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
    # now test event treatment for concatenated raw files
    events = [find_events(raw, stim_channel='STI 014'),
              find_events(raw, stim_channel='STI 014')]
    last_samps = [raw.last_samp, raw.last_samp]
    first_samps = [raw.first_samp, raw.first_samp]
    events = concatenate_events(events, first_samps, last_samps)
    events2 = find_events(raw_combos[0], stim_channel='STI 014')
    assert_array_equal(events, events2)
    # check out the len method
    assert_true(len(raw) == raw.n_times)
    assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
### QUICK HACK ### raw_list[-1].apply_proj() #raw_list[-1].proj = False #raw_list[-1]._projector = None ############ raw_list[-1].info['projs'] = projOverwrite #concatenate raw raw = mne.concatenate_raws(raw_list) #raw = mne.io.RawFIFF(raw_fileNames, add_eeg_ref=True) # Read in list of events events = mne.concatenate_events([mne.read_events(l_dir) for l_dir in list_fileNames], raw._first_samps, raw._last_samps) ################################### # generate epochs object from events list and raw file #tempEpo = generateEpochs(raw_dir, list_dir)) eventDict = {'pitch_maintain': 1, 'space_maintain': 2, 'pitch_switch': 3, 'space_switch': 4} tmin = -0.2 tmax = 4.75 tempEpo = mne.Epochs(raw, events, event_id=eventDict, tmin=tmin, tmax=tmax, preload=True, proj=True, verbose=False) #tempEpo.drop_bad_epochs() #Done if preload=True above assert all(len(x) == 0 or 'IGNORED' in x for x in tempEpo.drop_log)