def concat_raw_cnt(raw_file_list, preload=True, data_format='int32',
                   eog='header', montage=None):
    """Open a list of .cnt files and concatenate them into one Raw object.

    Parameters
    ----------
    raw_file_list : list of str
        Paths of the .cnt files; they are concatenated in list order.
    preload : bool
        Whether to load the data into memory.
    data_format : str
        Sample format forwarded to ``mne.io.read_raw_cnt``.
    eog : str | list
        EOG channel specification forwarded to ``mne.io.read_raw_cnt``.
    montage : instance of montage | None
        Sensor montage. Defaults to the standard 10-20 montage.

    Returns
    -------
    raw : mne.io.Raw
        Concatenation of all the input files.
    """
    # Resolve the montage default lazily: calling read_montage() in the
    # signature would execute once at import time and share one montage
    # object across every call.
    if montage is None:
        montage = mne.channels.read_montage('standard_1020')
    raw = mne.io.read_raw_cnt(raw_file_list[0], preload=preload,
                              data_format=data_format, eog=eog,
                              montage=montage)
    for fname in raw_file_list[1:]:
        raw_tmp = mne.io.read_raw_cnt(fname, preload=preload,
                                      data_format=data_format, eog=eog,
                                      montage=montage)
        # concatenate_raws modifies its first argument in place.
        mne.concatenate_raws([raw, raw_tmp])
    return raw
def noseizureMerge(dirName, count):
    """Merge ``count`` randomly chosen no-seizure segments into one .fif file.

    Parameters
    ----------
    dirName : str
        Directory containing a ``noseizure`` sub-folder of .fif segments.
    count : int
        Number of randomly selected segments to merge.
    """
    merge_of_noseizure = []
    files = os.listdir('{}/{}'.format(dirName, 'noseizure'))
    # random.shuffle() shuffles in place and returns None; the original
    # ``random.shuffle(files)[:count]`` raised TypeError. Shuffle first,
    # then slice.
    random.shuffle(files)
    for fileItem in files[:count]:
        raw = mne.io.read_raw_fif('{}/{}/{}'.format(dirName, 'noseizure',
                                                    fileItem))
        merge_of_noseizure.append(raw)
    # Save into dirName; the original referenced an undefined global
    # ``path`` here, which raised NameError.
    mne.concatenate_raws(merge_of_noseizure).save(
        '{}/merged_noseizure.fif'.format(dirName))
    print(f"You have {count} noseizure segments merged.")
def get_dataset_low_v_high(data_folder, dataset_id, subject, ShowFig=False):
    """ Load Low Versus High Auditory Stimuli on Tinnitus patient and control.

    Note here that the EEG data are in µV while MNE use V. Therefore scale
    is with a 1e6 factor and it could cause a problem for non-linear related
    MNE analysing. I advice to apply a 1e-6 factor in the future to make
    sure that everything is working fine with mne.
    For classification, it is adviced to keep data in µV.

    Returns
    -------
    raw_0, raw_1 : mne.io.Raw
        Concatenated runs for the 'low' and 'high' conditions.
    events0, events1 : ndarray
        Events kept after epoch rejection, per condition.
    """
    # load all session from this subject
    runs, labels, _, _ = load_sessions_raw(data_folder, dataset_id, subject)
    # split session into conditions
    runs_0 = list(compress(runs, [x == 'low' for x in labels]))
    runs_1 = list(compress(runs, [x == 'high' for x in labels]))
    raw_0 = mne.concatenate_raws(runs_0)
    raw_1 = mne.concatenate_raws(runs_1)
    # rename table for event to annotations (the original defined event_id0
    # twice; one definition per condition is enough)
    event_id0 = {'BAD_data': 0, 'bad EPOCH': 100,
                 'BAD boundary': 100, 'EDGE boundary': 100}
    event_id1 = {'BAD_data': 1, 'bad EPOCH': 100,
                 'BAD boundary': 100, 'EDGE boundary': 100}
    # vizualization
    if ShowFig:
        scalings = dict(eeg=10e1)
        mne.viz.plot_raw(raw_0, scalings=scalings)
    # extract events from annotations
    # note: get_events() outputs
    # tmp[0], tmp[1], tmp[2]: events, epochs2keep, epochs2drop
    tmp = zeta.data.stim.get_events(raw_0, event_id0)
    events0 = tmp[0][tmp[1], :]
    tmp = zeta.data.stim.get_events(raw_1, event_id1)
    events1 = tmp[0][tmp[1], :]
    # events visualization
    if ShowFig:
        # Bug fix: plt.subplot(211) returns a single Axes and cannot be
        # unpacked into (fig, ax); plt.subplots(2, 1) returns the figure
        # plus an array of two axes, which is what the calls below index.
        fig, ax = plt.subplots(2, 1)
        color = {0: 'green', 100: 'red'}
        mne.viz.plot_events(events0, raw_0.info['sfreq'], raw_0.first_samp,
                            color=color, event_id=event_id0, axes=ax[0])
        mne.viz.plot_events(events1, raw_0.info['sfreq'], raw_0.first_samp,
                            color=color, event_id=event_id1, axes=ax[1])
    return raw_0, raw_1, events0, events1
def seizureMerge(dirName):
    """Concatenate every seizure segment below ``dirName`` into one file.

    Walks each sub-folder of ``dirName``, reads every file inside as a Raw
    object, concatenates them all and saves the result as
    ``merged_seizure.fif`` directly under ``dirName``.

    Returns
    -------
    int
        The number of segments that were merged.
    """
    collected = []
    n_segments = 0
    for folder in os.listdir(dirName):
        for fileItem in os.listdir('{}/{}'.format(dirName, folder)):
            segment = mne.io.read_raw_fif('{}/{}/{}'.format(
                dirName, folder, fileItem))
            collected.append(segment)
            n_segments += 1
    merged = mne.concatenate_raws(collected)
    merged.save('{}/merged_seizure.fif'.format(dirName))
    print(f"You have {n_segments} seizure segments merged.")
    return n_segments
def get_Xy(sub=1):
    """Build a supervised (X, y) pair from one PhysionetMI subject's raws.

    MI Interval: [0., 3.]; All Interval: [-2., 5.]
    y: 0 -> left, 1 -> right
    subs: s1 ~ ss52
    """
    dataset = physionet_mi.PhysionetMI()
    session = dataset.get_data(subjects=[sub])[sub]["session_0"]
    raw = mne.concatenate_raws(list(session.values()))
    # Band-pass filter a copy (mu/beta band), leaving the original intact.
    # ref: https://mne.tools/stable/auto_examples/decoding/plot_decoding_csp_eeg.html?highlight=montage
    filtered = raw.copy()
    filtered.filter(8., 30.)
    X, y = get_Xy_fromRaw(filtered, stim_channel="STI 014",
                          event_id=dict(left_hand=2, right_hand=3,
                                        feet=5, hands=4),
                          interval=[0, 3])
    # Drop the trailing sample; shift labels so left=0, right=1.
    return X.astype("float32")[:, :, :-1], y.astype("int64") - 2
def load_raw_data(self, subject, series):
    """Load data for a subject / series."""
    is_test = series == TEST_SERIES
    pattern = ('../data/test/subj%d_series%d_data.csv' if is_test
               else '../data/train/subj%d_series%d_data.csv')
    matched = [glob(pattern % (subject, i)) for i in series]
    fnames = sorted(np.concatenate(matched))
    # One Raw per CSV; events only exist for training data.
    raws = [creat_mne_raw_object(fname, read_events=not is_test)
            for fname in fnames]
    combined = concatenate_raws(raws)
    # pick eeg signal, samples-first, then preprocess
    eeg_picks = pick_types(combined.info, eeg=True)
    self.data = preprocessData(combined._data[eeg_picks].transpose())
    if not is_test:
        # rows 32+ of the raw array hold the event channels
        self.events = combined._data[32:].transpose()
def plot_events(cfg, subject, session):
    """Plot the events of one subject/session from the filtered raw runs.

    Reads every filtered run (preferring the first split when the recording
    is split across files), concatenates them, converts annotations to
    events and returns the resulting event figure.
    """
    base_path = BIDSPath(subject=subject, session=session, task=cfg.task,
                         acquisition=cfg.acq, recording=cfg.rec,
                         space=cfg.space, processing='filt', suffix='raw',
                         extension='.fif', datatype=cfg.datatype,
                         root=cfg.deriv_root, check=False)
    filtered_runs = []
    for run in cfg.runs:
        run_path = base_path.copy().update(run=run)
        # Split recordings store the data under a 'split-01' variant.
        if run_path.copy().update(split='01').fpath.exists():
            run_path.update(split='01')
        filtered_runs.append(mne.io.read_raw_fif(run_path))
        del run_path
    # Concatenate the filtered raws and extract the events.
    concatenated = mne.concatenate_raws(filtered_runs, on_mismatch='warn')
    events, event_id = mne.events_from_annotations(raw=concatenated)
    return mne.viz.plot_events(events=events, event_id=event_id,
                               first_samp=concatenated.first_samp,
                               sfreq=concatenated.info['sfreq'],
                               show=False)
def _load_epochs_internal(
        self, experiment, subject, blocks, stimulus_to_name_time_pairs,
        baseline=None, verbose=False, add_eeg_ref=False, **kwargs):
    """Load the given blocks, build per-block events from stimulus times,
    concatenate everything into one virtual Raw, and epoch it.

    Parameters
    ----------
    experiment, subject : forwarded to ``self.load_block``.
    blocks : iterable of block identifiers to load.
    stimulus_to_name_time_pairs : callable
        Maps one stimulus to a list of ``(name, time)`` pairs.
    baseline, verbose : forwarded to ``mne.Epochs``.
    add_eeg_ref : bool
        If True, apply an EEG reference after epoching (newer MNE) or via
        the ``add_eeg_ref`` keyword (older MNE).
    **kwargs : forwarded to ``mne.Epochs``.

    Returns
    -------
    (epochs, names, events_list) where ``names[i]`` labels the event whose
    event-id is ``i`` and ``events_list`` holds one event array per block.
    """
    all_raw_objects = list()
    names = list()
    events_list = list()
    # Event ids must be unique across blocks, so each block's ids start
    # where the previous block's stopped.
    event_id_offset = 0
    for block in blocks:
        mne_raw, stimuli, event_load_fix_info = self.load_block(
            experiment, subject, block)
        events = list()
        for item in chain.from_iterable(
                map(stimulus_to_name_time_pairs, stimuli)):
            if len(item) != 2:
                raise ValueError(
                    'Expected stimulus_to_name_time_pairs to return a list of pairs for each '
                    'stimulus. Are you returning just a single pair? Got: {}'.format(item))
            name, time = item
            # Convert the stimulus time to a sample index in this block.
            sample_index = numpy.searchsorted(mne_raw.times, time,
                                              side='left')
            # MNE events are absolute, so add the block's first_samp;
            # the third column is a globally unique event id.
            events.append(numpy.array([sample_index + mne_raw.first_samp, 0,
                                       len(events) + event_id_offset]))
            names.append(name)
        event_id_offset += len(events)
        events_list.append(numpy.array(events))
        all_raw_objects.append(mne_raw)
    # concatenate_raws shifts each block's events into the concatenated
    # timeline when events_list is supplied.
    virtual_raw, all_events = mne.concatenate_raws(
        all_raw_objects, preload=False, events_list=events_list)
    try:
        epochs = mne.Epochs(
            virtual_raw, all_events, add_eeg_ref=add_eeg_ref,
            baseline=baseline, verbose=verbose, **kwargs)
    except TypeError:
        # add_eeg_ref is gone in newer MNE versions; fall back and apply
        # the reference manually below.
        epochs = mne.Epochs(virtual_raw, all_events, baseline=baseline,
                            verbose=verbose, **kwargs)
    if add_eeg_ref:
        epochs.load_data()
        epochs.set_eeg_reference()
    return epochs, names, events_list
def test_data():
    """Test reading raw kit files """
    raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
                           stim=list(range(167, 159, -1)), slope='+',
                           stimthresh=1, preload=True)
    print(repr(raw_kit))
    # Binary file only stores the sensor channels
    kit_picks = pick_types(raw_kit.info, exclude='bads')
    raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'), preload=True)
    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
    data_bin, _ = raw_bin[bin_picks]
    data_kit, _ = raw_kit[kit_picks]
    # this .mat was generated using the Yokogawa MEG Reader
    data_Ykgw = scipy.io.loadmat(op.join(data_dir, 'test_Ykgw.mat'))['data']
    data_Ykgw = data_Ykgw[kit_picks]
    assert_array_almost_equal(data_kit, data_Ykgw)
    # Compare against the binary reference including stim channels.
    kit_picks = pick_types(raw_kit.info, stim=True, ref_meg=False,
                           exclude='bads')
    data_kit, _ = raw_kit[kit_picks]
    assert_array_almost_equal(data_kit, data_bin)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_kit.copy(), raw_kit])
    assert_equal(raw_concat.n_times, 2 * raw_kit.n_times)
def test_edf_data():
    """Test reading raw edf files"""
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]
    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]
    assert_array_almost_equal(data_py, data_eeglab, 10)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]
    # match upsampling: use floor division — on Python 3 the original true
    # division yields a float and np.repeat raises for non-integer repeats
    upsample = len(data_eeglab) // len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)
def load_subject_to_raw(self, subject_name, runs, preprocess=True):
    """Load the given runs of a subject and join them into a single Raw."""
    per_run = [self.load_session_to_raw(subject_name, run, preprocess)
               for run in runs]
    return concatenate_raws(per_run)
def load_raw_data(self, subject, series):
    """ Load data for a subject / series.

    n_points: int. The number of timepoints that can be predict/train.
    Because the timepoints in the start are not valid for windows or
    there are no velocity.
    """
    # test = series == TEST_SERIES
    test = False
    if test:
        fnames = [glob('../data/test/subj%d_series%d_data.csv'
                       % (subject, i)) for i in series]
    else:
        fnames = [glob(get_horizo_path(subject, i)) for i in series]
    fnames = sorted(np.concatenate(fnames))
    self.fnames = fnames
    action_1D_type = 'HO'
    raws = [creat_mne_raw_object(fname, idx, read_events=action_1D_type)
            for idx, fname in enumerate(fnames)]
    merged = concatenate_raws(raws)
    # pick eeg signal
    eeg_picks = pick_types(merged.info, eeg=True)
    self.data = preprocessData(merged._data[eeg_picks].transpose())
    # Samples before START_TRAIN are unusable (no full window / velocity).
    self.n_points = self.data.shape[0] - START_TRAIN
    if not test:
        self.events = merged._data[14:].transpose()
def plot_events(subject, session):
    """Concatenate a subject/session's filtered runs and plot their events."""
    bids_path = BIDSPath(subject=subject, session=session,
                         task=config.get_task(), acquisition=config.acq,
                         recording=config.rec, space=config.space,
                         processing='filt', suffix='raw', extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root, check=False)
    filtered = []
    for run in config.get_runs():
        run_fname = bids_path.copy().update(run=run)
        filtered.append(mne.io.read_raw_fif(run_fname))
        del run_fname
    # Concatenate the filtered raws and extract the events.
    concat = mne.concatenate_raws(filtered)
    events, event_id = mne.events_from_annotations(raw=concat)
    return mne.viz.plot_events(events=events, event_id=event_id,
                               first_samp=concat.first_samp,
                               sfreq=concat.info['sfreq'], show=False)
def test_crop_more():
    """Test more cropping."""
    raw = mne.io.read_raw_fif(fif_fname).crop(0, 11).load_data()
    raw._data[:] = np.random.RandomState(0).randn(*raw._data.shape)
    onset = np.array([0.47058824, 2.49773765, 6.67873287, 9.15837097])
    duration = np.array([0.89592767, 1.13574672, 1.09954739, 0.48868752])
    raw.set_annotations(mne.Annotations(onset, duration, 'BAD'))
    assert len(raw.annotations) == 4
    delta = 1. / raw.info['sfreq']
    offset = raw.first_samp * delta
    # Split into three touching segments and glue them back together.
    pieces = [raw.copy().crop(0, 4 - delta),
              raw.copy().crop(4, 8 - delta),
              raw.copy().crop(8, None)]
    raw_concat = mne.concatenate_raws(pieces)
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0])
    assert raw_concat.first_samp == raw.first_samp
    # The two joins add boundary annotations; verify and strip them.
    assert_and_remove_boundary_annot(raw_concat, 2)
    assert len(raw_concat.annotations) == 4
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    assert_allclose(raw.annotations.duration, duration)
    assert_allclose(raw_concat.annotations.duration, duration)
    assert_allclose(raw.annotations.onset, onset + offset)
    assert_allclose(raw_concat.annotations.onset, onset + offset,
                    atol=1. / raw.info['sfreq'])
def test_edf_data():
    """Test reading raw edf files """
    edf_raw = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                           preload=True)
    eeg_picks = pick_types(edf_raw.info, meg=False, eeg=True,
                           exclude=['EDF Annotations'])
    edf_data, _ = edf_raw[eeg_picks]
    print(edf_raw)  # to test repr
    print(edf_raw.info)  # to test Info repr
    # this .mat was generated using the EEG Lab Biosemi Reader
    eeglab_mat = io.loadmat(edf_eeglab_path)
    eeglab_arr = eeglab_mat['data'] * 1e-6  # data are stored in microvolts
    assert_array_almost_equal(edf_data, eeglab_arr[eeg_picks])
    # Make sure concatenation works
    doubled = concatenate_raws([edf_raw.copy(), edf_raw])
    assert_equal(doubled.n_times, 2 * edf_raw.n_times)
def test_bdf_data():
    """Test reading raw bdf files """
    bdf_raw = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
                           misc=misc, preload=True)
    eeg_picks = pick_types(bdf_raw.info, meg=False, eeg=True, exclude='bads')
    bdf_data, _ = bdf_raw[eeg_picks]
    print(bdf_raw)  # to test repr
    print(bdf_raw.info)  # to test Info repr
    # this .mat was generated using the EEG Lab Biosemi Reader
    eeglab_mat = io.loadmat(bdf_eeglab_path)
    eeglab_arr = eeglab_mat['data'] * 1e-6  # data are stored in microvolts
    assert_array_almost_equal(bdf_data, eeglab_arr[eeg_picks])
    # Manually checking that float coordinates are imported
    for idx in (0, 25, 63):
        assert_true((bdf_raw.info['chs'][idx]['eeg_loc']).any())
    # Make sure concatenation works
    doubled = concatenate_raws([bdf_raw.copy(), bdf_raw])
    assert_equal(doubled.n_times, 2 * bdf_raw.n_times)
def test_io_egi():
    """Test importing EGI simple binary files"""
    # test default
    tempdir = _TempDir()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', category=RuntimeWarning)
        # Loading without an include list should warn that no usable
        # event code was found.
        raw = read_raw_egi(egi_fname, include=None)
        assert_true('RawEGI' in repr(raw))
        raw.load_data()  # currently does nothing
        assert_equal(len(w), 1)
        assert_true(w[0].category == RuntimeWarning)
        msg = 'Did not find any event code with more than one event.'
        assert_true(msg in '%s' % w[0].message)
    include = ['TRSP', 'XXX1']
    raw = read_raw_egi(egi_fname, include=include)
    repr(raw)
    repr(raw.info)
    assert_equal('eeg' in raw, True)
    # Round-trip through FIF and check data/times survive save + reload.
    out_fname = op.join(tempdir, 'test_egi_raw.fif')
    raw.save(out_fname)
    raw2 = Raw(out_fname, preload=True)
    data1, times1 = raw[:10, :]
    data2, times2 = raw2[:10, :]
    assert_array_almost_equal(data1, data2, 9)
    assert_array_almost_equal(times1, times2)
    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
    assert_equal(len(eeg_chan), 256)
    picks = pick_types(raw.info, eeg=True)
    assert_equal(len(picks), 256)
    assert_equal('STI 014' in raw.ch_names, True)
    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 2)  # ground truth
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert_true(np.unique(events[:, 0])[0] != 0)
    assert_true(np.unique(events[:, 2])[0] != 0)
    triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]])
    # test trigger functionality
    # Overlapping trigger channels cannot be combined into one stim channel.
    assert_raises(RuntimeError, _combine_triggers, triggers, None)
    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
    events_ids = [12, 24]
    new_trigger = _combine_triggers(triggers, events_ids)
    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))
    # Unknown include/exclude labels are rejected.
    assert_raises(ValueError, read_raw_egi, egi_fname, include=['Foo'])
    assert_raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'])
    for ii, k in enumerate(include, 1):
        assert_true(k in raw.event_id)
        assert_true(raw.event_id[k] == ii)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw.copy(), raw])
    assert_equal(raw_concat.n_times, 2 * raw.n_times)
def test_bdf_data():
    """Test reading raw bdf files"""
    bdf_raw = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
                           misc=misc, preload=True)
    assert_true('RawEDF' in repr(bdf_raw))
    eeg_picks = pick_types(bdf_raw.info, meg=False, eeg=True, exclude='bads')
    bdf_data, _ = bdf_raw[eeg_picks]
    # this .mat was generated using the EEG Lab Biosemi Reader
    eeglab_mat = io.loadmat(bdf_eeglab_path)
    eeglab_data = (eeglab_mat['data'] * 1e-6)[eeg_picks]  # stored in microvolts
    # bdf saved as a single, resolution to seven decimal points in matlab
    assert_array_almost_equal(bdf_data, eeglab_data, 8)
    # Manually checking that float coordinates are imported
    for idx in (0, 25, 63):
        assert_true((bdf_raw.info['chs'][idx]['eeg_loc']).any())
    # Make sure concatenation works
    doubled = concatenate_raws([bdf_raw.copy(), bdf_raw])
    assert_equal(doubled.n_times, 2 * bdf_raw.n_times)
def EEGconcatenateFolder(folder, nchans, refchans, fs_new=None, exclude=None):
    """Concatenate all the EEG (.bdf) files found in one folder.

    Assumes files are in format output from biosemi ... subj.bdf,
    subj+001.bdf, etc. Also folder should end with a '/'.

    Parameters
    ----------
    folder : str
        Directory containing the .bdf files (with trailing '/').
    nchans : int
        Number of channels, forwarded to ``bs.importbdf``.
    refchans : list
        Reference channels (usually EXG1 and EXG2).
    fs_new : float | None
        If given, resample each file to this rate and rescale the event
        sample indices accordingly.
    exclude : list | None
        Channels to exclude when importing.

    Returns
    -------
    EEG_full, events_full
        The concatenated Raw object and the merged events array.
    """
    # None-sentinel defaults replace the original mutable default
    # arguments ([]), which are shared across calls.
    if fs_new is None:
        fs_new = []
    if exclude is None:
        exclude = []
    EEGfiles = listdir(folder)
    EEGfiles.sort()
    # This line and previous fix the order of files: the alphabetically
    # last entry (subj.bdf without a numeric suffix) is actually first.
    EEGfiles.insert(0, EEGfiles.pop(len(EEGfiles) - 1))
    print(EEGfiles)
    raw = []
    events = []
    for eeg_f in EEGfiles:
        # uses EXG1 and EXG2 as reference usually
        raw_temp, events_temp = bs.importbdf(folder + eeg_f, nchans,
                                             refchans, exclude=exclude)
        if numel(fs_new):
            print('Resample raw data and update eves indices')
            # Rescale event sample indices to the new sampling rate.
            events_temp[:, 0] = np.round(
                events_temp[:, 0] / raw_temp.info['sfreq'] *
                fs_new).astype('int')
            raw_temp.resample(fs_new)
        raw.append(raw_temp)
        events.append(events_temp)
    EEG_full, events_full = concatenate_raws(raw, events_list=events)
    return EEG_full, events_full
def load_raw_data(self, subject, series):
    """Load data for a subject / series."""
    # test = series == TEST_SERIES
    test = False
    if test:
        fnames = [glob('../data/test/subj%d_series%d_data.csv'
                       % (subject, i)) for i in series]
    else:
        fnames = [glob(get_horizo_path(subject, i)) for i in series]
    fnames = sorted(np.concatenate(fnames))
    self.fnames = fnames
    action_1D_type = 'HO'
    per_file = [creat_mne_raw_object(fname, idx, read_events=action_1D_type)
                for idx, fname in enumerate(fnames)]
    merged = concatenate_raws(per_file)
    # pick eeg signal
    eeg_picks = pick_types(merged.info, eeg=True)
    self.data = preprocessData(merged._data[eeg_picks].transpose())
    if not test:
        # rows 14+ of the raw array carry the event/label traces
        self.events = merged._data[14:].transpose()
def loadEEG(path):
    """
    Load data from .bdf files. If an array of path is provided,
    files will be concatenated.

    Parameters
    ----------
    path : str | array-like of str
        Path to the .bdf file(s) to load.

    Returns
    -------
    raw : instance of mne.io.edf.edf.RawEDF
        RawEDF object from the MNE library containing data from the
        .bdf files.
    """
    if isinstance(path, list):
        temp = []
        for i in path:
            data = mne.io.read_raw_edf(i)
            temp.append(data)
        # Bug fix: ``print temp`` was Python 2 syntax and a SyntaxError
        # under Python 3.
        print(temp)
        raw = mne.concatenate_raws(temp)
    else:
        raw = mne.io.read_raw_edf(path)
    return raw
def get_Xy_augmented(sub=1):
    """Build an augmented supervised (X, y) pair for one PhysionetMI subject.

    MI Interval: [0., 3.]; All Interval: [-2., 5.]
    y: 0 -> left, 1 -> right
    subs: s1 ~ ss52
    """
    physionetMI = physionet_mi.PhysionetMI()
    # Bug fix: fetch the *requested* subject — the subjects list was
    # hard-coded to [1], so datamap[sub] raised KeyError for sub != 1.
    datamap = physionetMI.get_data(subjects=[sub])
    raw = mne.concatenate_raws(
        [datamap[sub]["session_0"][_] for _ in datamap[sub]["session_0"]])
    raw_band = raw.copy()
    # Apply band-pass filter
    raw_band.filter(8., 30.)
    X, y = get_Xy_fromRaw(raw_band, stim_channel="STI 014",
                          event_id=dict(left_hand=2, right_hand=3,
                                        feet=5, hands=4),
                          interval=[-1., 4.])
    # Shift labels so left=0, right=1, then augment.
    return augment_train_data(X.astype("float32"), y.astype("int64") - 2,
                              FS=160)
def read_maxfiltered(name, save_dir):
    """Read all split parts of a maxfiltered recording and concatenate them.

    Parts are expected to be named ``<name>-tsss-mc_meg.fif`` and then
    ``<name>-tsss-mc_meg-<k>.fif`` for k = 1, 2, ...; scanning stops at the
    first part that cannot be opened.

    Returns
    -------
    raw : mne.io.Raw
        Concatenation of all parts found.
    """
    split_string_number = 0
    raws = []
    while True:
        if split_string_number > 0:
            split_string_part = '-' + str(split_string_number)
        else:
            split_string_part = ''
        raw_name = name + '-tsss-mc_meg' + split_string_part + '.fif'
        raw_path = join(save_dir, raw_name)
        try:
            raw_part = mne.io.Raw(raw_path, preload=True)
        except OSError:
            # A missing/unreadable file ends the scan. The original bare
            # ``except:`` also swallowed KeyboardInterrupt, SystemExit and
            # genuine programming errors.
            break
        raws.append(raw_part)
        split_string_number += 1
    print(str(split_string_number) + ' raw files were read')
    raw = mne.concatenate_raws(raws)
    return raw
def test_crop_more():
    """Test more cropping."""
    raw = mne.io.read_raw_fif(fif_fname).crop(0, 11).load_data()
    raw._data[:] = np.random.RandomState(0).randn(*raw._data.shape)
    onset = np.array([0.47058824, 2.49773765, 6.67873287, 9.15837097])
    duration = np.array([0.89592767, 1.13574672, 1.09954739, 0.48868752])
    raw.set_annotations(mne.Annotations(onset, duration, 'BAD'))
    assert len(raw.annotations) == 4
    delta = 1. / raw.info['sfreq']
    offset = raw.first_samp * delta
    # Split into three touching segments and glue them back together.
    segments = [raw.copy().crop(0, 4 - delta),
                raw.copy().crop(4, 8 - delta),
                raw.copy().crop(8, None)]
    raw_concat = mne.concatenate_raws(segments)
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0])
    assert raw_concat.first_samp == raw.first_samp
    # Each of the two joins adds one 'BAD boundary' and one 'EDGE boundary'
    # annotation; verify the counts and strip them.
    for kind in ('BAD boundary', 'EDGE boundary'):
        idx = np.where(raw_concat.annotations.description == kind)[0]
        assert len(idx) == 2
        raw_concat.annotations.delete(idx)
    assert len(raw_concat.annotations) == 4
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    assert_allclose(raw.annotations.duration, duration)
    assert_allclose(raw_concat.annotations.duration, duration)
    assert_allclose(raw.annotations.onset, onset + offset)
    assert_allclose(raw_concat.annotations.onset, onset + offset,
                    atol=1. / raw.info['sfreq'])
def load_csv_as_raw(
        fnames, sfreq, ch_ind, aux_ind=None, replace_ch_names=None,
        verbose=1):
    """Load CSV files into an MNE Raw object.

    Args:
        fnames (array_like): list of filename(s) to load. Should end with
            ".csv".
        sfreq (float): sampling frequency of the data.
        ch_ind (array_like): column indices to keep from the CSV files.

    Keyword Args:
        aux_ind (array_like or None): list of indices for columns containing
            auxiliary channels.
        replace_ch_names (array_like or None): list of channel name mappings
            for the selected columns.
        verbose (int): verbose level.

    Returns:
        (mne.io.RawArray): concatenation of the specified filenames into a
            single Raw object.
    """
    # Deep-copy so appending aux indices below never mutates the caller's
    # list.
    ch_ind = copy.deepcopy(ch_ind)
    n_eeg = len(ch_ind)
    if aux_ind is not None:
        n_aux = len(aux_ind)
        ch_ind += aux_ind
    else:
        n_aux = 0
    raw = []
    for fn in fnames:
        # Read the file
        data = pd.read_csv(fn)
        # Channel names and types; the last column is always the stim
        # channel. (Leftover debug print() calls removed here.)
        ch_names = [list(data.columns)[i] for i in ch_ind] + ["stim"]
        ch_types = ["eeg"] * n_eeg + ["misc"] * n_aux + ["stim"]
        if replace_ch_names is not None:
            ch_names = [
                c if c not in replace_ch_names.keys()
                else replace_ch_names[c] for c in ch_names
            ]
        # Transpose EEG data and convert from uV to Volts
        data = data.values[:, ch_ind + [-1]].T
        data[:-1] *= 1e-6
        # create MNE object
        info = create_info(ch_names=ch_names, ch_types=ch_types,
                           sfreq=sfreq, verbose=1)
        raw.append(RawArray(data=data, info=info, verbose=verbose))
    raws = concatenate_raws(raw, verbose=verbose)
    montage = make_standard_montage("standard_1005")
    raws.set_montage(montage)
    return raws
def load_to_epochs(fnames, event_ids, im_times, filt):
    """Read, band-pass filter, concatenate and epoch a set of .bdf files.

    Each file is IIR band-pass filtered between ``filt[0]`` and ``filt[1]``,
    all recordings are concatenated, and epochs are cut around the stim
    events using the ``im_times`` window with a (None, 0) baseline.
    """
    import mne
    import numpy as np
    import os.path as op
    infolder, outfolder = find_folder()
    baseline = (None, 0)
    #montage = mne.channels.read_montage("standard_1020")
    filtered = []
    for fname in fnames:
        full_path = op.join(infolder, fname)
        recording = mne.io.read_raw_bdf(full_path, preload=True)
        filtered.append(recording.filter(filt[0], filt[1], method='iir'))
    raw = mne.concatenate_raws(filtered)
    events = mne.find_events(raw, initial_event=True, consecutive=True,
                             shortest_event=1)
    epochs = mne.Epochs(raw, events, event_ids, im_times[0], im_times[1],
                        baseline=baseline, preload=True)
    return epochs
def test_data():
    """Test reading raw nicolet files."""
    tempdir = _TempDir()
    raw_lazy = read_raw_nicolet(fname, preload=False)
    raw_eager = read_raw_nicolet(fname, preload=True)
    picks = [2, 3, 12, 13]
    # Lazy and preloaded reads must agree sample-for-sample.
    assert_array_equal(raw_lazy[picks, 20:30][0],
                       raw_eager[picks, 20:30][0])
    # Make sure concatenation works
    doubled = concatenate_raws([raw_eager.copy(), raw_eager])
    # Test saving and reading
    out_fname = op.join(tempdir, 'test_nicolet_raw.fif')
    doubled.save(out_fname, tmax=raw_lazy.times[-1])
    doubled = Raw(out_fname)
    full_data = raw_eager._data
    data1, times1 = raw_lazy[:10:3, 10:12]
    data2, times2 = doubled[:10:3, 10:12]
    data3, times3 = doubled[[0, 3, 6, 9], 10:12]
    assert_array_almost_equal(data1, full_data[:10:3, 10:12], 9)
    assert_array_almost_equal(data1, data2, 9)
    assert_array_almost_equal(data1, data3, 9)
    assert_array_almost_equal(times1, times2)
    assert_array_almost_equal(times1, times3)
def load_muse_csv_as_raw(filename, sfreq=256., ch_ind=None, stim_ind=5,
                         replace_ch_names=None, verbose=1):
    """Load CSV files into a Raw object.

    Args:
        filename (str or list): path or paths to CSV files to load

    Keyword Args:
        sfreq (float): EEG sampling frequency
        ch_ind (list or None): indices of the EEG channels to keep;
            defaults to [0, 1, 2, 3]
        stim_ind (int): index of the stim channel
        replace_ch_names (dict or None): dictionary containing a mapping to
            rename channels. Useful when an external electrode was used.
        verbose (int): verbose level

    Returns:
        (mne.io.array.array.RawArray): loaded EEG
    """
    # None sentinel replaces the original mutable default argument.
    if ch_ind is None:
        ch_ind = [0, 1, 2, 3]
    # Bug fix: accept a single path as documented — iterating a bare string
    # would otherwise loop over its characters.
    if isinstance(filename, str):
        filename = [filename]
    n_channel = len(ch_ind)
    raw = []
    for fname in filename:
        # read the file
        data = pd.read_csv(fname, index_col=0)
        # name of each channels
        ch_names = list(data.columns)[0:n_channel] + ['Stim']
        if replace_ch_names is not None:
            ch_names = [c if c not in replace_ch_names.keys()
                        else replace_ch_names[c] for c in ch_names]
        # type of each channels
        ch_types = ['eeg'] * n_channel + ['stim']
        montage = read_montage('standard_1005')
        # get data and exclude Aux channel
        data = data.values[:, ch_ind + [stim_ind]].T
        # convert in Volts (from uVolts)
        data[:-1] *= 1e-6
        # create MNE object
        info = create_info(ch_names=ch_names, ch_types=ch_types,
                           sfreq=sfreq, montage=montage, verbose=verbose)
        raw.append(RawArray(data=data, info=info, verbose=verbose))
    # concatenate all raw objects
    if len(raw) > 0:
        raws = concatenate_raws(raw, verbose=verbose)
    else:
        print('No files for subject with filename ' + str(filename))
        raws = raw
    return raws
def test_events_long():
    """Test events."""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    raw = read_raw_fif(raw_fname, preload=True)
    raw_tmin, raw_tmax = 0, 90
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, vis_l=3)
    # select gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=True, exclude=raw.info['bads'])
    # load data with usual Epochs for later verification
    # Repeat the truncated recording six times so it is long enough for
    # the real-time streaming test below.
    raw = concatenate_raws([raw, raw.copy(), raw.copy(), raw.copy(),
                            raw.copy(), raw.copy()])
    assert 110 < raw.times[-1] < 130
    raw_cropped = raw.copy().crop(raw_tmin, raw_tmax)
    events_offline = find_events(raw_cropped)
    # Offline epochs serve as the ground truth for the real-time epochs.
    epochs_offline = Epochs(raw_cropped, events_offline, event_id=event_id,
                            tmin=tmin, tmax=tmax, picks=picks, decim=1,
                            reject=dict(grad=4000e-13, eog=150e-6),
                            baseline=None)
    epochs_offline.drop_bad()
    # create the mock-client object
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
                         decim=1, reject=dict(grad=4000e-13, eog=150e-6),
                         baseline=None, isi_max=1.)
    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=raw_tmin, tmax=raw_tmax,
                        buffer_size=1000)
    # Real-time events are relative to the cropped data's start.
    expected_events = epochs_offline.events.copy()
    expected_events[:, 0] = expected_events[:, 0] - raw_cropped.first_samp
    assert np.all(expected_events[:, 0] <=
                  (raw_tmax - tmax) * raw.info['sfreq'])
    assert_array_equal(rt_epochs.events, expected_events)
    assert len(rt_epochs) == len(epochs_offline)
    data_picks = pick_types(epochs_offline.info, meg='grad', eeg=False,
                            eog=True, stim=False,
                            exclude=raw.info['bads'])
    # Accumulate the streamed epochs and compare against offline data.
    for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
        if ev_num == 0:
            X_rt = ev.data[None, data_picks, :]
            y_rt = int(ev.comment)  # comment attribute contains the event_id
        else:
            X_rt = np.concatenate((X_rt, ev.data[None, data_picks, :]),
                                  axis=0)
            y_rt = np.append(y_rt, int(ev.comment))
    X_offline = epochs_offline.get_data()[:, data_picks, :]
    y_offline = epochs_offline.events[:, 2]
    assert_array_equal(X_rt, X_offline)
    assert_array_equal(y_rt, y_offline)
def load_raw_data(subject, test=False):
    """Load Raw data from files.

    For a given subject, csv files are loaded, converted to MNE raw
    instance and concatenated.
    If test is True, training data are composed of series 1 to 8 and test
    data of series 9 and test. Otherwise, training data are series 1 to 6
    and test data series 7 and 8.
    """
    # fnames_train = glob('../data/train/subj%d_series*_data.csv' % (subject))
    fnames_train = sorted(glob(get_all_horizon_path_from_the_subject(subject)))
    if test:
        fnames_test = sorted(
            glob(get_all_horizon_path_from_the_subject(subject)))
    else:
        # hold out the last series for validation
        fnames_test, fnames_train = fnames_train[-1:], fnames_train[:-1]
    # read and concatenate all the files
    action_1D_type = 'HO'
    train_raws = [creat_mne_raw_object(fname, i, read_events=action_1D_type)
                  for i, fname in enumerate(fnames_train)]
    raw_train = concatenate_raws(train_raws)
    # pick eeg signal
    picks = pick_types(raw_train.info, eeg=True)
    # get training data
    data_train = raw_train._data[picks].T
    labels_train = raw_train._data[len(CH_NAMES):].T
    test_raws = [creat_mne_raw_object(fname, i, read_events=action_1D_type)
                 for i, fname in enumerate(fnames_test)]
    raw_test = concatenate_raws(test_raws)
    data_test = raw_test._data[picks].T
    # extract labels if validating on series 7&8
    labels_test = None if test else raw_test._data[len(CH_NAMES):].T
    return data_train, labels_train, data_test, labels_test
def run_events(subject_id):
    """Read one subject's raw EEG, annotate blinks, and inspect with ICA.

    NOTE(review): this function plots interactively, applies a hard-coded
    ICA exclusion of component 6 (presumably chosen by manual inspection of
    this dataset — confirm before reuse), and returns nothing.
    """
    subject = "sub_%03d" % subject_id
    print("processing subject: %s" % subject)
    in_path = op.join(
        data_path, "EEG_Process")  #make map yourself in cwd called 'Subjects'
    process_path = op.join(
        data_path, "EEG_Process")  #make map yourself in cwd called 'EEG_Process'
    raw_list = list()
    events_list = list()
    for run in range(1, 2):
        # NOTE(review): fname ignores ``run``, so every iteration would read
        # the same file — harmless while the range covers a single run.
        fname = op.join(in_path, 'sub_%03d_raw.fif' % (subject_id, ))
        raw = mne.io.read_raw_fif(fname, preload=True)
        print("  S %s - R %s" % (subject, run))
        #import events and reorganize
        # Shift all events by a fixed 34.5 ms (presumably a known
        # trigger-to-stimulus delay — confirm against the acquisition setup).
        delay = int(round(0.0345 * raw.info['sfreq']))
        events = mne.read_events(
            op.join(in_path, 'events_%03d-eve.fif' % (subject_id, )))
        events[:, 0] = events[:, 0] + delay
        events_list.append(events)
        raw_list.append(raw)
    raw, events = mne.concatenate_raws(raw_list, events_list=events_list)
    ###some visualizations on the blinks in the raw data file###
    eog_events = mne.preprocessing.find_eog_events(raw)
    # Mark a 0.5 s window starting 0.25 s before each detected EOG event.
    onsets = eog_events[:, 0] / raw.info['sfreq'] - 0.25
    durations = [0.5] * len(eog_events)
    descriptions = ['bad blink'] * len(eog_events)
    blink_annot = mne.Annotations(onsets, durations, descriptions,
                                  orig_time=raw.info['meas_date'])
    raw.set_annotations(blink_annot)
    eeg_picks = mne.pick_types(raw.info, eeg=True)
    raw.plot(events=eog_events, order=eeg_picks)
    ###CONCLUSION: NOT THE BEST ALGORITHM
    #####ICA#####
    ica = ICA(random_state=97, n_components=15)
    picks = mne.pick_types(raw.info, eeg=True, eog=True, stim=False,
                           exclude='bads')
    ica.fit(raw, picks=picks)
    raw.load_data()
    ica.plot_sources(raw)
    ica.plot_components()
    ica.plot_overlay(raw, exclude=[6], picks='eeg')
    #visualize the difference
    raw2 = raw.copy()
    ica.exclude = [6]
    ica.apply(raw2)
    raw2.plot()
    ica.plot_properties(raw, picks=[6])
def test_read_vhdr_annotations_and_events():
    """Test load brainvision annotations and parse them to events."""
    sfreq = 1000.0
    expected_orig_time = 1384359243.794231
    # onsets in samples; divided by sfreq below to compare in seconds
    expected_onset_latency = np.array(
        [0, 486., 496., 1769., 1779., 3252., 3262., 4935., 4945., 5999.,
         6619., 6629., 7629., 7699.]
    )
    expected_annot_description = [
        'New Segment/', 'Stimulus/S253', 'Stimulus/S255', 'Event/254',
        'Stimulus/S255', 'Event/254', 'Stimulus/S255', 'Stimulus/S253',
        'Stimulus/S255', 'Response/R255', 'Event/254', 'Stimulus/S255',
        'SyncStatus/Sync On', 'Optic/O 1'
    ]
    expected_events = np.stack([
        expected_onset_latency,
        np.zeros_like(expected_onset_latency),
        [99999, 253, 255, 254, 255, 254, 255, 253, 255, 1255, 254, 255,
         99998, 2001],
    ]).astype('int64').T
    expected_event_id = {'New Segment/': 99999, 'Stimulus/S253': 253,
                         'Stimulus/S255': 255, 'Event/254': 254,
                         'Response/R255': 1255, 'SyncStatus/Sync On': 99998,
                         'Optic/O 1': 2001}
    raw = read_raw_brainvision(vhdr_path, eog=eog)

    # validate annotations
    assert raw.annotations.orig_time == expected_orig_time
    assert_allclose(raw.annotations.onset, expected_onset_latency / sfreq)
    assert_array_equal(raw.annotations.description,
                       expected_annot_description)

    # validate event extraction
    events, event_id = events_from_annotations(raw)
    assert_array_equal(events, expected_events)
    assert event_id == expected_event_id

    # validate that None gives us a sorted list
    expected_none_event_id = {desc: idx + 1 for idx, desc in enumerate(sorted(
        event_id.keys()))}
    events, event_id = events_from_annotations(raw, event_id=None)
    assert event_id == expected_none_event_id

    # Add some custom ones, plus a 2-digit one
    s_10 = 'Stimulus/S 10'
    raw.annotations.append([1, 2, 3], 10, ['ZZZ', s_10, 'YYY'])
    expected_event_id.update(YYY=10001, ZZZ=10002)  # others starting at 10001
    expected_event_id[s_10] = 10
    _, event_id = events_from_annotations(raw)
    assert event_id == expected_event_id

    # Concatenating two shouldn't change the resulting event_id
    # (BAD and EDGE should be ignored)
    with pytest.warns(RuntimeWarning, match='expanding outside'):
        raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    _, event_id = events_from_annotations(raw_concat)
    assert event_id == expected_event_id
def test_read_vhdr_annotations_and_events():
    """Test load brainvision annotations and parse them to events."""
    sfreq = 1000.0
    expected_orig_time = 1384359243.794231
    # onsets in samples; divided by sfreq below to compare in seconds
    expected_onset_latency = np.array(
        [0, 486., 496., 1769., 1779., 3252., 3262., 4935., 4945., 5999.,
         6619., 6629., 7629., 7699.]
    )
    expected_annot_description = [
        'New Segment/', 'Stimulus/S253', 'Stimulus/S255', 'Stimulus/S254',
        'Stimulus/S255', 'Stimulus/S254', 'Stimulus/S255', 'Stimulus/S253',
        'Stimulus/S255', 'Response/R255', 'Stimulus/S254', 'Stimulus/S255',
        'SyncStatus/Sync On', 'Optic/O 1'
    ]
    expected_events = np.stack([
        expected_onset_latency,
        np.zeros_like(expected_onset_latency),
        [99999, 253, 255, 254, 255, 254, 255, 253, 255, 1255, 254, 255,
         99998, 2001],
    ]).astype('int64').T
    expected_event_id = {'New Segment/': 99999, 'Stimulus/S253': 253,
                         'Stimulus/S255': 255, 'Stimulus/S254': 254,
                         'Response/R255': 1255, 'SyncStatus/Sync On': 99998,
                         'Optic/O 1': 2001}
    raw = read_raw_brainvision(vhdr_path, eog=eog)

    # validate annotations
    assert raw.annotations.orig_time == expected_orig_time
    assert_allclose(raw.annotations.onset, expected_onset_latency / sfreq)
    assert_array_equal(raw.annotations.description,
                       expected_annot_description)

    # validate event extraction
    events, event_id = events_from_annotations(raw)
    assert_array_equal(events, expected_events)
    assert event_id == expected_event_id

    # validate that None gives us a sorted list
    expected_none_event_id = {desc: idx + 1 for idx, desc in enumerate(sorted(
        event_id.keys()))}
    events, event_id = events_from_annotations(raw, event_id=None)
    assert event_id == expected_none_event_id

    # Add some custom ones, plus a 2-digit one
    s_10 = 'Stimulus/S 10'
    raw.annotations.append([1, 2, 3], 10, ['ZZZ', s_10, 'YYY'])
    expected_event_id.update(YYY=10001, ZZZ=10002)  # others starting at 10001
    expected_event_id[s_10] = 10
    _, event_id = events_from_annotations(raw)
    assert event_id == expected_event_id

    # Concatenating two shouldn't change the resulting event_id
    # (BAD and EDGE should be ignored)
    with pytest.warns(RuntimeWarning, match='expanding outside'):
        raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    _, event_id = events_from_annotations(raw_concat)
    assert event_id == expected_event_id
def test_raw():
    """ Test bti conversion to Raw object """
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        # invalid config / head-shape arguments must raise
        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        ex = Raw(exported, preload=True)
        ra = read_raw_bti(pdf, config, hs)
        assert_true('RawBTi' in repr(ra))
        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                  ra.info['dev_head_t']['trans'], 7)
        # digitization points must agree with the MNE-C export
        dig1, dig2 = [
            np.array([d['r'] for d in r_.info['dig']])
            for r_ in (ra, ex)
        ]
        assert_array_almost_equal(dig1, dig2, 18)
        coil1, coil2 = [
            np.concatenate([d['loc'].flatten()
                            for d in r_.info['chs'][:NCH]])
            for r_ in (ra, ex)
        ]
        assert_array_almost_equal(coil1, coil2, 7)
        loc1, loc2 = [
            np.concatenate([d['loc'].flatten()
                            for d in r_.info['chs'][:NCH]])
            for r_ in (ra, ex)
        ]
        assert_allclose(loc1, loc2)
        assert_array_equal(ra._data[:NCH], ex._data[:NCH])
        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
        # check our transforms
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            if ex.info[key] is None:
                pass
            else:
                assert_true(ra.info[key] is not None)
                for ent in ('to', 'from', 'trans'):
                    assert_allclose(ex.info[key][ent],
                                    ra.info[key][ent])
        # Make sure concatenation works
        raw_concat = concatenate_raws([ra.copy(), ra])
        assert_equal(raw_concat.n_times, 2 * ra.n_times)
        # round-trip through FIF and re-check the transforms
        ra.save(tmp_raw_fname)
        # NOTE(review): `re` shadows the stdlib `re` module name if it is
        # imported at file level — harmless here but worth renaming.
        re = Raw(tmp_raw_fname)
        print(re)
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert_true(isinstance(re.info[key], dict))
            this_t = re.info[key]['trans']
            assert_equal(this_t.shape, (4, 4))
            # check that matrix is not identity
            assert_true(not np.allclose(this_t, np.eye(4)))
        os.remove(tmp_raw_fname)
def load_openBCI_csv_as_raw(
        filename,
        sfreq=256.,
        ch_ind=(0, 1, 2, 3, 4, 5, 6, 7,
                8, 9, 10, 11, 12, 13, 14, 15),
        stim_ind=16,
        replace_ch_names=None,
        verbose=1):
    """Load OpenBCI CSV files into a single MNE Raw object.

    Args:
        filename (list): paths to CSV files to load

    Keyword Args:
        sfreq (float): EEG sampling frequency
        ch_ind (sequence of int): indices of the EEG channels to keep
        stim_ind (int): index of the stim channel
        replace_ch_names (dict or None): dictionary containing a mapping to
            rename channels. Useful when an external electrode was used.
        verbose (int): verbosity level forwarded to MNE

    Returns:
        (mne.io.array.array.RawArray): concatenation of all loaded recordings
    """
    # Copy into a list: accepts any sequence, avoids the shared mutable
    # default pitfall, and keeps the `ch_ind + [stim_ind]` concatenation
    # below valid.
    ch_ind = list(ch_ind)
    n_channel = len(ch_ind)

    raw = []
    for fname in filename:
        # read the file
        data = pd.read_csv(fname, index_col=0)

        # first n_channel columns are EEG; add a synthetic 'Stim' channel name
        ch_names = list(data.columns)[0:n_channel] + ['Stim']
        if replace_ch_names is not None:
            ch_names = [replace_ch_names.get(c, c) for c in ch_names]

        # type of each channel
        ch_types = ['eeg'] * n_channel + ['stim']

        # get data and exclude Aux channel
        data = data.values[:, ch_ind + [stim_ind]].T

        # scale EEG rows (all but the stim row) to Volts.
        # NOTE(review): 1e-9 implies the CSV stores nanoVolts — the original
        # comment was itself unsure ("from nanoVolts?"). OpenBCI commonly
        # exports microVolts (factor 1e-6); confirm against the device.
        data[:-1] *= 1e-9

        montage = make_standard_montage('standard_1005')
        info = create_info(ch_names=ch_names, ch_types=ch_types,
                           sfreq=sfreq, verbose=verbose)
        rawi = RawArray(data=data, info=info, verbose=verbose)
        rawi.set_montage(montage, raise_if_subset=False, match_case=False)
        raw.append(rawi)

    # concatenate all raw objects
    raws = concatenate_raws(raw, verbose=verbose)
    return raws
def test_mf_skips():
    """Test processing of data with skips."""
    raw = read_raw_fif(skip_fname, preload=True)
    raw.fix_mag_coil_types()
    raw.pick_channels(raw.ch_names[:50])  # fast and inaccurate
    kwargs = dict(st_only=True, coord_frame='meg', int_order=4, ext_order=3)
    # smoke test that this runs
    maxwell_filter(raw, st_duration=17., skip_by_annotation=(), **kwargs)
    # and this one, too, which will process some all-zero data
    maxwell_filter(raw, st_duration=2., skip_by_annotation=(), **kwargs)
    with pytest.raises(ValueError, match='duration'):
        # skips decrease acceptable duration
        maxwell_filter(raw, st_duration=17., **kwargs)
    # contiguous (non-skip) segments in this file are 2–3 s long
    onsets, ends = _annotations_starts_stops(
        raw, ('edge', 'bad_acq_skip'), 'skip_by_annotation', invert=True)
    assert (ends - onsets).min() / raw.info['sfreq'] == 2.
    assert (ends - onsets).max() / raw.info['sfreq'] == 3.
    for st_duration in (2., 3.):
        raw_sss = maxwell_filter(raw, st_duration=st_duration, **kwargs)
        for start, stop in zip(onsets, ends):
            orig_data = raw[:, start:stop][0]
            new_data = raw_sss[:, start:stop][0]
            # segments shorter than st_duration are left untouched
            if (stop - start) / raw.info['sfreq'] >= st_duration:
                # Should be modified
                assert not np.allclose(new_data, orig_data, atol=1e-20)
            else:
                # Should not be modified
                assert_allclose(new_data, orig_data, atol=1e-20)
    # Processing an individual file and concat should be equivalent to
    # concat then process
    raw.crop(0, 1)
    raw_sss = maxwell_filter(raw, st_duration=1., **kwargs)
    raw_sss_concat = concatenate_raws([raw_sss, raw_sss.copy()])
    raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    raw_concat_sss = maxwell_filter(raw_concat, st_duration=1., **kwargs)
    raw_concat_sss_bad = maxwell_filter(raw_concat, st_duration=1.,
                                        skip_by_annotation=(), **kwargs)
    data_c = raw_concat[:][0]
    data_sc = raw_sss_concat[:][0]
    data_cs = raw_concat_sss[:][0]
    data_csb = raw_concat_sss_bad[:][0]
    # filtering changed the data, honoring the edge annotation matters,
    # and filter-then-concat equals concat-then-filter
    assert not np.allclose(data_cs, data_c, atol=1e-20)
    assert not np.allclose(data_cs, data_csb, atol=1e-20)
    assert_allclose(data_sc, data_cs, atol=1e-20)
def prepare(datafiles, read_events = True):
    """Concatenate the given data files into a single MNE Raw object.

    When ``read_events`` is true (the default), the companion ``_events``
    CSV files are loaded too and returned as one numpy array; otherwise
    the second element of the returned pair is ``None``.
    """
    raws = []
    for path in datafiles:
        raws.append(file_to_raw(path))
    merged = mne.concatenate_raws(raws)
    if not read_events:
        return merged, None
    # each "_data" file has a sibling "_events" file; drop the id column
    event_chunks = []
    for path in datafiles:
        events_path = path.replace("_data", "_events")
        event_chunks.append(pd.read_csv(events_path).values[:, 1:])
    return merged, np.concatenate(event_chunks)
def test_raw():
    """ Test bti conversion to Raw object """
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        # invalid config / head-shape arguments must raise
        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        ex = Raw(exported, preload=True)
        ra = read_raw_bti(pdf, config, hs)
        assert_true('RawBTi' in repr(ra))
        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                  ra.info['dev_head_t']['trans'], 7)
        # digitization points must agree with the MNE-C export
        dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
                      for r_ in (ra, ex)]
        assert_array_almost_equal(dig1, dig2, 18)
        coil1, coil2 = [np.concatenate([d['loc'].flatten()
                                        for d in r_.info['chs'][:NCH]])
                        for r_ in (ra, ex)]
        assert_array_almost_equal(coil1, coil2, 7)
        loc1, loc2 = [np.concatenate([d['loc'].flatten()
                                      for d in r_.info['chs'][:NCH]])
                      for r_ in (ra, ex)]
        assert_allclose(loc1, loc2)
        assert_array_equal(ra._data[:NCH], ex._data[:NCH])
        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
        # check our transforms
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            if ex.info[key] is None:
                pass
            else:
                assert_true(ra.info[key] is not None)
                for ent in ('to', 'from', 'trans'):
                    assert_allclose(ex.info[key][ent], ra.info[key][ent])
        # Make sure concatenation works
        raw_concat = concatenate_raws([ra.copy(), ra])
        assert_equal(raw_concat.n_times, 2 * ra.n_times)
        # round-trip through FIF and re-check the transforms
        ra.save(tmp_raw_fname)
        # NOTE(review): `re` shadows the stdlib `re` module name if it is
        # imported at file level — harmless here but worth renaming.
        re = Raw(tmp_raw_fname)
        print(re)
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert_true(isinstance(re.info[key], dict))
            this_t = re.info[key]['trans']
            assert_equal(this_t.shape, (4, 4))
            # check that matrix is not identity
            assert_true(not np.allclose(this_t, np.eye(4)))
        os.remove(tmp_raw_fname)
def load_raw_data(subject, test=False):
    """Read one subject's CSV recordings and split them into train/test.

    Each series CSV is converted to an MNE raw instance, then all series
    of a split are concatenated. When ``test`` is True the test split comes
    from the test directory (series 9 and 10); otherwise the last two
    training series (7 and 8) are held out for validation.

    Returns ``(data_train, labels_train, data_test, labels_test)``;
    ``labels_test`` is ``None`` when ``test`` is True.
    """
    train_files = sorted(glob('../data/train/subj%d_series*_data.csv' % (subject)))
    if test:
        test_files = sorted(glob('../data/test/subj%d_series*_data.csv' % (subject)))
    else:
        # hold out the last two series for validation
        train_files, test_files = train_files[:-2], train_files[-2:]

    # training split: read each series, then concatenate
    raw_train = concatenate_raws([creat_mne_raw_object(f) for f in train_files])
    picks = pick_types(raw_train.info, eeg=True)
    data_train = raw_train._data[picks].T
    labels_train = raw_train._data[32:].T  # rows 32+ are the event channels

    # test split: events exist only when validating on held-out series
    raw_test = concatenate_raws(
        [creat_mne_raw_object(f, read_events=not test) for f in test_files])
    data_test = raw_test._data[picks].T
    labels_test = raw_test._data[32:].T if not test else None

    return data_train, labels_train, data_test, labels_test
def test_clean_eog_ecg():
    """Test mne clean_eog_ecg."""
    check_usage(mne_clean_eog_ecg)
    tempdir = _TempDir()
    # triple the sample raw so the cleaning script has enough data
    raw = concatenate_raws([read_raw_fif(f) for f in 3 * [raw_fname]])
    raw.info["bads"] = ["MEG 2443"]
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(("-i", use_fname, "--quiet")):
        mne_clean_eog_ecg.run()
    # the script must emit two projector files and three event files
    proj_files = glob.glob(op.join(tempdir, "*proj.fif"))
    assert_true(len(proj_files) == 2)  # two projs
    eve_files = glob.glob(op.join(tempdir, "*-eve.fif"))
    assert_true(len(eve_files) == 3)  # raw plus two projs
def test_clean_eog_ecg():
    """Test mne clean_eog_ecg"""
    check_usage(mne_clean_eog_ecg)
    tempdir = _TempDir()
    # triple the sample raw so the cleaning script has enough data
    raw = concatenate_raws([Raw(f) for f in 3 * [raw_fname]])
    raw.info['bads'] = ['MEG 2443']
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(('-i', use_fname, '--quiet')):
        mne_clean_eog_ecg.run()
    # the script must emit two projector files and three event files
    proj_files = glob.glob(op.join(tempdir, '*proj.fif'))
    assert_true(len(proj_files) == 2)  # two projs
    eve_files = glob.glob(op.join(tempdir, '*-eve.fif'))
    assert_true(len(eve_files) == 3)  # raw plus two projs
def test_clean_eog_ecg(tmpdir):
    """Test mne clean_eog_ecg."""
    check_usage(mne_clean_eog_ecg)
    tempdir = str(tmpdir)
    # triple the sample raw so the cleaning script has enough data
    raw = concatenate_raws([read_raw_fif(f) for f in 3 * [raw_fname]])
    raw.info['bads'] = ['MEG 2443']
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(('-i', use_fname, '--quiet')):
        mne_clean_eog_ecg.run()
    # expect two projector files and three event files in the output dir
    expected_counts = (('proj', 2), ('-eve', 3))
    for key, count in expected_counts:
        found = glob.glob(op.join(tempdir, '*%s.fif' % key))
        assert len(found) == count
def test_brainvision_data():
    """Test reading raw Brain Vision files """
    # reading the marker file as a header, or a bad `scale`, must raise
    assert_raises(IOError, read_raw_brainvision, vmrk_path)
    assert_raises(TypeError, read_raw_brainvision, vhdr_path, montage,
                  preload=True, scale="0")
    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
    raw_py.load_data()  # currently does nothing
    assert_true('RawBrainVision' in repr(raw_py))
    assert_equal(raw_py.info['highpass'], 0.)
    assert_equal(raw_py.info['lowpass'], 250.)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, times_py = raw_py[picks]
    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr
    # compare with a file that was generated using MNE-C
    raw_bin = Raw(eeg_bin, preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_bin, times_bin = raw_bin[picks]
    assert_array_almost_equal(data_py, data_bin)
    assert_array_almost_equal(times_py, times_bin)
    # Make sure EOG channels are marked correctly
    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
    for ch in raw_py.info['chs']:
        if ch['ch_name'] in eog:
            assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
        elif ch['ch_name'] == 'STI 014':
            assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
        elif ch['ch_name'] in raw_py.info['ch_names']:
            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
        else:
            raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def load_subject(id_num, runs):
    """Read the raw EEG recording(s) for one subject.

    Arguments:
        id_num: int, the subject's ID number
        runs: int or list of ints -- which experiment(s) to read data from

    Returns:
        MNE Raw object
    """
    edf_files = load_data(id_num, runs)
    if len(edf_files) > 1:
        # several runs: read each EDF then stitch them into one Raw
        per_run = [read_raw_edf(path, preload=True) for path in edf_files]
        return concatenate_raws(per_run, preload=True)
    # single run: no concatenation needed
    return read_raw_edf(edf_files[0], preload=True)
def test_data():
    """Test reading raw kit files """
    # wrong-file-type and inconsistent-argument combinations must raise
    assert_raises(TypeError, read_raw_kit, epochs_path)
    assert_raises(TypeError, read_epochs_kit, sqd_path)
    assert_raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_path)
    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
                  list(range(200, 190, -1)))
    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
                  list(range(167, 159, -1)), '*', 1, True)
    # check functionality
    _ = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_path,
                     hsp_path)
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
                          stim=list(range(167, 159, -1)), slope='+',
                          stimthresh=1, preload=True)
    assert_true('RawKIT' in repr(raw_py))
    # Binary file only stores the sensor channels
    py_picks = pick_types(raw_py.info, exclude='bads')
    raw_bin = op.join(data_dir, 'test_bin_raw.fif')
    raw_bin = Raw(raw_bin, preload=True)
    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
    data_bin, _ = raw_bin[bin_picks]
    data_py, _ = raw_py[py_picks]
    # this .mat was generated using the Yokogawa MEG Reader
    data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
    data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
    data_Ykgw = data_Ykgw[py_picks]
    assert_array_almost_equal(data_py, data_Ykgw)
    # compare against the MNE-C binary export including stim channels
    py_picks = pick_types(raw_py.info, stim=True, ref_meg=False,
                          exclude='bads')
    data_py, _ = raw_py[py_picks]
    assert_array_almost_equal(data_py, data_bin)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def test_raw():
    """ Test bti conversion to Raw object """
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        # invalid config / head-shape arguments must raise
        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        # compare the BTi reader against the MNE-C export of the same data
        with Raw(exported, preload=True) as ex:
            with read_raw_bti(pdf, config, hs) as ra:
                assert_true('RawBTi' in repr(ra))
                assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
                assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                          ra.info['dev_head_t']['trans'], 7)
                dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
                              for r_ in (ra, ex)]
                assert_array_equal(dig1, dig2)
                coil1, coil2 = [np.concatenate([d['coil_trans'].flatten()
                                                for d in r_.info['chs'][:NCH]])
                                for r_ in (ra, ex)]
                assert_array_almost_equal(coil1, coil2, 7)
                loc1, loc2 = [np.concatenate([d['loc'].flatten()
                                              for d in r_.info['chs'][:NCH]])
                              for r_ in (ra, ex)]
                assert_array_equal(loc1, loc2)
                assert_array_equal(ra._data[:NCH], ex._data[:NCH])
                assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
                # Make sure concatenation works
                raw_concat = concatenate_raws([ra.copy(), ra])
                assert_equal(raw_concat.n_times, 2 * ra.n_times)
                # round-trip through FIF
                ra.save(tmp_raw_fname)
                with Raw(tmp_raw_fname) as r:
                    print(r)
        os.remove(tmp_raw_fname)
def load_raw_data(self, subject, series):
    """Read, concatenate and preprocess the recordings for one subject.

    Sets ``self.data`` (preprocessed EEG, samples x channels) and, for
    training series, ``self.events`` (event channels, samples x events).
    """
    test = series == TEST_SERIES
    if test:
        pattern = '../data/test/subj%d_series%d_data.csv'
    else:
        pattern = '../data/train/subj%d_series%d_data.csv'
    # one glob result list per requested series, flattened and ordered
    per_series = [glob(pattern % (subject, i)) for i in series]
    fnames = sorted(np.concatenate(per_series))
    # events only exist in the training CSVs
    raws = [creat_mne_raw_object(f, read_events=not test) for f in fnames]
    merged = concatenate_raws(raws)
    eeg_picks = pick_types(merged.info, eeg=True)
    self.data = preprocessData(merged._data[eeg_picks].transpose())
    if not test:
        self.events = merged._data[32:].transpose()
def test_array_raw():
    """Test creating raw from array """
    tempdir = _TempDir()
    # creating
    raw = Raw(fif_fname).crop(2, 5, copy=False)
    data, times = raw[:, :]
    sfreq = raw.info['sfreq']
    ch_names = [(ch[4:] if 'STI' not in ch else ch)
                for ch in raw.info['ch_names']]  # change them, why not
    # del raw
    # build the channel-type list: 102 grad/grad/mag triplets + stim + eeg
    types = list()
    for ci in range(102):
        types.extend(('grad', 'grad', 'mag'))
    types.extend(['stim'] * 9)
    types.extend(['eeg'] * 60)
    # wrong length
    assert_raises(ValueError, create_info, ch_names, sfreq, types)
    # bad entry
    types.append('foo')
    assert_raises(KeyError, create_info, ch_names, sfreq, types)
    types[-1] = 'eog'
    # default type
    info = create_info(ch_names, sfreq)
    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
    # use real types
    info = create_info(ch_names, sfreq, types)
    raw2 = RawArray(data, info)
    data2, times2 = raw2[:, :]
    assert_allclose(data, data2)
    assert_allclose(times, times2)
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw2.copy(), raw2])
    assert_equal(raw_concat.n_times, 2 * raw2.n_times)
    assert_true('RawArray' in repr(raw2))
    # saving
    temp_fname = op.join(tempdir, 'raw.fif')
    raw2.save(temp_fname)
    raw3 = Raw(temp_fname)
    data3, times3 = raw3[:, :]
    assert_allclose(data, data3)
    assert_allclose(times, times3)
    # filtering: build low-pass, high-pass, band-pass and band-stop versions
    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
    assert_equal(len(picks), 4)
    raw_lp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
    raw_hp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
    raw_bp = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
    raw_bs = raw2.copy()
    with warnings.catch_warnings(record=True):
        raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
    data, _ = raw2[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
    sig_dec = 11
    # complementary filters must reconstruct the original signal
    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
    # plotting
    raw2.plot()
    raw2.plot_psd()
    # epoching
    events = find_events(raw2, stim_channel='STI 014')
    events[:, 2] = 1
    assert_true(len(events) > 2)
    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
    epochs.plot_drop_log()
    epochs.plot()
    evoked = epochs.average()
    evoked.plot()
# --- Per-subject loading, band-pass filtering and CSP epoch extraction ---
# NOTE: Python 2 syntax (bare `print`, list-returning `map`); `subject`,
# `b`, `a`, `labels`, and the helpers come from earlier in the script.
print "Loading subject " + str(subject)
fnames = glob('input/train/subj%d_series*_data.csv' % (subject))
fnames.sort()
# map() returns a list under Python 2, so raws[i] indexing below is valid
raws = map(creat_mne_raw_object, fnames, [True]*len(fnames))
ids = []
epochs_tot = []
y = []
allraws = []
picks = pick_types(raws[0].info,eeg=True)
for i in range(0,len(fnames)):
    # filter each EEG channel in parallel with the (b, a) coefficients
    raws[i]._data[picks] = np.array(Parallel(n_jobs=-1)(
        delayed(lfilter)(b,a,raws[i]._data[j]) for j in picks))
    ids.append(np.array(pd.read_csv(fnames[i])['id']))
    allraws.append(raws[i].copy())
allraws = concatenate_raws(allraws)
################ CSP Filters training #####################################
print "\tTraining CSP"
for label in labels:
    # get event posision corresponding to HandStart
    events = find_events(allraws,stim_channel=label, verbose=False)
    # epochs signal for 2 second after the event
    epochs = Epochs(allraws, events, {'during' : 1}, 0, 2, proj=False,
                    baseline=None, preload=True, picks=picks,
                    add_eeg_ref=False, verbose=False)
    epochs_tot.append(epochs)
    y.extend([1]*len(epochs))
    # epochs signal for 2 second before the event, this correspond to the
    # rest period.
# --- Per-subject train/validation split and loading ---
# For each subject: the last two training series become the validation set;
# test-set handling is currently commented out.
for subject in subjects:
    print ("Loading data for subject %d... " % subject)
    fnames = glob('data/train/subj%d_series*_data.csv' % (subject))
    fnames.sort()
    #print fnames
    fnames_train = fnames[:-2]
    fnames_validation = fnames[-2:]
    fnames_test = glob('data/test/subj%d_series*_data.csv' % (subject))
    fnames_test.sort()
    #print fnames_validation
    # read each series and concatenate into one continuous Raw per split
    raw_train = concatenate_raws([creat_mne_raw_object(fname, read_events=True)
                                  for fname in fnames_train])
    raw_val = concatenate_raws([creat_mne_raw_object(fname, read_events=True)
                                for fname in fnames_validation])
    #raw_test = concatenate_raws([creat_mne_raw_object(fname, read_events=False) for fname in fnames_test])
    picks_train = pick_types(raw_train.info, eeg=True)
    picks_val = pick_types(raw_val.info, eeg=True)
    #picks_test = pick_types(raw_test.info, eeg=True)
    # samples x channels; rows 32+ of _data hold the event channels
    data_train = raw_train._data[picks_train].T
    labels_train = raw_train._data[32:].T
    data_val = raw_val._data[picks_val].T
    labels_val = raw_val._data[32:].T
    #data_test = raw_test._data[picks_test].T
    #labels_test = None
def gen_html_report(p, subjects, structurals, run_indices=None):
    """Generate one HTML report per subject.

    Parameters
    ----------
    p : instance of Params
        Analysis parameters; sections are toggled via ``p.report_params``
        (a dict of section name -> bool/config — presumably; verify against
        callers).
    subjects : list of str
        Subject identifiers.
    structurals : list of str
        Structural (FreeSurfer) subject names, parallel to ``subjects``.
    run_indices : list | None
        Per-subject run indices; ``None`` uses all runs for every subject.
    """
    import matplotlib.pyplot as plt
    from ._mnefun import (_load_trans_to, plot_good_coils, _head_pos_annot,
                          _get_bem_src_trans, safe_inserter, _prebad,
                          _load_meg_bads, mlab_offscreen, _fix_raw_eog_cals,
                          _handle_dict, _get_t_window, plot_chpi_snr_raw)
    if run_indices is None:
        run_indices = [None] * len(subjects)
    style = {'axes.spines.right': 'off', 'axes.spines.top': 'off',
             'axes.grid': True}
    time_kwargs = dict()
    # Newer MNE wants an explicit time_unit for evoked plots.
    if 'time_unit' in mne.fixes._get_args(mne.viz.plot_evoked):
        time_kwargs['time_unit'] = 's'
    for si, subj in enumerate(subjects):
        struc = structurals[si]
        report = Report(verbose=False)
        print(' Processing subject %s/%s (%s)'
              % (si + 1, len(subjects), subj))
        # raw
        fnames = get_raw_fnames(p, subj, 'raw', erm=False, add_splits=False,
                                run_indices=run_indices[si])
        for fname in fnames:
            if not op.isfile(fname):
                raise RuntimeError('Cannot create reports until raw data '
                                   'exist, missing:\n%s' % fname)
        raw = [read_raw_fif(fname, allow_maxshield='yes')
               for fname in fnames]
        _fix_raw_eog_cals(raw)
        prebad_file = _prebad(p, subj)
        for r in raw:
            _load_meg_bads(r, prebad_file, disp=False)
        raw = mne.concatenate_raws(raw)
        # sss
        sss_fnames = get_raw_fnames(p, subj, 'sss', False, False,
                                    run_indices[si])
        has_sss = all(op.isfile(fname) for fname in sss_fnames)
        sss_info = mne.io.read_raw_fif(sss_fnames[0]) if has_sss else None
        bad_file = get_bad_fname(p, subj)
        # NOTE(review): if has_sss is False, sss_info is None and the next
        # two statements raise AttributeError — presumably SSS output always
        # exists by the time reports are generated; confirm.
        if bad_file is not None:
            sss_info.load_bad_channels(bad_file)
        sss_info = sss_info.info
        # pca
        pca_fnames = get_raw_fnames(p, subj, 'pca', False, False,
                                    run_indices[si])
        has_pca = all(op.isfile(fname) for fname in pca_fnames)
        # whitening and source localization
        inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
        has_fwd = op.isfile(op.join(p.work_dir, subj, p.forward_dir,
                                    subj + p.inv_tag + '-fwd.fif'))
        with plt.style.context(style):
            ljust = 25
            #
            # Head coils
            #
            section = 'Good HPI count'
            if p.report_params.get('good_hpi_count', True) and p.movecomp:
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                figs = list()
                captions = list()
                for fname in fnames:
                    _, _, fit_data = _head_pos_annot(p, fname, prefix=' ')
                    assert fit_data is not None
                    fig = plot_good_coils(fit_data, show=False)
                    fig.set_size_inches(10, 2)
                    fig.tight_layout()
                    figs.append(fig)
                    captions.append('%s: %s' % (section, op.split(fname)[-1]))
                report.add_figs_to_section(figs, captions, section,
                                           image_format='svg')
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # cHPI SNR
            #
            section = 'cHPI SNR'
            if p.report_params.get('chpi_snr', True) and p.movecomp:
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                figs = list()
                captions = list()
                for fname in fnames:
                    # NOTE(review): rebinds `raw`, shadowing the concatenated
                    # raw built above; later sections therefore see the last
                    # run's raw — confirm this is intended.
                    raw = mne.io.read_raw_fif(fname, allow_maxshield='yes')
                    t_window = _get_t_window(p, raw)
                    fig = plot_chpi_snr_raw(raw, t_window, show=False,
                                            verbose=False)
                    fig.set_size_inches(10, 5)
                    fig.subplots_adjust(0.1, 0.1, 0.8, 0.95,
                                        wspace=0, hspace=0.5)
                    figs.append(fig)
                    captions.append('%s: %s' % (section, op.split(fname)[-1]))
                report.add_figs_to_section(figs, captions, section,
                                           image_format='png')  # svd too slow
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # Head movement
            #
            section = 'Head movement'
            if p.report_params.get('head_movement', True) and p.movecomp:
                print((' %s ... ' % section).ljust(ljust), end='')
                t0 = time.time()
                trans_to = _load_trans_to(p, subj, run_indices[si], raw)
                figs = list()
                captions = list()
                for fname in fnames:
                    pos, _, _ = _head_pos_annot(p, fname, prefix=' ')
                    fig = plot_head_positions(pos=pos, destination=trans_to,
                                              info=raw.info, show=False)
                    for ax in fig.axes[::2]:
                        """
                        # tighten to the sensor limits
                        assert ax.lines[0].get_color() == (0., 0., 0., 1.)
                        mn, mx = np.inf, -np.inf
                        for line in ax.lines:
                            ydata = line.get_ydata()
                            if np.isfinite(ydata).any():
                                mn = min(np.nanmin(ydata), mn)
                                mx = max(np.nanmax(line.get_ydata()), mx)
                        """
                        # always show at least 10cm span, and use tight limits
                        # if greater than that
                        coord = ax.lines[0].get_ydata()
                        for line in ax.lines:
                            if line.get_color() == 'r':
                                extra = line.get_ydata()[0]
                        mn, mx = coord.min(), coord.max()
                        md = (mn + mx) / 2.
                        mn = min([mn, md - 50., extra])
                        mx = max([mx, md + 50., extra])
                        assert (mn <= coord).all()
                        assert (mx >= coord).all()
                        ax.set_ylim(mn, mx)
                    fig.set_size_inches(10, 6)
                    fig.tight_layout()
                    figs.append(fig)
                    captions.append('%s: %s' % (section, op.split(fname)[-1]))
                del trans_to
                report.add_figs_to_section(figs, captions, section,
                                           image_format='svg')
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # Raw segments
            #
            if op.isfile(pca_fnames[0]):
                raw_pca = mne.concatenate_raws(
                    [mne.io.read_raw_fif(fname) for fname in pca_fnames])
            section = 'Raw segments'
            if p.report_params.get('raw_segments', True) and has_pca:
                # Ten 1-second snapshots spread evenly across the recording.
                times = np.linspace(raw.times[0], raw.times[-1], 12)[1:-1]
                raw_plot = list()
                for t in times:
                    this_raw = raw_pca.copy().crop(t - 0.5, t + 0.5)
                    this_raw.load_data()
                    this_raw._data[:] -= np.mean(this_raw._data, axis=-1,
                                                 keepdims=True)
                    raw_plot.append(this_raw)
                raw_plot = mne.concatenate_raws(raw_plot)
                for key in ('BAD boundary', 'EDGE boundary'):
                    raw_plot.annotations.delete(
                        np.where(raw_plot.annotations.description == key)[0])
                new_events = np.linspace(
                    0, int(round(10 * raw.info['sfreq'])) - 1, 11).astype(int)
                new_events += raw_plot.first_samp
                new_events = np.array([new_events,
                                       np.zeros_like(new_events),
                                       np.ones_like(new_events)]).T
                fig = raw_plot.plot(group_by='selection', butterfly=True,
                                    events=new_events)
                fig.axes[0].lines[-1].set_zorder(10)  # events
                fig.axes[0].set(xticks=np.arange(0, len(times)) + 0.5)
                xticklabels = ['%0.1f' % t for t in times]
                fig.axes[0].set(xticklabels=xticklabels)
                fig.axes[0].set(xlabel='Center of 1-second segments')
                fig.axes[0].grid(False)
                for _ in range(len(fig.axes) - 1):
                    fig.delaxes(fig.axes[-1])
                fig.set(figheight=(fig.axes[0].get_yticks() != 0).sum(),
                        figwidth=12)
                fig.subplots_adjust(0.0, 0.0, 1, 1, 0, 0)
                report.add_figs_to_section(fig, 'Processed', section,
                                           image_format='png')  # svg too slow
            #
            # PSD
            #
            section = 'PSD'
            if p.report_params.get('psd', True) and has_pca:
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                if p.lp_trans == 'auto':
                    lp_trans = 0.25 * p.lp_cut
                else:
                    lp_trans = p.lp_trans
                n_fft = 8192
                fmax = raw.info['lowpass']
                figs = [raw.plot_psd(fmax=fmax, n_fft=n_fft, show=False)]
                captions = ['%s: Raw' % section]
                fmax = p.lp_cut + 2 * lp_trans
                figs.append(raw.plot_psd(fmax=fmax, n_fft=n_fft, show=False))
                captions.append('%s: Raw (zoomed)' % section)
                if op.isfile(pca_fnames[0]):
                    figs.append(raw_pca.plot_psd(fmax=fmax, n_fft=n_fft,
                                                 show=False))
                    captions.append('%s: Processed' % section)
                # shared y limits
                n = len(figs[0].axes) // 2
                for ai, axes in enumerate(list(zip(
                        *[f.axes for f in figs]))[:n]):
                    ylims = np.array([ax.get_ylim() for ax in axes])
                    ylims = [np.min(ylims[:, 0]), np.max(ylims[:, 1])]
                    for ax in axes:
                        ax.set_ylim(ylims)
                        ax.set(title='')
                for fig in figs:
                    fig.set_size_inches(8, 8)
                    with warnings.catch_warnings(record=True):
                        fig.tight_layout()
                report.add_figs_to_section(figs, captions, section,
                                           image_format='svg')
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # SSP
            #
            section = 'SSP topomaps'
            proj_nums = _handle_dict(p.proj_nums, subj)
            if p.report_params.get('ssp_topomaps', True) and has_pca and \
                    np.sum(proj_nums) > 0:
                assert sss_info is not None
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                figs = []
                comments = []
                proj_files = get_proj_fnames(p, subj)
                if p.proj_extra is not None:
                    comments.append('Custom')
                    projs = read_proj(op.join(p.work_dir, subj, p.pca_dir,
                                              p.proj_extra))
                    figs.append(plot_projs_topomap(projs, info=sss_info,
                                                   show=False))
                if any(proj_nums[0]):  # ECG
                    if 'preproc_ecg-proj.fif' in proj_files:
                        comments.append('ECG')
                        figs.append(_proj_fig(op.join(
                            p.work_dir, subj, p.pca_dir,
                            'preproc_ecg-proj.fif'), sss_info,
                            proj_nums[0], p.proj_meg, 'ECG'))
                if any(proj_nums[1]):  # EOG
                    if 'preproc_blink-proj.fif' in proj_files:
                        comments.append('Blink')
                        figs.append(_proj_fig(op.join(
                            p.work_dir, subj, p.pca_dir,
                            'preproc_blink-proj.fif'), sss_info,
                            proj_nums[1], p.proj_meg, 'EOG'))
                if any(proj_nums[2]):  # ERM
                    if 'preproc_cont-proj.fif' in proj_files:
                        comments.append('Continuous')
                        figs.append(_proj_fig(op.join(
                            p.work_dir, subj, p.pca_dir,
                            'preproc_cont-proj.fif'), sss_info,
                            proj_nums[2], p.proj_meg, 'ERM'))
                captions = [section] + [None] * (len(comments) - 1)
                report.add_figs_to_section(
                    figs, captions, section, image_format='svg',
                    comments=comments)
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # Source alignment
            #
            section = 'Source alignment'
            source_alignment = p.report_params.get('source_alignment', True)
            # NOTE(review): `or` binds looser than `and`, so when
            # source_alignment is True this branch runs even without
            # SSS/forward files — confirm the grouping is intended.
            if source_alignment is True or isinstance(source_alignment, dict) \
                    and has_sss and has_fwd:
                assert sss_info is not None
                kwargs = source_alignment
                if isinstance(source_alignment, dict):
                    kwargs = dict(**source_alignment)
                else:
                    assert source_alignment is True
                    kwargs = dict()
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                captions = [section]
                try:
                    from mayavi import mlab
                except ImportError:
                    warnings.warn('Cannot plot alignment in Report, mayavi '
                                  'could not be imported')
                else:
                    subjects_dir = mne.utils.get_subjects_dir(
                        p.subjects_dir, raise_error=True)
                    bem, src, trans, _ = _get_bem_src_trans(
                        p, sss_info, subj, struc)
                    if len(mne.pick_types(sss_info)):
                        coord_frame = 'meg'
                    else:
                        coord_frame = 'head'
                    with mlab_offscreen():
                        fig = mlab.figure(bgcolor=(0., 0., 0.),
                                          size=(1000, 1000))
                        # Fill defaults without clobbering user-set keys.
                        for key, val in (
                                ('info', sss_info),
                                ('subjects_dir', subjects_dir),
                                ('bem', bem), ('dig', True),
                                ('coord_frame', coord_frame),
                                ('show_axes', True), ('fig', fig),
                                ('trans', trans), ('src', src)):
                            kwargs[key] = kwargs.get(key, val)
                        try_surfs = ['head-dense', 'head', 'inner_skull']
                        for surf in try_surfs:
                            try:
                                mne.viz.plot_alignment(surfaces=surf,
                                                       **kwargs)
                            except Exception:
                                pass
                            else:
                                break
                        else:
                            raise RuntimeError('Could not plot any surface '
                                               'for alignment:\n%s'
                                               % (try_surfs,))
                        fig.scene.parallel_projection = True
                        view = list()
                        for ai, angle in enumerate([180, 90, 0]):
                            mlab.view(angle, 90, focalpoint=(0., 0., 0.),
                                      distance=0.6, figure=fig)
                            view.append(mlab.screenshot(figure=fig))
                        mlab.close(fig)
                    view = trim_bg(np.concatenate(view, axis=1), 0)
                    report.add_figs_to_section(view, captions, section)
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # SNR
            #
            section = 'SNR'
            if p.report_params.get('snr', None) is not None:
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                snrs = p.report_params['snr']
                if not isinstance(snrs, (list, tuple)):
                    snrs = [snrs]
                for snr in snrs:
                    assert isinstance(snr, dict)
                    analysis = snr['analysis']
                    name = snr['name']
                    times = snr.get('times', [0.1])
                    inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
                    fname_inv = op.join(inv_dir,
                                        safe_inserter(snr['inv'], subj))
                    fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
                                           % (analysis, p.lp_cut, p.inv_tag,
                                              p.eq_tag, subj))
                    if not op.isfile(fname_inv):
                        print(' Missing inv: %s'
                              % op.basename(fname_inv), end='')
                    elif not op.isfile(fname_evoked):
                        print(' Missing evoked: %s'
                              % op.basename(fname_evoked), end='')
                    else:
                        inv = mne.minimum_norm.read_inverse_operator(
                            fname_inv)
                        this_evoked = mne.read_evokeds(fname_evoked, name)
                        title = ('%s<br>%s["%s"] (N=%d)'
                                 % (section, analysis, name,
                                    this_evoked.nave))
                        figs = plot_snr_estimate(this_evoked, inv,
                                                 verbose=False)
                        figs.axes[0].set_ylim(auto=True)
                        captions = ('%s<br>%s["%s"] (N=%d)'
                                    % (section, analysis, name,
                                       this_evoked.nave))
                        report.add_figs_to_section(
                            figs, captions, section=section,
                            image_format='svg')
                print('%5.1f sec' % ((time.time() - t0),))
            #
            # BEM
            #
            section = 'BEM'
            if p.report_params.get('bem', True) and has_fwd:
                caption = '%s<br>%s' % (section, struc)
                bem, src, trans, _ = _get_bem_src_trans(
                    p, raw.info, subj, struc)
                if not bem['is_sphere']:
                    subjects_dir = mne.utils.get_subjects_dir(
                        p.subjects_dir, raise_error=True)
                    mri_fname = op.join(subjects_dir, struc, 'mri', 'T1.mgz')
                    if not op.isfile(mri_fname):
                        warnings.warn(
                            'Could not find MRI:\n%s\nIf using surrogate '
                            'subjects, use '
                            'params.report_params["bem"] = False to avoid '
                            'this warning', stacklevel=2)
                    else:
                        t0 = time.time()
                        print((' %s ... ' % section).ljust(ljust), end='')
                        report.add_bem_to_section(struc, caption, section,
                                                  decim=10, n_jobs=1,
                                                  subjects_dir=subjects_dir)
                        print('%5.1f sec' % ((time.time() - t0),))
                else:
                    print(' %s skipped (sphere)' % section)
            else:
                print(' %s skipped' % section)
            #
            # Whitening
            #
            section = 'Whitening'
            if p.report_params.get('whitening', False):
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                whitenings = p.report_params['whitening']
                if not isinstance(whitenings, (list, tuple)):
                    whitenings = [whitenings]
                for whitening in whitenings:
                    assert isinstance(whitening, dict)
                    analysis = whitening['analysis']
                    name = whitening['name']
                    cov_name = op.join(p.work_dir, subj, p.cov_dir,
                                       safe_inserter(whitening['cov'], subj))
                    # Load the inverse
                    fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
                                           % (analysis, p.lp_cut, p.inv_tag,
                                              p.eq_tag, subj))
                    if not op.isfile(cov_name):
                        print(' Missing cov: %s'
                              % op.basename(cov_name), end='')
                    elif not op.isfile(fname_evoked):
                        print(' Missing evoked: %s'
                              % op.basename(fname_evoked), end='')
                    else:
                        noise_cov = mne.read_cov(cov_name)
                        evo = mne.read_evokeds(fname_evoked, name)
                        captions = ('%s<br>%s["%s"] (N=%d)'
                                    % (section, analysis, name, evo.nave))
                        fig = evo.plot_white(noise_cov, **time_kwargs)
                        report.add_figs_to_section(
                            fig, captions, section=section,
                            image_format='png')
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
            #
            # Sensor space plots
            #
            section = 'Responses'
            if p.report_params.get('sensor', False):
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                sensors = p.report_params['sensor']
                if not isinstance(sensors, (list, tuple)):
                    sensors = [sensors]
                for sensor in sensors:
                    assert isinstance(sensor, dict)
                    analysis = sensor['analysis']
                    name = sensor['name']
                    times = sensor.get('times', [0.1, 0.2])
                    fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
                                           % (analysis, p.lp_cut, p.inv_tag,
                                              p.eq_tag, subj))
                    if not op.isfile(fname_evoked):
                        print(' Missing evoked: %s'
                              % op.basename(fname_evoked), end='')
                    else:
                        this_evoked = mne.read_evokeds(fname_evoked, name)
                        figs = this_evoked.plot_joint(
                            times, show=False, ts_args=dict(**time_kwargs),
                            topomap_args=dict(outlines='head',
                                              **time_kwargs))
                        if not isinstance(figs, (list, tuple)):
                            figs = [figs]
                        captions = ('%s<br>%s["%s"] (N=%d)'
                                    % (section, analysis, name,
                                       this_evoked.nave))
                        captions = [captions] + [None] * (len(figs) - 1)
                        report.add_figs_to_section(
                            figs, captions, section=section,
                            image_format='png')
                print('%5.1f sec' % ((time.time() - t0),))
            #
            # Source estimation
            #
            section = 'Source estimation'
            if p.report_params.get('source', False):
                t0 = time.time()
                print((' %s ... ' % section).ljust(ljust), end='')
                sources = p.report_params['source']
                if not isinstance(sources, (list, tuple)):
                    sources = [sources]
                for source in sources:
                    assert isinstance(source, dict)
                    analysis = source['analysis']
                    name = source['name']
                    times = source.get('times', [0.1, 0.2])
                    # Load the inverse
                    inv_dir = op.join(p.work_dir, subj, p.inverse_dir)
                    fname_inv = op.join(inv_dir,
                                        safe_inserter(source['inv'], subj))
                    fname_evoked = op.join(inv_dir, '%s_%d%s_%s_%s-ave.fif'
                                           % (analysis, p.lp_cut, p.inv_tag,
                                              p.eq_tag, subj))
                    if not op.isfile(fname_inv):
                        print(' Missing inv: %s'
                              % op.basename(fname_inv), end='')
                    elif not op.isfile(fname_evoked):
                        print(' Missing evoked: %s'
                              % op.basename(fname_evoked), end='')
                    else:
                        inv = mne.minimum_norm.read_inverse_operator(
                            fname_inv)
                        this_evoked = mne.read_evokeds(fname_evoked, name)
                        title = ('%s<br>%s["%s"] (N=%d)'
                                 % (section, analysis, name,
                                    this_evoked.nave))
                        stc = mne.minimum_norm.apply_inverse(
                            this_evoked, inv,
                            lambda2=source.get('lambda2', 1. / 9.),
                            method=source.get('method', 'dSPM'))
                        stc = abs(stc)
                        # get clim using the reject_tmin <->reject_tmax
                        stc_crop = stc.copy().crop(
                            p.reject_tmin, p.reject_tmax)
                        clim = source.get('clim', dict(kind='percent',
                                                       lims=[82, 90, 98]))
                        out = mne.viz._3d._limits_to_control_points(
                            clim, stc_crop.data, 'viridis',
                            transparent=True)  # dummy cmap
                        if isinstance(out[0], (list, tuple, np.ndarray)):
                            clim = out[0]  # old MNE
                        else:
                            clim = out[1]  # new MNE (0.17+)
                        clim = dict(kind='value', lims=clim)
                        if not isinstance(stc, mne.SourceEstimate):
                            print('Only surface source estimates currently '
                                  'supported')
                        else:
                            subjects_dir = mne.utils.get_subjects_dir(
                                p.subjects_dir, raise_error=True)
                            with mlab_offscreen():
                                brain = stc.plot(
                                    hemi=source.get('hemi', 'split'),
                                    views=source.get('views',
                                                     ['lat', 'med']),
                                    size=source.get('size', (800, 600)),
                                    colormap=source.get('colormap',
                                                        'viridis'),
                                    transparent=source.get('transparent',
                                                           True),
                                    foreground='k', background='w',
                                    clim=clim, subjects_dir=subjects_dir,
                                )
                                imgs = list()
                                for t in times:
                                    brain.set_time(t)
                                    imgs.append(
                                        trim_bg(brain.screenshot(), 255))
                                brain.close()
                            captions = ['%2.3f sec' % t for t in times]
                            report.add_slider_to_section(
                                imgs, captions=captions, section=section,
                                title=title, image_format='png')
                print('%5.1f sec' % ((time.time() - t0),))
            else:
                print(' %s skipped' % section)
        report_fname = get_report_fnames(p, subj)[0]
        report.save(report_fname, open_browser=False, overwrite=True)
# --- FourierICA preprocessing fragment ---
# Assumes `raws`, `MEG`, `load_layout`, `N_COMPONENTS`, `FourierICA` are
# defined earlier in the full script.
for raw in raws:
    raw.add_proj([], remove_existing=True)
if not MEG:
    # crop away eyes closed resting
    raws[0].crop(tmin=0, tmax=90)
# drop bad and non-data channels
for raw in raws:
    picks = mne.pick_types(raw.info, eeg=True, meg=True)
    raw.drop_channels([name for idx, name in enumerate(raw.info['ch_names'])
                       if idx not in picks])
    raw.drop_channels(raw.info['bads'])
raw = mne.concatenate_raws(raws)
if MEG:
    layout = None
else:
    layout = load_layout()
wsize = 4096
sfreq = raw.info['sfreq']
# (start, end) in seconds of the two experimental states.
states = [(0, 85), (95, len(raw.times)/sfreq)]
# calculate fourier-ica
fica = FourierICA(wsize=wsize, n_components=N_COMPONENTS, sfreq=sfreq,
                  hpass=4, lpass=30, maxiter=7000)
# NOTE(review): first_samp/last_samp are absolute sample indices, while
# _data is already indexed from 0 — verify this slice is intended.
fica.fit(raw._data[:, raw.first_samp:raw.last_samp])
# submission file submission_file = 'beat_the_benchmark.csv' cols = ['HandStart','FirstDigitTouch', 'BothStartLoadPhase','LiftOff', 'Replace','BothReleased'] for subject in subjects: epochs_tot = [] y = [] ################ READ DATA ################################################ fnames = glob("../30 Data/train/subj%d_series*_data.csv" % (subject)) # read and concatenate all the files raw = concatenate_raws([creat_mne_raw_object(fname) for fname in fnames]) # pick eeg signal picks = pick_types(raw.info,eeg=True) # Filter data for alpha frequency and beta band # Note that MNE implement a zero phase (filtfilt) filtering not compatible # with the rule of future data. # Here we use left filter compatible with this constraint. # The function parallelized for speeding up the script raw._data[picks] = np.array(Parallel(n_jobs=-1)(delayed(lfilter)(b,a,raw._data[i]) for i in picks)) ################ CSP Filters training ##################################### # get event posision corresponding to Replace events = find_events(raw,stim_channel='BothReleased', verbose=False) # epochs signal for 1.5 second before the movement
def _test_raw_reader(reader, test_preloading=True, **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether not preloading is implemented for the reader. If True, both
        cases and memory mapping to file are tested.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : Instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.info['buffer_size_sec'] *
                            raw.info['sfreq'])), raw.n_times)
        slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None), slice(1, bnd)]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                       slice(0, bnd + 100)]
        other_raws = [reader(preload=buffer_fname, **kwargs),
                      reader(preload=False, **kwargs)]
        # preloaded, memmapped and lazy readers must agree on every slice
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)
    full_data = raw._data
    # FIX: the original passed ``repr(raw)`` as assert_true's *message*
    # argument, so it only checked that the class name string was truthy and
    # never inspected the repr at all.  Test actual membership instead.
    assert_true(raw.__class__.__name__ in repr(raw))  # to test repr
    assert_true(raw.info.__class__.__name__ in repr(raw.info))

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = read_raw_fif(out_fname)
    assert_equal(set(raw.info.keys()), set(raw3.info.keys()))
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)

    assert_true(not math.isnan(raw3.info['highpass']))
    assert_true(not math.isnan(raw3.info['lowpass']))
    assert_true(not math.isnan(raw.info['highpass']))
    assert_true(not math.isnan(raw.info['lowpass']))

    assert_equal(raw3.info['kit_system_id'], raw.info['kit_system_id'])

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert_equal(concat_raw.n_times, 2 * raw.n_times)
    assert_equal(concat_raw.first_samp, first_samp)
    assert_equal(concat_raw.last_samp - last_samp + first_samp, last_samp + 1)
    return raw
def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether not preloading is implemented for the reader. If True, both
        cases and memory mapping to file are tested.
    test_kwargs : bool
        Whether to test re-creating the reader from ``raw._init_kwargs``.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec *
                            raw.info['sfreq'])), raw.n_times)
        slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None), slice(1, bnd)]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                       slice(0, bnd + 100)]
        # preloaded, memmapped, and lazy readers must agree on every slice
        other_raws = [reader(preload=buffer_fname, **kwargs),
                      reader(preload=False, **kwargs)]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)
    else:
        raw = reader(**kwargs)
    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    # gh-5604
    assert _handle_meas_date(raw.info['meas_date']) >= 0
    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)
    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    # single-element concatenation exercises the trivial concat path
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    raw3 = read_raw_fif(out_fname)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_array_almost_equal(raw.times, raw3.times)
    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])
    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']
    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    # boundary annotation must land where the first raw ended
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]
    if concat_raw.info['meas_date'] is None:
        expected_bad_boundary_onset = ((last_samp - first_samp) /
                                       raw.info['sfreq'])
    else:
        expected_bad_boundary_onset = raw._last_time
    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset, decimal=2)
    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])
    assert isinstance(raw.annotations, Annotations)
    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name
    return raw
# --- Event filtering / raw concatenation fragment (Python 2) ---
# NOTE(review): truncated at both ends; the enclosing loops (over files `f`
# and event counter `cnt`) start before this view, so the indentation below
# is reconstructed — verify against the full script.
        filtered_events.append(np.array([events[cnt, 0], 0, 5]))
        event_order.append('STB')
        cnt += 1
    filtered_events = np.array(filtered_events)
    # we need to keep all events at this point because we'll need them
    # in the correct order in order to match with behavior
    # filtering raw to remove breathing artifacts and stuff we won't need
    # for evoked analysis. Do it here because mne_process_raw wipes out
    # events channel
    raw.filter(1, 100)
    if f > 0:
        # shift this run's events into the concatenated timeline
        all_events = mne.concatenate_events(
            [all_events, filtered_events],
            [all_raw.first_samp, raw.first_samp],
            [all_raw.last_samp, raw.last_samp])
        all_raw = mne.concatenate_raws([all_raw, raw])
    else:
        all_raw = raw
        all_events = filtered_events
event_id = {'STG': 1, 'STI': 3, 'STB': 5}
picks = mne.pick_types(raw.info, meg=True, ref_meg=True)
epochs = mne.Epochs(all_raw, all_events, event_id, tmin, tmax,
                    baseline=(None, 0), proj=False, preload=True,
                    picks=picks)
print subj
print epochs
# checking that we have at least 8 blocks of data
if np.sum(epochs.events[:, 2] == 1) < 352:
    print '\nERROR: Unexpected number of STG trials!'
def test_crop():
    """Test cropping with annotations.

    Verifies that annotations are correctly split by ``crop``, survive a
    crop -> concatenate round-trip, merge across raws with and without
    ``orig_time``, and round-trip through annotation/raw file I/O.
    """
    raw = read_raw_fif(fif_fname)
    events = mne.find_events(raw)
    onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']
    duration = np.full_like(onset, 0.5)
    description = ['bad %d' % k for k in range(len(onset))]
    annot = mne.Annotations(onset, duration, description,
                            orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)

    split_time = raw.times[-1] / 2. + 2.
    split_idx = len(onset) // 2 + 1
    raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])
    assert_array_equal(raw_cropped_left.annotations.description,
                       raw.annotations.description[:split_idx])
    assert_allclose(raw_cropped_left.annotations.duration,
                    raw.annotations.duration[:split_idx])
    assert_allclose(raw_cropped_left.annotations.onset,
                    raw.annotations.onset[:split_idx])
    raw_cropped_right = raw.copy().crop(split_time, None)
    assert_array_equal(raw_cropped_right.annotations.description,
                       raw.annotations.description[split_idx:])
    assert_allclose(raw_cropped_right.annotations.duration,
                    raw.annotations.duration[split_idx:])
    assert_allclose(raw_cropped_right.annotations.onset,
                    raw.annotations.onset[split_idx:])
    raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],
                                      verbose='debug')
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)
    # Get rid of the boundary events
    raw_concat.annotations.delete(-1)
    raw_concat.annotations.delete(-1)
    # Ensure annotations survive round-trip crop->concat
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    for attr in ('onset', 'duration'):
        assert_allclose(getattr(raw_concat.annotations, attr),
                        getattr(raw.annotations, attr),
                        err_msg='Failed for %s:' % (attr,))

    raw.set_annotations(None)  # undo

    # Test concatenating annotations with and without orig_time.
    raw2 = raw.copy()
    raw.set_annotations(Annotations([45.], [3], 'test',
                                    raw.info['meas_date']))
    raw2.set_annotations(Annotations([2.], [3], 'BAD', None))
    # orig_time=None annotations are interpreted relative to raw2's start,
    # which lands at raw._last_time after concatenation.
    expected_onset = [45., 2. + raw._last_time]
    raw = concatenate_raws([raw, raw2])
    raw.annotations.delete(-1)  # remove boundary annotations
    raw.annotations.delete(-1)
    assert_array_almost_equal(raw.annotations.onset, expected_onset,
                              decimal=2)

    # Test IO
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-annot.fif')
    raw.annotations.save(fname)
    annot_read = read_annotations(fname)
    for attr in ('onset', 'duration', 'orig_time'):
        assert_allclose(getattr(annot_read, attr),
                        getattr(raw.annotations, attr))
    assert_array_equal(annot_read.description, raw.annotations.description)
    annot = Annotations((), (), ())
    annot.save(fname)
    pytest.raises(IOError, read_annotations, fif_fname)  # none in old raw
    annot = read_annotations(fname)
    assert isinstance(annot, Annotations)
    assert len(annot) == 0
    # Test that empty annotations can be saved with an object
    fname = op.join(tempdir, 'test_raw.fif')
    raw.set_annotations(annot)
    raw.save(fname)
    raw_read = read_raw_fif(fname)
    assert isinstance(raw_read.annotations, Annotations)
    assert len(raw_read.annotations) == 0
    raw.set_annotations(None)
    raw.save(fname, overwrite=True)
    raw_read = read_raw_fif(fname)
    assert raw_read.annotations is not None  # XXX to be fixed in #5416
    assert len(raw_read.annotations.onset) == 0  # XXX to be fixed in #5416