def get_tfr_each_file(cfg, tfr_type='multitaper', recursive=False, export_path=None, n_jobs=1):
    '''
    Run the TFR export on every raw file found under cfg.DATA_PATHS.

    @params:
        tfr_type: 'multitaper' or 'morlet'
        recursive: if True, load raw files in sub-dirs recursively
        export_path: path to save plots
        n_jobs: number of cores to run in parallel
    '''
    cfg = check_cfg(cfg)
    # NOTE(review): t_buffer is assigned but never used in this function.
    t_buffer = cfg.T_BUFFER
    # resolve the TFR backend from the requested type
    if tfr_type == 'multitaper':
        tfr = mne.time_frequency.tfr_multitaper
    elif tfr_type == 'morlet':
        tfr = mne.time_frequency.tfr_morlet
    else:
        raise ValueError('Wrong TFR type %s' % tfr_type)
    # walk every configured data directory and process recognized raw formats
    for fifdir in cfg.DATA_PATHS:
        for f in qc.get_file_list(fifdir, fullpath=True, recursive=recursive):
            [fdir, fname, fext] = qc.parse_path_list(f)
            if fext in ['fif', 'bdf', 'gdf']:
                # NOTE(review): suspected signature mismatch -- get_tfr() defined in this
                # module takes (cfg, recursive=..., n_jobs=...), but it is called here with
                # (f, cfg, tfr, cfg.N_JOBS). Also the n_jobs and export_path parameters of
                # this function are ignored (cfg.N_JOBS is used instead). Confirm intent.
                get_tfr(f, cfg, tfr, cfg.N_JOBS)
def bdf2fif(filename, interactive=False, outdir=None): """ EDF or BioSemi BDF format """ # convert to mat using MATLAB (MNE's edf reader has an offset bug) fdir, fname, fext = qc.parse_path_list(filename) if outdir is None: outdir = fdir elif outdir[-1] != '/': outdir += '/' fiffile = outdir + fname + '.fif' raw = mne.io.read_raw_edf(filename, preload=True) # process event channel if raw.info['chs'][-1]['ch_name'] != 'STI 014': qc.print_c("*** ERROR: The last channel (%s) doesn't seem to be an event channel. Entering debugging mode." % raw.info['chs'][-1]['ch_name']) pdb.set_trace() raw.info['chs'][-1]['ch_name'] = 'TRIGGER' events = mne.find_events(raw, stim_channel='TRIGGER', shortest_event=1, uint_cast=True, consecutive=True) events[:, 2] -= events[:, 1] # set offset to 0 events[:, 1] = 0 # move the event channel to index 0 (for consistency) raw._data = np.concatenate((raw._data[-1, :].reshape(1, -1), raw._data[:-1, :])) raw._data[0] *= 0 # init the event channel raw.info['chs'] = [raw.info['chs'][-1]] + raw.info['chs'][:-1] # add events raw.add_events(events, 'TRIGGER') # save and close raw.save(fiffile, verbose=False, overwrite=True, fmt='double') print('Saved to', fiffile)
def epochs2mat(data_dir, channel_picks, event_id, tmin, tmax, merge_epochs=False, spfilter=None, spchannels=None):
    """
    Export epochs from fif files in data_dir to MATLAB .mat files.

    If merge_epochs is True, all fif files under data_dir are concatenated
    and saved as a single 'epochs_all.mat'. Otherwise each fif file is
    exported to its own '<name>-epochs.mat'.
    """
    fif_files = [p for p in qc.get_file_list(data_dir, fullpath=True) if p[-4:] == '.fif']
    if merge_epochs:
        # concatenate every raw file in the directory into one epoch set
        raw, events = pu.load_multi(fif_files, spfilter=spfilter, spchannels=spchannels)
        matfile = data_dir + '/epochs_all.mat'
        save_mat(raw, events, channel_picks, event_id, tmin, tmax, matfile)
    else:
        # one output file per raw file
        for fif_path in fif_files:
            [base, stem, _] = qc.parse_path_list(fif_path)
            matfile = '%s/%s-epochs.mat' % (base, stem)
            raw, events = pu.load_raw(fif_path)
            save_mat(raw, events, channel_picks, event_id, tmin, tmax, matfile)
            logger.info('Exported to %s' % matfile)
def eeg2fif(filename, interactive=False, outdir=None):
    """
    Brain Products EEG format

    Converts a BrainVision recording (.eeg + .vmrk) to fif. The signal is
    first converted to a .mat file via MATLAB/BioSig's sload(), and stimulus
    events are parsed from the .vmrk marker file. A blank 'TRIGGER' channel
    is prepended at index 0 and the parsed events are written into it.

    interactive: accepted for API consistency  # NOTE(review): never read here
    outdir: output directory; defaults to the input file's directory
    """
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir
    elif outdir[-1] != '/':
        outdir += '/'
    eegfile = fdir + fname + '.eeg'
    matfile = fdir + fname + '.mat'
    markerfile = fdir + fname + '.vmrk'
    fiffile = outdir + fname + '.fif'
    # convert to mat using MATLAB (skipped if a .mat already exists)
    if not os.path.exists(matfile):
        print('Converting input to mat file')
        run = "[sig,header]=sload('%s'); save('%s','sig','header');" % (eegfile, matfile)
        qc.matlab(run)
        if not os.path.exists(matfile):
            qc.print_c('>> ERROR: mat file convertion error.', 'r')
            sys.exit()
    else:
        print('MAT file already exists. Skipping conversion.')
    # extract events from lines like "Mk2=Stimulus,S  1,1234,1,0"
    events = []
    for l in open(markerfile):
        if 'Stimulus,S' in l:
            # event, sample_index= l.split(' ')[-1].split(',')[:2]
            data = l.split(',')[1:3]
            event = int(data[0][1:])  # ignore 'S'
            sample_index = int(data[1])
            events.append([sample_index, 0, event])
    # load data and create fif header
    mat = scipy.io.loadmat(matfile)
    # headers= mat['header']
    sample_rate = int(mat['header']['SampleRate'])
    signals = mat['sig'].T  # channels x samples
    nch, t_len = signals.shape
    # generic names; channel 0 is the (new) trigger channel
    ch_names = ['TRIGGER'] + ['CH%d' % (x + 1) for x in range(nch)]
    ch_info = ['stim'] + ['eeg'] * (nch)
    info = mne.create_info(ch_names, sample_rate, ch_info, montage='standard_1005')
    # add event channel (all zeros, filled by add_events below)
    eventch = np.zeros([1, signals.shape[1]])
    signals = np.concatenate((eventch, signals), axis=0)
    # create Raw object
    raw = mne.io.RawArray(signals, info)
    # add events
    raw.add_events(events, 'TRIGGER')
    # save and close
    raw.save(fiffile, verbose=False, overwrite=True, fmt='double')
    print('Saved to', fiffile)
def xdf2fif(filename, interactive=False, outdir=None):
    """
    Convert XDF format (LSL recording) to fif.

    The last data row is moved to channel index 0 and declared as the
    'TRIGGER' stim channel; all remaining channels are labeled 'eeg'.

    interactive: accepted for API consistency (not used here)
    outdir: output directory; defaults to the input file's directory
    """
    from pyxdf import pyxdf
    pyxdf.logger.setLevel('WARNING')
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir
    elif outdir[-1] != '/':
        outdir += '/'
    fiffile = outdir + fname + '.fif'
    # channel x times
    data = pyxdf.load_xdf(filename)
    raw_data = data[0][0]['time_series'].T
    # heuristic: amplitudes below 1 suggest the recording is in volts
    if np.max(raw_data[:-1]) < 1:
        # BUGFIX: message typo 'volate' -> 'voltage'
        logger.info('Assuming the signal unit is voltage (V). Converting to uV')
        raw_data[:-1] *= 10**6
    signals = np.concatenate((raw_data[-1, :].reshape(1, -1), raw_data[:-1, :]))
    sample_rate = int(data[0][0]['info']['nominal_srate'][0])
    # TODO: check the event channel index and move to the 0-th index
    # in LSL, usually the name is TRIG or STI 014.
    ch_names = []
    for ch in data[0][0]['info']['desc'][0]['channels'][0]['channel']:
        ch_names.append(ch['label'][0])
    trig_ch_guess = pu.find_event_channel(signals, ch_names)
    if trig_ch_guess is None:
        trig_ch_guess = 0
    # NOTE(review): the data row moved to index 0 above is the LAST channel,
    # while the name removed here is ch_names[trig_ch_guess]. If the trigger is
    # not the last channel these disagree -- confirm against actual recordings.
    ch_names = ['TRIGGER'] + ch_names[:trig_ch_guess] + ch_names[trig_ch_guess + 1:]
    ch_info = ['stim'] + ['eeg'] * (len(ch_names) - 1)
    # fif header creation
    info = mne.create_info(ch_names, sample_rate, ch_info)
    raw = mne.io.RawArray(signals, info)
    #raw.add_events(events_index, stim_channel='TRIGGER')
    # save and close
    raw.save(fiffile, verbose=False, overwrite=True, fmt='double')
    logger.info('Saved to %s' % fiffile)
    saveChannels2txt(outdir, ch_names)
def mat2fif(mat_file, sample_rate, data_field, event_field):
    """
    Convert a MATLAB .mat file to fif.

    mat_file: input Matlab file
    sample_rate: sampling rate in Hz
    data_field: name of the signal variable inside the mat file
    event_field: name of the event variable inside the mat file

    The event row(s) are stacked on top of the EEG data so the 'TRIGGER'
    channel lands at index 0, consistent with the other converters.
    """
    # BUGFIX: the original body read module-level constants (MAT_FILE,
    # DATA_FIELD, EVENT_FIELD, SAMPLE_RATE) and silently ignored every
    # parameter; use the parameters instead.
    data = scipy.io.loadmat(mat_file)
    eeg_data = data[data_field]
    event_data = data[event_field]
    num_eeg_channels = eeg_data.shape[0]
    # events and signal must cover the same number of samples
    assert event_data.shape[1] == eeg_data.shape[1]
    signals = np.concatenate((event_data, eeg_data), axis=0)
    ch_names = ['TRIGGER'] + ['CH%d' % ch for ch in range(num_eeg_channels)]
    ch_info = ['stim'] + ['eeg'] * num_eeg_channels
    info = mne.create_info(ch_names, sample_rate, ch_info)
    raw = mne.io.RawArray(signals, info)
    [basedir, fname, fext] = qc.parse_path_list(mat_file)
    fifname = '%s/%s.fif' % (basedir, fname)
    raw.save(fifname, verbose=False, overwrite=True)
    logger.info('Saved to %s.' % fifname)
def xdf2fif(filename, interactive=False, outdir=None):
    """
    Convert XDF format

    NOTE(review): this is a second definition of xdf2fif; being later in the
    file it shadows the pyxdf-based version defined above. Confirm which one
    is intended to be active, and remove the other.
    """
    from xdf import xdf
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir
    elif outdir[-1] != '/':
        outdir += '/'
    fiffile = outdir + fname + '.fif'
    # channel x times
    data = xdf.load_xdf(filename)
    raw_data = data[0][0]['time_series'].T
    # move the last data row to index 0 (assumed trigger channel)
    signals = np.concatenate((raw_data[-1, :].reshape(1, -1), raw_data[:-1, :]))
    sample_rate = int(data[0][0]['info']['nominal_srate'][0])
    # TODO: check the event channel index and move to the 0-th index
    # in LSL, usually the name is TRIG or STI 014.
    ch_names = []
    for ch in data[0][0]['info']['desc'][0]['channels'][0]['channel']:
        ch_names.append(ch['label'][0])
    trig_ch_guess = pu.find_event_channel(signals, ch_names)
    if trig_ch_guess is None:
        trig_ch_guess = 0
    # NOTE(review): unlike the other converters, the trigger channel keeps its
    # original name here instead of being renamed 'TRIGGER' -- confirm intent.
    # Also, the data moved to row 0 is the LAST channel while the name moved is
    # ch_names[trig_ch_guess]; these disagree if the trigger is not last.
    ch_names =[ch_names[trig_ch_guess]] + ch_names[:trig_ch_guess] + ch_names[trig_ch_guess+1:]
    ch_info = ['stim'] + ['eeg'] * (len(ch_names)-1)
    # fif header creation
    info = mne.create_info(ch_names, sample_rate, ch_info)
    raw = mne.io.RawArray(signals, info)
    # save and close
    raw.save(fiffile, verbose=False, overwrite=True, fmt='double')
    print('Saved to', fiffile)
def pcl2fif(filename, interactive=False, outdir=None, external_event=None, offset=0, overwrite=False, precision='single'):
    """
    PyCNBI Python pickle file -> fif.

    Params
    --------
    outdir: If None, it will be the 'fif/' subdirectory of the input file.
    external_event: Event file in text format. Each row should be:
        "SAMPLE_INDEX 0 EVENT_TYPE". When given, the recorded trigger channel
        is erased and replaced with these events.
    offset: sample offset passed to event_timestamps_to_indices().
    overwrite: overwrite an existing fif file.
    precision: Data matrix format. 'single' improves backward compatability.

    Returns True on success. The trigger channel is always moved (or added)
    at index 0 and named 'TRIGGER', consistent with the other converters.
    """
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir + 'fif/'
    elif outdir[-1] != '/':
        outdir += '/'
    data = qc.load_obj(filename)
    if type(data['signals']) == list:
        signals_raw = np.array(data['signals'][0]).T  # to channels x samples
    else:
        signals_raw = data['signals'].T  # to channels x samples
    sample_rate = data['sample_rate']
    if 'ch_names' not in data:
        ch_names = ['CH%d' % (x + 1) for x in range(signals_raw.shape[0])]
    else:
        ch_names = data['ch_names']
    # search for event channel
    trig_ch = pu.find_event_channel(signals_raw, ch_names)
    # move trigger channel to index 0
    if trig_ch is None:
        # assuming no event channel exists, add an event channel to index 0 for consistency.
        # BUGFIX: message was a double negative ("No event channel was not found.")
        logger.warning('No event channel was found. Adding a blank event channel to index 0.')
        eventch = np.zeros([1, signals_raw.shape[1]])
        signals = np.concatenate((eventch, signals_raw), axis=0)
        num_eeg_channels = signals_raw.shape[0]  # data['channels'] is not reliable any more
        trig_ch = 0
        ch_names = ['TRIGGER'] + ['CH%d' % (x + 1) for x in range(num_eeg_channels)]
    elif trig_ch == 0:
        # already in place; nothing to move
        signals = signals_raw
        num_eeg_channels = data['channels'] - 1
    else:
        # move event channel to 0
        logger.info('Moving event channel %d to 0.' % trig_ch)
        signals = np.concatenate((signals_raw[[trig_ch]], signals_raw[:trig_ch], signals_raw[trig_ch + 1:]), axis=0)
        assert signals_raw.shape == signals.shape
        num_eeg_channels = data['channels'] - 1
        ch_names.pop(trig_ch)
        trig_ch = 0
        ch_names.insert(trig_ch, 'TRIGGER')
        logger.info('New channel list:')
        for c in ch_names:
            logger.info('%s' % c)
    ch_info = ['stim'] + ['eeg'] * num_eeg_channels
    info = mne.create_info(ch_names, sample_rate, ch_info)
    # create Raw object
    raw = mne.io.RawArray(signals, info)
    raw._times = data['timestamps']  # seems to have no effect
    if external_event is not None:
        raw._data[0] = 0  # erase current events
        events_index = event_timestamps_to_indices(filename, external_event, offset)
        if len(events_index) == 0:
            logger.warning('No events were found in the event file')
        else:
            logger.info('Found %d events' % len(events_index))
            # only add events when some were actually found
            raw.add_events(events_index, stim_channel='TRIGGER')
    qc.make_dirs(outdir)
    fiffile = outdir + fname + '.fif'
    raw.save(fiffile, verbose=False, overwrite=overwrite, fmt=precision)
    logger.info('Saved to %s' % fiffile)
    saveChannels2txt(outdir, ch_names)
    return True
def bdf2fif_matlab(filename, interactive=False, outdir=None):
    """
    BioSemi bdf reader using BioSig toolbox of MATLAB.

    The signal is converted to .mat via MATLAB's sload(), while the events are
    read with MNE directly from the bdf (sload's event values are unreliable).
    The event channel is moved to index 0 for consistency.

    interactive: if True, pause for confirmation when the channel count is
        not the expected BioSemi-64 layout.
    outdir: output directory; defaults to the input file's directory.
    """
    # convert to mat using MATLAB (MNE's edf reader has an offset bug)
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir
    elif outdir[-1] != '/':
        outdir += '/'
    fiffile = outdir + fname + '.fif'
    matfile = outdir + fname + '.mat'
    if not os.path.exists(matfile):
        logger.info('Converting input to mat file')
        run = "[sig,header]=sload('%s'); save('%s','sig','header');" % (filename, matfile)
        qc.matlab(run)
        if not os.path.exists(matfile):
            logger.error('mat file convertion error.')
            sys.exit()
    mat = scipy.io.loadmat(matfile)
    os.remove(matfile)  # the .mat is only an intermediate artifact
    sample_rate = int(mat['header']['SampleRate'])
    nch = mat['sig'].shape[1]
    # assume Biosemi always has the same number of channels
    if nch == 73:
        # known BioSemi-64 layout: use the CAP tables for names/types
        ch_names = CAP['BIOSEMI_64']
        extra_ch = nch - len(CAP['BIOSEMI_64_INFO'])
        extra_names = []
        for ch in range(extra_ch):
            extra_names.append('EXTRA%d' % ch)
        ch_names = ch_names + extra_names
        ch_info = CAP['BIOSEMI_64_INFO'] + ['misc'] * extra_ch
        # NOTE(review): unlike the else-branch, no 'TRIGGER' entry is prepended
        # here even though the data rows are reordered below -- presumably
        # CAP['BIOSEMI_64'] already starts with the trigger name; confirm.
    else:
        logger.warning('Unrecognized number of channels (%d)' % nch)
        logger.warning('The last channel will be assumed to be trigger. Press Enter to continue, or Ctrl+C to break.')
        if interactive:
            input()
        # Set the trigger to be channel 0 because later we will move it to channel 0.
        ch_names = ['TRIGGER'] + ['CH%d' % (x + 1) for x in range(nch - 1)]
        ch_info = ['stim'] + ['eeg'] * (nch - 1)
    signals_raw = mat['sig'].T  # -> channels x samples
    # Note: Biosig's sload() sometimes returns bogus event values so we use the following for events
    bdf = mne.io.read_raw_edf(filename, preload=True)
    # NOTE(review): assumes the bdf stim channel is literally named 'TRIGGER';
    # verify against actual recordings.
    events = mne.find_events(bdf, stim_channel='TRIGGER', shortest_event=1, consecutive=True)
    # signals_raw[-1][:]= bdf._data[-1][:] # overwrite with the correct event values
    # Move the event channel to 0 (for consistency)
    signals = np.concatenate((signals_raw[-1, :].reshape(1, -1), signals_raw[:-1, :]))
    signals[0] *= 0  # init the event channel
    info = mne.create_info(ch_names, sample_rate, ch_info, montage='standard_1005')
    # create Raw object
    raw = mne.io.RawArray(signals, info)
    # add events
    raw.add_events(events, 'TRIGGER')
    # save and close
    raw.save(fiffile, verbose=False, overwrite=True, fmt='double')
    logger.info('Saved to %s' % fiffile)
    saveChannels2txt(outdir, ch_names)
def gdf2fif(filename, interactive=False, outdir=None, channel_file=None):
    """
    g.Tec gdf format
    Assumes the last channel is event channel.

    The gdf is converted to .mat via convert2mat(); events are read from the
    gdf header (TYP/POS arrays) and the event channel is moved to index 0.

    interactive: accepted for API consistency  # NOTE(review): never read here
    outdir: output directory; defaults to the input file's directory.
    channel_file: optional text file with one channel name per line; the last
        entry is expected to be 'TRIGGER'.
    """
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir
    elif outdir[-1] != '/':
        outdir += '/'
    fiffile = outdir + fname + '.fif'
    matfile = fdir + fname + '.mat'
    convert2mat(fdir + fname + '.gdf', matfile)
    mat = scipy.io.loadmat(matfile)
    os.remove(matfile)  # the .mat is only an intermediate artifact
    sample_rate = int(mat['header']['SampleRate'])
    nch = mat['sig'].shape[1]
    # read events from header (nested MATLAB struct indexing)
    evtype = mat['header']['EVENT'][0][0][0]['TYP'][0]
    evpos = mat['header']['EVENT'][0][0][0]['POS'][0]
    events = []
    for e in range(evtype.shape[0]):
        label = int(evtype[e])
        events.append([int(evpos[e][0]), 0, label])
    signals_raw = mat['sig'].T  # -> channels x samples
    ''' it seems they fixed the bug now
    # Note: Biosig's sload() sometimes returns bogus event values so we use the following for events
    raw= mne.io.read_raw_edf(filename, preload=True)
    events= mne.find_events(raw, stim_channel='TRIGGER', shortest_event=1, consecutive=True)
    #signals_raw[-1][:]= raw._data[-1][:] # overwrite with the correct event values
    '''
    # Move the event channel to 0 (for consistency)
    signals = np.concatenate((signals_raw[-1, :].reshape(1, -1), signals_raw[:-1, :]))
    signals[0] *= 0  # init the event channel
    # Note: gdf might have a software event channel
    if channel_file is None:
        ch_names = ['TRIGGER'] + ['CH%d' % x for x in range(1, nch)]
    else:
        ch_names_raw = []
        for l in open(channel_file):
            ch_names_raw.append(l.strip())
        if ch_names_raw[-1] != 'TRIGGER':
            # interactive safety check: the last listed channel should be the trigger
            input('Warning: Trigger channel is assumed to be the last channel. Press Ctrl+C if this is not the case.')
        ch_names = ['TRIGGER'] + ch_names_raw[:-1]
    ch_info = ['stim'] + ['eeg'] * (nch - 1)
    info = mne.create_info(ch_names, sample_rate, ch_info)
    # create Raw object
    raw = mne.io.RawArray(signals, info)
    # add events
    raw.add_events(events, 'TRIGGER')
    # save and close
    raw.save(fiffile, verbose=False, overwrite=True, fmt='double')
    logger.info('Saved to %s' % fiffile)
    saveChannels2txt(outdir, ch_names)
def get_tfr(cfg, recursive=False, n_jobs=1):
    '''
    Compute and export time-frequency representations (TFR) of epoched data.

    @params:
        cfg: config object (see check_cfg); TFR_TYPE, EXPORT_PATH, T_BUFFER,
             TRIGGERS, EPOCH, FREQ_RANGE etc. are read from it
        recursive: if True, load raw files in sub-dirs recursively
        n_jobs: number of cores to run in parallel
             # NOTE(review): immediately overwritten by cfg.N_JOBS below,
             # so this parameter is effectively ignored -- confirm intent.
    '''
    cfg = check_cfg(cfg)
    tfr_type = cfg.TFR_TYPE
    export_path = cfg.EXPORT_PATH
    t_buffer = cfg.T_BUFFER
    # resolve the TFR backend
    if tfr_type == 'multitaper':
        tfr = mne.time_frequency.tfr_multitaper
    elif tfr_type == 'morlet':
        tfr = mne.time_frequency.tfr_morlet
    elif tfr_type == 'butter':
        butter_order = 4 # TODO: parameterize
        tfr = lfilter
    elif tfr_type == 'fir':
        raise NotImplementedError
    else:
        raise ValueError('Wrong TFR type %s' % tfr_type)
    n_jobs = cfg.N_JOBS
    if n_jobs is None:
        n_jobs = mp.cpu_count()
    if hasattr(cfg, 'DATA_DIRS'):
        # multi-directory mode: merge all raw files into one grand-average set
        if export_path is None:
            raise ValueError('For multiple directories, cfg.EXPORT_PATH cannot be None')
        else:
            outpath = export_path
        # custom event file
        if hasattr(cfg, 'EVENT_FILE') and cfg.EVENT_FILE is not None:
            events = mne.read_events(cfg.EVENT_FILE)
        file_prefix = 'grandavg'
        # load and merge files from all directories
        flist = []
        for ddir in cfg.DATA_DIRS:
            ddir = ddir.replace('\\', '/')
            if ddir[-1] != '/':
                ddir += '/'
            for f in qc.get_file_list(ddir, fullpath=True, recursive=recursive):
                if qc.parse_path(f).ext in ['fif', 'bdf', 'gdf']:
                    flist.append(f)
        # NOTE(review): this overwrites events read from cfg.EVENT_FILE above.
        raw, events = pu.load_multi(flist)
    else:
        # single-file mode
        print('Loading', cfg.DATA_FILE)
        raw, events = pu.load_raw(cfg.DATA_FILE)
        # custom events
        if hasattr(cfg, 'EVENT_FILE') and cfg.EVENT_FILE is not None:
            events = mne.read_events(cfg.EVENT_FILE)
        if export_path is None:
            [outpath, file_prefix, _] = qc.parse_path_list(cfg.DATA_FILE)
        else:
            outpath = export_path
    # re-referencing
    if cfg.REREFERENCE is not None:
        pu.rereference(raw, cfg.REREFERENCE[1], cfg.REREFERENCE[0])
    sfreq = raw.info['sfreq']
    # set channels of interest
    picks = pu.channel_names_to_index(raw, cfg.CHANNEL_PICKS)
    spchannels = pu.channel_names_to_index(raw, cfg.SP_CHANNELS)
    if max(picks) > len(raw.info['ch_names']):
        msg = 'ERROR: "picks" has a channel index %d while there are only %d channels.' %\
              (max(picks), len(raw.info['ch_names']))
        raise RuntimeError(msg)
    # Apply filters
    pu.preprocess(raw, spatial=cfg.SP_FILTER, spatial_ch=spchannels, spectral=cfg.TP_FILTER,
                  spectral_ch=picks, notch=cfg.NOTCH_FILTER, notch_ch=picks,
                  multiplier=cfg.MULTIPLIER, n_jobs=n_jobs)
    # Read epochs: map trigger values present in the data to class labels
    classes = {}
    for t in cfg.TRIGGERS:
        if t in set(events[:, -1]):
            if hasattr(cfg, 'tdef'):
                classes[cfg.tdef.by_value[t]] = t
            else:
                classes[str(t)] = t
    if len(classes) == 0:
        raise ValueError('No desired event was found from the data.')
    try:
        tmin = cfg.EPOCH[0]
        tmin_buffer = tmin - t_buffer
        raw_tmax = raw._data.shape[1] / sfreq - 0.1
        if cfg.EPOCH[1] is None:
            # open-ended epoch: runs from the (single) event to the end of signal
            if cfg.POWER_AVERAGED:
                raise ValueError('EPOCH value cannot have None for grand averaged TFR')
            else:
                if len(cfg.TRIGGERS) > 1:
                    raise ValueError('If the end time of EPOCH is None, only a single event can be defined.')
                t_ref = events[np.where(events[:,2] == list(cfg.TRIGGERS)[0])[0][0], 0] / sfreq
                tmax = raw_tmax - t_ref - t_buffer
        else:
            tmax = cfg.EPOCH[1]
        tmax_buffer = tmax + t_buffer
        if tmax_buffer > raw_tmax:
            raise ValueError('Epoch length with buffer (%.3f) is larger than signal length (%.3f)' % (tmax_buffer, raw_tmax))
        #print('Epoch tmin = %.1f, tmax = %.1f, raw length = %.1f' % (tmin, tmax, raw_tmax))
        epochs_all = mne.Epochs(raw, events, classes, tmin=tmin_buffer, tmax=tmax_buffer,
                                proj=False, picks=picks, baseline=None, preload=True)
        if epochs_all.drop_log_stats() > 0:
            # deliberate interactive stop so bad epochs can be inspected
            print('\n** Bad epochs found. Dropping into a Python shell.')
            print(epochs_all.drop_log)
            print('tmin = %.1f, tmax = %.1f, tmin_buffer = %.1f, tmax_buffer = %.1f, raw length = %.1f' % \
                (tmin, tmax, tmin_buffer, tmax_buffer, raw._data.shape[1] / sfreq))
            print('\nType exit to continue.\n')
            pdb.set_trace()
    except:
        # NOTE(review): bare except -- swallows all errors (incl. KeyboardInterrupt)
        # and drops to the debugger instead of propagating.
        print('\n*** (tfr_export) ERROR OCCURRED WHILE EPOCHING ***')
        traceback.print_exc()
        print('tmin = %.1f, tmax = %.1f, tmin_buffer = %.1f, tmax_buffer = %.1f, raw length = %.1f' % \
            (tmin, tmax, tmin_buffer, tmax_buffer, raw._data.shape[1] / sfreq))
        pdb.set_trace()
    power = {}
    for evname in classes:
        #export_dir = '%s/plot_%s' % (outpath, evname)
        export_dir = outpath
        qc.make_dirs(export_dir)
        print('\n>> Processing %s' % evname)
        freqs = cfg.FREQ_RANGE  # define frequencies of interest
        n_cycles = freqs / 2.  # different number of cycle per frequency
        if cfg.POWER_AVERAGED:
            # grand-average TFR
            epochs = epochs_all[evname][:]
            if len(epochs) == 0:
                print('No %s epochs. Skipping.' % evname)
                continue
            if tfr_type == 'butter':
                # band-pass + Hilbert envelope as a cheap TFR substitute
                b, a = butter_bandpass(cfg.FREQ_RANGE[0], cfg.FREQ_RANGE[-1], sfreq, order=butter_order)
                tfr_filtered = lfilter(b, a, epochs, axis=2)
                tfr_hilbert = hilbert(tfr_filtered)
                tfr_power = abs(tfr_hilbert)
                tfr_data = np.mean(tfr_power, axis=0)
            elif tfr_type == 'fir':
                raise NotImplementedError
            else:
                power[evname] = tfr(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
                                    return_itc=False, decim=1, n_jobs=n_jobs)
                power[evname] = power[evname].crop(tmin=tmin, tmax=tmax)
                tfr_data = power[evname].data
            if cfg.EXPORT_MATLAB is True:
                # export all channels to MATLAB
                mout = '%s/%s-%s-%s.mat' % (export_dir, file_prefix, cfg.SP_FILTER, evname)
                scipy.io.savemat(mout, {'tfr':tfr_data, 'chs':epochs.ch_names, 'events':events,
                                        'sfreq':sfreq, 'epochs':cfg.EPOCH, 'freqs':cfg.FREQ_RANGE})
                print('Exported %s' % mout)
            if cfg.EXPORT_PNG is True:
                # Inspect power for each channel
                for ch in np.arange(len(picks)):
                    chname = raw.ch_names[picks[ch]]
                    title = 'Peri-event %s - Channel %s' % (evname, chname)
                    # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
                    fig = power[evname].plot([ch], baseline=cfg.BS_TIMES, mode=cfg.BS_MODE, show=False,
                                             colorbar=True, title=title, vmin=cfg.VMIN, vmax=cfg.VMAX, dB=False)
                    fout = '%s/%s-%s-%s-%s.png' % (export_dir, file_prefix, cfg.SP_FILTER, evname, chname)
                    fig.savefig(fout)
                    fig.clf()
                    print('Exported to %s' % fout)
        else:
            # TFR per event
            for ep in range(len(epochs_all[evname])):
                epochs = epochs_all[evname][ep]
                if len(epochs) == 0:
                    print('No %s epochs. Skipping.' % evname)
                    continue
                power[evname] = tfr(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
                                    return_itc=False, decim=1, n_jobs=n_jobs)
                power[evname] = power[evname].crop(tmin=tmin, tmax=tmax)
                if cfg.EXPORT_MATLAB is True:
                    # export all channels to MATLAB
                    mout = '%s/%s-%s-%s-ep%02d.mat' % (export_dir, file_prefix, cfg.SP_FILTER, evname, ep + 1)
                    scipy.io.savemat(mout, {'tfr':power[evname].data, 'chs':power[evname].ch_names,
                                            'events':events, 'sfreq':sfreq, 'tmin':tmin, 'tmax':tmax,
                                            'freqs':cfg.FREQ_RANGE})
                    print('Exported %s' % mout)
                if cfg.EXPORT_PNG is True:
                    # Inspect power for each channel
                    for ch in np.arange(len(picks)):
                        chname = raw.ch_names[picks[ch]]
                        title = 'Peri-event %s - Channel %s, Trial %d' % (evname, chname, ep + 1)
                        # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
                        fig = power[evname].plot([ch], baseline=cfg.BS_TIMES, mode=cfg.BS_MODE, show=False,
                                                 colorbar=True, title=title, vmin=cfg.VMIN, vmax=cfg.VMAX, dB=False)
                        fout = '%s/%s-%s-%s-%s-ep%02d.png' % (export_dir, file_prefix, cfg.SP_FILTER, evname, chname, ep + 1)
                        fig.savefig(fout)
                        fig.clf()
                        print('Exported %s' % fout)
    if hasattr(cfg, 'POWER_DIFF'):
        # export log-power difference between the first two classes
        export_dir = '%s/diff' % outpath
        qc.make_dirs(export_dir)
        # NOTE(review): dict.keys() is not subscriptable in Python 3 --
        # labels[0]/labels[1] below would raise TypeError; needs list(...).
        labels = classes.keys()
        df = power[labels[0]] - power[labels[1]]
        df.data = np.log(np.abs(df.data))
        # Inspect power diff for each channel
        for ch in np.arange(len(picks)):
            chname = raw.ch_names[picks[ch]]
            title = 'Peri-event %s-%s - Channel %s' % (labels[0], labels[1], chname)
            # mode= None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            # NOTE(review): vmin=3.0, vmax=-3.0 looks inverted -- confirm.
            fig = df.plot([ch], baseline=cfg.BS_TIMES, mode=cfg.BS_MODE, show=False,
                          colorbar=True, title=title, vmin=3.0, vmax=-3.0, dB=False)
            fout = '%s/%s-%s-diff-%s-%s-%s.jpg' % (export_dir, file_prefix, cfg.SP_FILTER, labels[0], labels[1], chname)
            print('Exporting to %s' % fout)
            fig.savefig(fout)
            fig.clf()
    print('Finished !')
def raw2psd(rawfile=None, fmin=1, fmax=40, wlen=0.5, wstep=1, tmin=0.0, tmax=None, channel_picks=None, excludes=[], n_jobs=1):
    """
    Compute PSD features over a sliding window on the entire raw file.
    Leading edge of the window is the time reference, i.e. do not use future data.

    Input
    =====
    rawfile: fif file.
    channel_picks: None or list of channel names
    tmin (sec): start time of the PSD window relative to the event onset.
    tmax (sec): end time of the PSD window relative to the event onset. None = until the end.
    fmin (Hz): minimum PSD frequency
    fmax (Hz): maximum PSD frequency
    wlen (sec): sliding window length for computing PSD (sec)
    wstep (int): sliding window step (time samples)
    excludes (list): list of channels to exclude
    n_jobs: cores for the multitaper PSD estimator

    Exports a header pickle and a numpy array of PSDs next to the input file.
    """
    raw, eve = pu.load_raw(rawfile)
    sfreq = raw.info['sfreq']
    wframes = int(round(sfreq * wlen))
    raw_eeg = raw.pick_types(meg=False, eeg=True, stim=False, exclude=excludes)
    if channel_picks is None:
        rawdata = raw_eeg._data
        # BUGFIX: the original stored channel NAMES here, but the export step
        # below indexes raw.ch_names with these values -- use indices instead.
        chlist = list(range(len(raw.ch_names)))
    else:
        chlist = []
        for ch in channel_picks:
            chlist.append(raw.ch_names.index(ch))
        rawdata = raw_eeg._data[np.array(chlist)]
    if tmax is None:
        t_end = rawdata.shape[1]
    else:
        t_end = int(round(tmax * sfreq))
    t_start = int(round(tmin * sfreq)) + wframes
    # BUGFIX: forward the n_jobs parameter (it was hard-coded to 1)
    psde = mne.decoding.PSDEstimator(sfreq, fmin=fmin, fmax=fmax, n_jobs=n_jobs,\
        bandwidth=None, low_bias=True, adaptive=False, normalization='length', verbose=None)
    print('[PID %d] %s' % (os.getpid(), rawfile))
    psd_all = []
    evelist = []
    times = []
    t_len = t_end - t_start
    last_eve = 0
    y_i = 0
    t_last = t_start
    tm = qc.Timer()
    for t in range(t_start, t_end, wstep):
        # compute PSD over the trailing window [t - wframes, t)
        window = rawdata[:, t - wframes:t]
        psd = psde.transform(window.reshape((1, window.shape[0], window.shape[1])))
        psd = psd.reshape(psd.shape[1], psd.shape[2])
        psd_all.append(psd)
        times.append(t)
        # matching events at the current window
        if y_i < eve.shape[0] and t >= eve[y_i][0]:
            last_eve = eve[y_i][2]
            y_i += 1
        evelist.append(last_eve)
        # progress report about once per second
        if tm.sec() >= 1:
            perc = (t - t_start) / t_len
            fps = (t - t_last) / wstep
            est = (t_end - t) / wstep / fps
            print('[PID %d] %.1f%% (%.1f FPS, %ds left)' % (os.getpid(), perc * 100.0, fps, est))
            t_last = t
            tm.reset()
    print('Finished.')
    # export data
    try:
        chnames = [raw.ch_names[ch] for ch in chlist]
        psd_all = np.array(psd_all)
        [basedir, fname, fext] = qc.parse_path_list(rawfile)
        fout_header = '%s/psd-%s-header.pkl' % (basedir, fname)
        fout_psd = '%s/psd-%s-data.npy' % (basedir, fname)
        header = {'psdfile': fout_psd, 'times': np.array(times), 'sfreq': sfreq,
                  'channels': chnames, 'wframes': wframes, 'events': evelist}
        qc.save_obj(fout_header, header)
        np.save(fout_psd, psd_all)
        print('Exported to:\n(header) %s\n(numpy array) %s' % (fout_header, fout_psd))
    except:
        import traceback
        print('(%s) Unexpected error occurred while exporting data. Dropping you into a shell for recovery.' %\
            os.path.basename(__file__))
        traceback.print_exc()
        from IPython import embed
        embed()
def epochs2psd(rawfile, channel_picks, event_id, tmin, tmax, fmin, fmax, w_len, w_step, excludes=None):
    """
    Compute PSD features over a sliding window in epochs
    Exported data is 4D: [epochs] x [times] x [channels] x [freqs]

    Input
    =====
    rawfile: fif-format raw file
    channel_picks: None or list of channel indices
    event_id: { label(str) : event_id(int) }
    tmin: start time of the PSD window relative to the event onset
    tmax: end time of the PSD window relative to the event onset
    fmin: minimum PSD frequency
    fmax: maximum PSD frequency
    w_len: sliding window length for computing PSD
    w_step: sliding window step in time samples
    excludes: channels to exclude when channel_picks is None

    The result is written both as .mat (MATLAB) and .pkl (pickled numpy)
    next to the input file.
    """
    rawfile = rawfile.replace('\\', '/')
    raw, events = pu.load_raw(rawfile)
    sfreq = raw.info['sfreq']
    if channel_picks is None:
        picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude=excludes)
    else:
        picks = channel_picks
    # Epoching
    # NOTE(review): baseline=(tmin, tmax) spans the whole epoch -- unusual;
    # confirm this is the intended baseline correction. Also add_eeg_ref was
    # removed in modern MNE versions; this call pins an old MNE API.
    epochs = mne.Epochs(raw, events, event_id, tmin=tmin, tmax=tmax, proj=False,
                        picks=picks, baseline=(tmin, tmax), preload=True, add_eeg_ref=False)
    # from IPython import embed; embed()
    # Compute psd vectors over a sliding window between tmin and tmax
    w_len = int(sfreq * w_len)  # window length in samples
    # NOTE(review): cpu_count presumably comes from multiprocessing -- verify import.
    psde = mne.decoding.PSDEstimator(sfreq, fmin=fmin, fmax=fmax, n_jobs=cpu_count(), adaptive=False)
    epochmat = {e: epochs[e]._data for e in event_id}
    psdmat = {}
    for e in event_id:
        # psd = [epochs] x [windows] x [channels] x [freqs]
        psd, _ = pu.get_psd(epochs[e], psde, w_len, w_step, flatten=False)
        psdmat[e] = psd
        # psdmat[e]= np.mean(psd, 3) # for freq-averaged
    # NOTE(review): labels stores a dict_keys view; savemat/pickle may not
    # round-trip it as a plain list -- confirm downstream readers cope.
    data = dict(epochs=epochmat, psds=psdmat, tmin=tmin, tmax=tmax, sfreq=epochs.info['sfreq'],\
        fmin=fmin, fmax=fmax, w_step=w_step, w_len=w_len, labels=epochs.event_id.keys())
    # Export
    [basedir, fname, fext] = qc.parse_path_list(rawfile)
    matfile = '%s/psd-%s.mat' % (basedir, fname)
    pklfile = '%s/psd-%s.pkl' % (basedir, fname)
    scipy.io.savemat(matfile, data)
    qc.save_obj(pklfile, data)
    print('Exported to %s' % matfile)
    print('Exported to %s' % pklfile)
""" Compute confusion matrix and accuracy from online result logs. Kyuhwa Lee ([email protected]) Swiss Federal Institute of Technology of Lausanne (EPFL) """ LOG_DIR = r'D:\data\MI\rx1\classifier\gait-ULR-250ms' import pycnbi import pycnbi.utils.q_common as qc dtlist = [] gtlist = [] for f in qc.get_file_list(LOG_DIR): [basedir, fname, fext] = qc.parse_path_list(f) if 'online' not in fname or fext != 'txt': continue print(f) for l in open(f): if len(l.strip()) == 0: break gt, dt = l.strip().split(',') gtlist.append(gt) dtlist.append(dt) print('Ground-truth: %s' % ''.join(gtlist)) print('Detected as : %s' % ''.join(dtlist)) cfmat, acc = qc.confusion_matrix(gtlist, dtlist) print('\nAverage accuracy: %.3f' % acc) print(cfmat)
while decoder.is_running() is 0: time.sleep(0.01) # bar visual object if cfg.FEEDBACK_TYPE == 'BAR': from pycnbi.protocols.viz_bars import BarVisual visual = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS, screen_size=cfg.SCREEN_SIZE) elif cfg.FEEDBACK_TYPE == 'BODY': assert hasattr(cfg, 'FEEDBACK_IMAGE_PATH'), 'FEEDBACK_IMAGE_PATH is undefined in your config.' from pycnbi.protocols.viz_human import BodyVisual visual = BodyVisual(cfg.FEEDBACK_IMAGE_PATH, use_glass=cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS, screen_size=cfg.SCREEN_SIZE) visual.put_text('Waiting to start') if cfg.LOG_PROBS: logdir = qc.parse_path_list(cfg.DECODER_FILE)[0] probs_logfile = time.strftime(logdir + "probs-%Y%m%d-%H%M%S.txt", time.localtime()) else: probs_logfile = None feedback = Feedback(cfg, state, visual, tdef, trigger, probs_logfile) # start trial = 1 dir_detected = [] prob_history = {c:[] for c in bar_dirs} while trial <= num_trials: if cfg.SHOW_TRIALS: title_text = 'Trial %d / %d' % (trial, num_trials) else: title_text = 'Ready' true_label = dir_seq[trial - 1]
# NOTE(review): orphan fragment -- the enclosing function/loop is not visible
# in this chunk; rex_dir, bar, pred_label, true_label, trial, dir_detected,
# dir_seq, decoder_UD come from that missing scope.
if rex_dir is not None:
    # execute the Rex robot action corresponding to the detected direction
    bar.move(pred_label, 100, overlay=False, barcolor='B')
    bar.update()
    qc.print_c('Executing Rex action %s' % rex_dir, 'W')
    os.system('%s/Rex/RexControlSimple.exe %s %s' % (pycnbi.ROOT, cfg.REX_COMPORT, rex_dir))
    time.sleep(8)
if true_label == pred_label:
    msg = 'Correct'
else:
    msg = 'Wrong'
print('Trial %d: %s (%s -> %s)' % (trial, msg, true_label, pred_label))
trial += 1
# write performance: one "ground-truth,detected" pair per line, then summary
fdir, _, _ = qc.parse_path_list(cfg.CLS_MI)
logfile = time.strftime(fdir + "/online-%Y%m%d-%H%M%S.txt", time.localtime())
with open(logfile, 'w') as fout:
    for dt, gt in zip(dir_detected, dir_seq):
        fout.write('%s,%s\n' % (gt, dt))
    cfmat, acc = qc.confusion_matrix(dir_seq, dir_detected)
    fout.write('\nAccuracy %.3f\nConfusion matrix\n' % acc)
    fout.write(cfmat)
print('\nAccuracy %.3f\nConfusion matrix\n' % acc)
print(cfmat)
print('Log exported to %s' % logfile)
bar.finish()
if decoder_UD:
    decoder_UD.stop()
def load_multi(src, spfilter=None, spchannels=None, multiplier=1):
    """
    Load multiple data files and concatenate them into a single series
    - Assumes all files have the same sampling rate and channel order.
    - Event locations are updated accordingly with new offset.

    @params:
        src: directory or list of files.
        spfilter: apply spatial filter while loading.
        spchannels: list of channel names to apply spatial filter.
        multiplier: to change units for better numerical stability.

    Returns (raw_merged, events). See load_raw() for more low-level details.
    """
    if type(src) == str:
        if not os.path.isdir(src):
            logger.error('%s is not a directory or does not exist.' % src)
            raise IOError
        flist = []
        for f in qc.get_file_list(src):
            if qc.parse_path_list(f)[2] == 'fif':
                flist.append(f)
    elif type(src) in [list, tuple]:
        flist = src
    else:
        logger.error('Unknown input type %s' % type(src))
        raise TypeError
    if len(flist) == 0:
        logger.error('load_multi(): No fif files found in %s.' % src)
        raise RuntimeError
    elif len(flist) == 1:
        # single file: no concatenation needed
        return load_raw(flist[0], spfilter=spfilter, spchannels=spchannels, multiplier=multiplier)
    # load raw files
    rawlist = []
    for f in flist:
        logger.info('Loading %s' % f)
        raw, _ = load_raw(f, spfilter=spfilter, spchannels=spchannels, multiplier=multiplier)
        rawlist.append(raw)
    # concatenate signals in one shot (the original concatenated pairwise in a
    # loop, which re-copies the accumulated array each iteration: O(n^2))
    signals = np.concatenate([raw._data for raw in rawlist], axis=1)
    # create a concatenated raw object and update channel names
    raw = rawlist[0]
    trigch = find_event_channel(raw)
    ch_types = ['eeg'] * len(raw.ch_names)
    if trigch is not None:
        ch_types[trigch] = 'stim'
    info = mne.create_info(raw.ch_names, raw.info['sfreq'], ch_types)
    raw_merged = mne.io.RawArray(signals, info)
    # re-calculate event positions
    events = mne.find_events(raw_merged, stim_channel='TRIGGER', shortest_event=1,
                             uint_cast=True, consecutive='increasing', output='onset',
                             initial_event=True)
    return raw_merged, events
def config_run(cfg_module):
    """
    Run the online motor-imagery protocol defined by a config file.

    Loads the config, connects (or fakes) the amplifier and trigger device,
    starts the classifier daemon, runs the feedback trials and finally
    exports a per-trial performance log next to the classifier file.

    @params:
    cfg_module: path to a config file understood by load_cfg().
    """
    if not (os.path.exists(cfg_module) and os.path.isfile(cfg_module)):
        raise IOError('%s cannot be loaded.' % os.path.realpath(cfg_module))
    cfg = load_cfg(cfg_module)

    if cfg.FAKE_CLS is None:
        # choose amp; auto-search over LSL when none is configured
        if cfg.AMP_NAME is None and cfg.AMP_SERIAL is None:
            amp_name, amp_serial = pu.search_lsl(ignore_markers=True)
        else:
            amp_name = cfg.AMP_NAME
            amp_serial = cfg.AMP_SERIAL
        fake_dirs = None
    else:
        # fake decoder: no amplifier needed, directions come from the config
        amp_name = None
        amp_serial = None
        fake_dirs = [v for (k, v) in cfg.DIRECTIONS]

    # events and triggers
    tdef = trigger_def(cfg.TRIGGER_DEF)
    if cfg.TRIGGER_DEVICE is None:
        input('\n** Warning: No trigger device set. Press Ctrl+C to stop or Enter to continue.')
    trigger = pyLptControl.Trigger(cfg.TRIGGER_DEVICE)
    if not trigger.init(50):
        # hardware trigger unreachable: offer a mock trigger fallback
        qc.print_c('\n** Error connecting to USB2LPT device. Use a mock trigger instead?', 'R')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = pyLptControl.MockTrigger()
        trigger.init(50)

    # init classification
    decoder = BCIDecoderDaemon(cfg.CLS_MI, buffer_size=1.0, fake=(cfg.FAKE_CLS is not None),
                               amp_name=amp_name, amp_serial=amp_serial, fake_dirs=fake_dirs,
                               parallel=cfg.PARALLEL_DECODING, alpha_new=cfg.PROB_ALPHA_NEW)

    # OLD: requires trigger values to be always defined
    #labels = [tdef.by_value[x] for x in decoder.get_labels()]
    # NEW: events can be mapped into integers:
    dirdata = {d[1] for d in cfg.DIRECTIONS}
    labels = [x if x in dirdata else tdef.by_value[x] for x in decoder.get_labels()]

    # map class labels to bar directions ('direction' avoids shadowing builtin dir)
    bar_def = {label: str(direction) for direction, label in cfg.DIRECTIONS}
    bar_dirs = [bar_def[l] for l in labels]
    dir_seq = []
    for x in range(cfg.TRIALS_EACH):
        dir_seq.extend(bar_dirs)
    if cfg.TRIALS_RANDOMIZE:
        random.shuffle(dir_seq)
    else:
        # fixed order: repeat the configured direction sequence verbatim
        dir_seq = [d[0] for d in cfg.DIRECTIONS] * cfg.TRIALS_EACH
    num_trials = len(dir_seq)

    qc.print_c('Initializing decoder.', 'W')
    # BUGFIX: was 'is 0' (identity comparison to an int literal); use equality
    while decoder.is_running() == 0:
        time.sleep(0.01)

    # bar visual object
    if cfg.FEEDBACK_TYPE == 'BAR':
        from pycnbi.protocols.viz_bars import BarVisual
        visual = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS, screen_size=cfg.SCREEN_SIZE)
    elif cfg.FEEDBACK_TYPE == 'BODY':
        assert hasattr(cfg, 'IMAGE_PATH'), 'IMAGE_PATH is undefined in your config.'
        from pycnbi.protocols.viz_human import BodyVisual
        visual = BodyVisual(cfg.IMAGE_PATH, use_glass=cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS, screen_size=cfg.SCREEN_SIZE)
    else:
        # previously fell through silently and crashed later with NameError on 'visual'
        raise ValueError('Unknown FEEDBACK_TYPE %s' % cfg.FEEDBACK_TYPE)
    visual.put_text('Waiting to start')

    if cfg.LOG_PROBS:
        logdir = qc.parse_path_list(cfg.CLS_MI)[0]
        # NOTE(review): unlike the trial log below, no '/' is inserted after
        # logdir here -- verify parse_path_list() returns a trailing slash
        probs_logfile = time.strftime(logdir + "probs-%Y%m%d-%H%M%S.txt", time.localtime())
    else:
        probs_logfile = None
    feedback = Feedback(cfg, visual, tdef, trigger, probs_logfile)

    # start
    trial = 1
    dir_detected = []
    prob_history = {c: [] for c in bar_dirs}
    while trial <= num_trials:
        if cfg.SHOW_TRIALS:
            title_text = 'Trial %d / %d' % (trial, num_trials)
        else:
            title_text = 'Ready'
        true_label = dir_seq[trial - 1]

        result = feedback.classify(decoder, true_label, title_text, bar_dirs, prob_history=prob_history)
        if result is None:
            break  # feedback aborted
        pred_label = result
        dir_detected.append(pred_label)

        if cfg.WITH_REX is True and pred_label == true_label:
            # map bar direction to Rex compass action
            rex_dir = {'U': 'N', 'L': 'W', 'R': 'E', 'D': 'S'}.get(pred_label)
            if rex_dir is None:
                qc.print_c('Warning: Rex cannot execute undefined action %s' % pred_label, 'W')
            else:
                visual.move(pred_label, 100, overlay=False, barcolor='B')
                visual.update()
                qc.print_c('Executing Rex action %s' % rex_dir, 'W')
                os.system('%s/Rex/RexControlSimple.exe %s %s' % (pycnbi.ROOT, cfg.REX_COMPORT, rex_dir))
                time.sleep(8)

        msg = 'Correct' if true_label == pred_label else 'Wrong'
        # with TRIALS_RETRY, a wrong trial is repeated until answered correctly
        if cfg.TRIALS_RETRY is False or true_label == pred_label:
            print('Trial %d: %s (%s -> %s)' % (trial, msg, true_label, pred_label))
            trial += 1

    if dir_detected:
        # write performance and log results
        fdir, _, _ = qc.parse_path_list(cfg.CLS_MI)
        logfile = time.strftime(fdir + "/online-%Y%m%d-%H%M%S.txt", time.localtime())
        with open(logfile, 'w') as fout:
            fout.write('Ground-truth,Prediction\n')
            for gt, dt in zip(dir_seq, dir_detected):
                fout.write('%s,%s\n' % (gt, dt))
            cfmat, acc = qc.confusion_matrix(dir_seq, dir_detected)
            fout.write('\nAccuracy %.3f\nConfusion matrix\n' % acc)
            fout.write(cfmat)
            print('Log exported to %s' % logfile)
        print('\nAccuracy %.3f\nConfusion matrix\n' % acc)
        print(cfmat)

    visual.finish()
    if decoder:
        decoder.stop()

    '''
    # automatic thresholding
    if prob_history and len(bar_dirs) == 2:
        total = sum(len(prob_history[c]) for c in prob_history)
        fout = open(probs_logfile, 'a')
        msg = 'Automatic threshold optimization.\n'
        max_acc = 0
        max_bias = 0
        for bias in np.arange(-0.99, 1.00, 0.01):
            corrects = 0
            for p in prob_history[bar_dirs[0]]:
                p_biased = (p + bias) / (bias + 1) # new sum = (p+bias) + (1-p) = bias+1
                if p_biased >= 0.5:
                    corrects += 1
            for p in prob_history[bar_dirs[1]]:
                p_biased = (p + bias) / (bias + 1) # new sum = (p+bias) + (1-p) = bias+1
                if p_biased < 0.5:
                    corrects += 1
            acc = corrects / total
            msg += '%s%.2f: %.3f\n' % (bar_dirs[0], bias, acc)
            if acc > max_acc:
                max_acc = acc
                max_bias = bias
        msg += 'Max acc = %.3f at bias %.2f\n' % (max_acc, max_bias)
        fout.write(msg)
        fout.close()
        print(msg)
    '''
    print('Finished.')