def mne_eeg_remove_eyeblinks(raw, channels=None, overwrite=False):
    """Remove eye-blink ICA components from an EEG recording.

    Fits a 15-component ICA on a 1 Hz high-pass copy of the data (or loads a
    previously saved ICA from '<raw>-blink_ica.fif'), marks components that
    correlate with the EOG-proxy channels, and applies the ICA to a copy of
    ``raw``.

    Parameters
    ----------
    raw : mne.io.Raw
        Continuous recording; must have a readable ``filenames[0]``.
    channels : list of str | None
        Channels used as EOG proxies for ``find_bads_eog``.
        Defaults to ['Fp1', 'Fp2'].
    overwrite : bool
        If True, refit the ICA even if a saved '-blink_ica.fif' exists.

    Returns
    -------
    mne.io.Raw
        A cleaned copy of ``raw`` (the input is not modified).
    """
    if channels is None:
        channels = ['Fp1', 'Fp2']
    icafile = tb.fileparts(raw.filenames[0], '-blink_ica.fif')
    if not pathlib.Path(icafile).is_file() or overwrite:
        # Fit on high-passed data; slow drifts degrade ICA decomposition.
        filt_raw = raw.copy()
        filt_raw.load_data().filter(l_freq=1., h_freq=None,
                                    n_jobs=tb.n_jobs())
        ica = mne.preprocessing.ICA(n_components=15, random_state=97)
        ica.fit(filt_raw)
        eog_indices = []
        for ch in channels:
            eog_index, _ = ica.find_bads_eog(raw, ch_name=ch,
                                             reject_by_annotation=True)
            eog_indices.extend(eog_index)
        # De-duplicate components flagged by more than one proxy channel.
        eog_indices = sorted(set(eog_indices))
        ica.exclude = eog_indices
        if eog_indices:
            ica.plot_properties(raw, picks=eog_indices)
        ica.plot_sources(raw)
        # Save only a freshly fitted ICA; previously the file was rewritten
        # even when it had just been read back unchanged.
        ica.save(icafile)
    else:
        ica = mne.preprocessing.read_ica(icafile)
    clean_raw = raw.copy().load_data()
    ica.apply(clean_raw)
    return clean_raw
def mne_mark_visual_artifacts(raw, overwrite=False, mark_flat=True, remove_eye_blinks=True, overwrite_ransac=False):
    """Run the artifact-marking pipeline (flat segments, eye blinks, RANSAC,
    interactive inspection) and persist the marks to sidecar TSV files.

    When '<raw>_artifacts.tsv' is missing (or ``overwrite``), the data are
    inspected interactively and the resulting BAD annotations, full event
    list and bad-channel status are written out. Otherwise the previously
    saved channel and artifact marks are re-applied to ``raw``.
    Returns the (modified) ``raw``.
    """
    filename = tb.fileparts(raw.filenames[0], append='_artifacts.tsv')
    channel_file = tb.fileparts(raw.filenames[0], '_channels_marked.tsv', -4)
    event_file = tb.fileparts(raw.filenames[0], '_events_marked.tsv', -4)
    raw.load_data()
    raw = mne_annotations_replace_dc(raw)
    if mark_flat:
        raw = mne_mark_flat(raw, 'eeg')
    if remove_eye_blinks:
        raw = mne_eeg_remove_eyeblinks(raw)
    raw = mne_ransac_bad_channels(raw, overwrite=overwrite_ransac)
    if not pathlib.Path.is_file(pathlib.Path(filename)) or overwrite:
        # NOTE(review): mne_read_artifacts is called even though the artifact
        # file may not exist yet on this branch — presumably it tolerates a
        # missing file; confirm against its implementation.
        raw = mne_read_artifacts(raw, filename)
        # Interactive step: blocks until the user closes the plots.
        raw = mne_eeg_inspect_data_quality(raw, overwrite=True)
        # Keep only annotations whose description starts with 'BAD' for the
        # artifact file; the event file receives all annotations.
        i = np.flatnonzero(
            np.char.startswith(raw.annotations.description, 'BAD'))
        mne_annotations_write_tsv(filename, raw.annotations[i])
        mne_annotations_write_tsv(event_file, raw.annotations)
        chans = pd.read_csv(tb.fileparts(raw.filenames[0], '_channels.tsv', -4),
                            delimiter='\t')
        chans.loc[chans.name.isin(raw.info['bads']), 'status'] = 'bad'
        pd.DataFrame.to_csv(chans, channel_file, sep='\t', index=False)
    else:
        # Re-apply previously saved marks instead of inspecting again.
        chans = pd.read_csv(channel_file, delimiter='\t')
        raw.info['bads'] = list(chans.loc[chans['status'] == 'bad', 'name'])
        artifacts = mne_annotations_read_tsv(filename)
        # Annotations can only be concatenated when orig_time matches.
        artifacts.orig_time = raw.annotations.orig_time
        raw.set_annotations(artifacts + raw.annotations)
    return raw
def mne_apply_artifact_marks(raw):
    """Re-attach previously saved event annotations and bad-channel marks.

    Reads '<raw>_events_marked.tsv' and '<raw>_channels_marked.tsv' (written
    by the marking pipeline) and applies them to ``raw`` in place.
    Returns ``raw``.
    """
    base = raw.filenames[0]
    marks = mne_annotations_read_tsv(tb.fileparts(base, '_events_marked.tsv', -4))
    # orig_time must match before annotations can be concatenated.
    marks.orig_time = raw.annotations.orig_time
    raw.set_annotations(raw.annotations + marks)
    raw.info['bads'] = mne_bad_channels_from_tsv(
        tb.fileparts(base, '_channels_marked.tsv', -4))
    return raw
def mne_ransac_bad_channels(raw, overwrite=False):
    """Detect bad channels with RANSAC and cache the result in a TSV file.

    If '<raw>_channels_ransac.tsv' is missing (or ``overwrite``), RANSAC is
    fitted on epoched data and the BIDS channels table is rewritten with the
    detected channels marked 'bad'. Otherwise the cached table is read and
    its bad channels applied to ``raw.info['bads']``. Returns ``raw``.
    """
    bids_chan_file = tb.fileparts(raw.filenames[0], '_channels.tsv', -4)
    ransacfile = tb.fileparts(raw.filenames[0], '_channels_ransac.tsv')
    if not pathlib.Path(ransacfile).is_file() or overwrite:
        epochs = mne_epoch(raw).drop_bad()
        epochs.load_data()
        ransac = Ransac(random_state=999)
        ransac.fit(epochs)
        raw.info['bads'] = ransac.bad_chs_
        chans = pd.read_csv(bids_chan_file, delimiter='\t')
        chans.loc[chans.name.isin(ransac.bad_chs_), 'status'] = 'bad'
        # index=False keeps the cached TSV schema identical to the BIDS
        # channels file; previously the pandas index was written as an
        # extra unnamed column.
        chans.to_csv(ransacfile, sep='\t', index=False)
    else:
        chans = pd.read_csv(ransacfile, delimiter='\t')
        raw.info['bads'] = list(chans.loc[chans['status'] == 'bad', 'name'])
    return raw
def bids_write_json_to_participants_tsv(bids_folder, json_file,
                                        participant_id=None):
    """Merge a participant JSON file into the BIDS participants.tsv.

    Each truthy key/value from ``json_file`` is written into the row of
    ``participant_id`` (derived from the filename when not given). The
    participants.json sidecar is backed up and extended with a stub
    Description for any new column.

    Returns
    -------
    pandas.DataFrame
        The updated participants table.
    """
    if participant_id is None:
        participant_id = bids_get_participant_id_from_filename(json_file)
    js = pd.read_json(json_file, typ='series')
    subs = bids_read_participants_tsv(bids_folder)
    # Positional row indices matching this participant.
    i = [i for i, x in enumerate(subs.participant_id == participant_id) if x]
    for k in js.keys():
        # Skip falsy values so existing entries are not blanked out.
        # (Removed leftover debug prints of every key/value.)
        if js[k]:
            subs.loc[i, k] = str(js[k])
    subs.to_csv(bids_get_participants_tsv_filename(bids_folder),
                sep='\t', index=False)
    fdir, fname, ext = tb.fileparts(
        str(pathlib.Path(bids_folder, 'participants.json')))
    # Back up participants.json before modifying it.
    shutil.copyfile(pathlib.Path(fdir, fname + ext),
                    pathlib.Path(fdir, fname + '_backup' + ext))
    js = tb.json_read(pathlib.Path(fdir, fname + ext))
    for a in subs.keys():
        if a not in list(js.keys()):
            # New column: add a placeholder description entry.
            js.update({a: {'Description': a}})
    tb.json_write(pathlib.Path(fdir, fname + ext), js)
    return subs
def BrainSenseTimeDomain_to_bids(filename, subject, bids_folder='/bids',
                                 task='BrainSenseTimeDomain'):
    """Convert a Percept BrainSenseTimeDomain JSON report into a BIDS dataset.

    Imports the time-domain data, round-trips it through a temporary EDF
    (MNE's BIDS writer needs a file-backed raw), writes the BIDS recording,
    copies the source JSON into 'sourcedata', and renders overview figures.
    """
    data = read_file(filename)
    opath, fname, ext = tb.fileparts(filename)
    # Strip a 'sub-' style prefix from the subject label if present.
    if subject.find('-') > 0:
        subject = subject[subject.find('-') + 1:]
    # SessionDate like '2021-01-02T03:04:05Z' -> '20210102T030405'.
    session = data['SessionDate'][:-1].replace('-', '').replace(':', '')
    basename = make_bids_basename(subject=subject, task=task, session=session)
    bpath = make_bids_folders(subject=subject, session=session,
                              make_dir=False)
    sourcename = make_bids_basename(subject=subject, session=session)
    raw = import_BrainSenseTimeDomain(filename)
    if raw is not None:
        rfig = raw.plot(color='k')
        rfig.savefig(
            pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata', 'figures',
                         basename + '.png'))
        # NOTE(review): 'tmp.edf' lives in the current working directory —
        # concurrent runs in the same cwd would clobber each other.
        if os.path.exists('tmp.edf'):
            os.remove('tmp.edf')
        ephys.mne_write_edf(raw, 'tmp.edf')
        # Re-read from EDF so write_raw_bids gets a file-backed raw object.
        raw = mne.io.read_raw_edf('tmp.edf')
        write_raw_bids(raw, bids_basename=basename, output_path=bids_folder,
                       overwrite=True)
        if not os.path.isdir(
                pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata')):
            os.makedirs(pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata'))
        # Keep the original vendor JSON alongside the BIDS data.
        shutil.copyfile(
            filename,
            pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata',
                         basename + ext))
        plot_wavelet_spectra(pathlib.Path(bids_folder, bpath, 'eeg',
                                          'sourcedata', basename + ext),
                             typefield=task)
        plot_BrainSenseLfp(
            pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata',
                         basename + ext))
        os.remove('tmp.edf')
        # Rename the copied source file from the task-specific basename to
        # the session-level source name.
        shutil.move(
            pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata',
                         basename + ext),
            pathlib.Path(bids_folder, bpath, 'eeg', 'sourcedata',
                         sourcename + ext))
def anonymize(filename):
    """Strip patient-identifying strings from a Percept JSON report in place.

    A copy of the original is kept as 'Sensitive_<name>' in the same folder;
    every identifying value found in the report is then blanked from the
    original file. Returns ``filename``.
    """
    fdir, fname, ext = tb.fileparts(filename)
    # Preserve the un-anonymized original under a 'Sensitive_' name.
    shutil.copyfile(filename,
                    pathlib.Path(fdir, 'Sensitive_' + fname[7:] + ext))
    data = read_file(filename)
    patient = data.PatientInformation['Final']
    sensitive = [
        patient['PatientLastName'],
        patient['PatientFirstName'],
        patient['PatientId'],
        patient['PatientDateOfBirth'],
        patient['PatientGender'],
        data.DeviceInformation['Final']['NeurostimulatorSerialNumber'],
    ]
    for item in sensitive:
        tb.replace_txt_in_file(filename, item)
    # replace_txt_in_file leaves a .bak copy; remove it so no identifying
    # data survives next to the anonymized file.
    os.remove(str(filename) + '.bak')
    return filename
def plot_LfpFrequencySnapshotEvents(filename):
    """Plot and export per-event LFP snapshot spectra from a Percept report.

    For each LfpFrequencySnapshotEvents entry, plots the right/left
    hemisphere FFT spectra, saves a PNG per event under '<dir>/figures',
    and accumulates all spectra into a TSV table.
    """
    data = read_file(filename)
    fpath = tb.fileparts(filename)
    if 'LfpFrequencySnapshotEvents' in data['DiagnosticData']:
        eventlist = data['DiagnosticData']['LfpFrequencySnapshotEvents']
        n = 0
        for a in eventlist:
            if 'LfpFrequencySnapshotEvents' in a.keys():
                if n == 0:
                    # Frequency axis taken from the first event's
                    # right-hemisphere snapshot.
                    spectra = pd.DataFrame(
                        index=a['LfpFrequencySnapshotEvents']
                        ['HemisphereLocationDef.Right']['Frequency'])
                n += 1
                outname = reformat_DateTime(
                    a['DateTime']) + '_' + a['EventName'].replace(
                        ' ', '_') + '_ID' + str(a['EventID'])
                lfpr = a['LfpFrequencySnapshotEvents'][
                    'HemisphereLocationDef.Right']
                lfpl = a['LfpFrequencySnapshotEvents'][
                    'HemisphereLocationDef.Left']
                ch_r = reformat_LfpFrequencySnapshotEvents_channelname(
                    lfpr['SenseID'], 'R')
                ch_l = reformat_LfpFrequencySnapshotEvents_channelname(
                    lfpl['SenseID'], 'L')
                pfig = plt.figure(figsize=(5.17, 4.05))
                plt.plot(lfpr['Frequency'], lfpr['FFTBinData'])
                plt.plot(lfpl['Frequency'], lfpl['FFTBinData'])
                plt.xlabel('Frequency [Hz]')
                plt.ylabel('Spectral power [uV]')
                plt.legend([ch_r, ch_l])
                plt.title(outname)
                spectra.loc[:, outname + '_' + ch_r] = lfpr['FFTBinData']
                # BUG FIX: the left-hemisphere column previously stored the
                # RIGHT hemisphere data (lfpr); store lfpl here.
                spectra.loc[:, outname + '_' + ch_l] = lfpl['FFTBinData']
                if not os.path.isdir(pathlib.Path(fpath[0], 'figures')):
                    os.makedirs(pathlib.Path(fpath[0], 'figures'))
                pfig.savefig(str(
                    pathlib.Path(fpath[0], 'figures',
                                 outname + '_snapshot_' + str(n) + '.png')),
                             dpi=300)
                spectra.to_csv(pathlib.Path(fpath[0], 'figures',
                                            outname + '_snapshot_psd.tsv'),
                               sep='\t')
def plot_BrainSenseLfp(filename):
    """Plot LFP power and stimulation amplitude for each BrainSenseLfp session.

    For every BrainSenseLfp block in the report, builds a time series of
    bilateral LFP power (scaled to uV) and stimulation current, saves a PNG
    and a TSV under '<dir>/figures'.

    Returns ``(None, None)`` when the file contains no BrainSenseLfp data
    (and ``None`` implicitly otherwise, matching the original contract).
    """
    data = read_file(filename)
    fpath = tb.fileparts(filename)
    if 'BrainSenseLfp' in data.keys():
        for bs in data['BrainSenseLfp']:
            chans = reformat_BrainSenseLfp_channelname(bs)
            lfp = bs['LfpData']
            # Build all rows first, then construct the DataFrame once —
            # row-by-row .loc assignment is quadratic.
            rows = []
            tmin = lfp[0]['TicksInMs'] if lfp else 0
            for b in lfp:
                rows.append({
                    'Time': (b['TicksInMs'] - tmin) / 1000,
                    'AbsTime': b['TicksInMs'],
                    chans[1]: b['Right']['LFP'] / 1000,
                    chans[0]: b['Left']['LFP'] / 1000,
                    'stimR': b['Right']['mA'],
                    'stimL': b['Left']['mA'],
                })
            df = pd.DataFrame(rows)
            ylegend = [chans[1], chans[0], 'stimR', 'stimL']
            df.plot(x='Time', y=ylegend)
            plt.xlabel('Time [s]')
            plt.ylabel('LFP + Stimulation amplitude')
            outname = data['SessionDate'][:-1].replace('-', '').replace(
                ':', '')
            plt.title(outname)
            if not os.path.isdir(pathlib.Path(fpath[0], 'figures')):
                os.makedirs(pathlib.Path(fpath[0], 'figures'))
            plt.gcf().savefig(str(
                pathlib.Path(fpath[0], 'figures',
                             outname + '_BrainSenseLfp.png')),
                              dpi=300)
            df.to_csv(pathlib.Path(fpath[0], 'figures',
                                   outname + '_BrainSenseLfp.tsv'),
                      sep='\t')
    else:
        # str() fix: callers pass pathlib.Path objects and 'str' + Path
        # raised TypeError here.
        print('No BrainSenseLfp in file ' + str(filename))
        return None, None
def mne_burst_analysis(raw, freqranges=None, threshold=50,
                       common_threshold=True, min_length=200,
                       method='wavelet', smoothing=200, common_zscore=True):
    """Compute per-channel burst statistics for one or more frequency bands.

    For each band, a power envelope is computed ('hilbert' or 'wavelet'
    method), thresholded at the given percentile, and burst duration /
    amplitude statistics are collected per channel. Results are written to
    '<raw>_bursts.tsv' and the settings to '<raw>_bursts.json'.

    Returns a 5-tuple: a list of labels describing the remaining elements,
    the per-channel DataFrame, the settings dict, the measurement info, and
    the channel-wise burst dict (bursts of the LAST band only).
    """
    if freqranges is None:
        freqranges = {'beta': [13, 30]}
    csvfile = tb.fileparts(raw.filenames[0], '_bursts.tsv')
    bdf = pd.DataFrame([])
    for fband, frange in freqranges.items():
        print(fband)
        if method == 'hilbert':
            # Band-pass then Hilbert envelope, smoothed with a rolling mean
            # of `smoothing` ms.
            df = raw.copy().filter(l_freq=frange[0], h_freq=frange[1],
                                   n_jobs=tb.n_jobs()).apply_hilbert(
                                       envelope=True).to_data_frame()
            for ch in raw.ch_names:
                df.loc[:, ch] = df[ch].rolling(
                    axis=0,
                    window=int(smoothing / 1000 *
                               raw.info['sfreq'])).mean().astype(
                                   'float').interpolate(
                                       method='linear',
                                       limit_direction='both')
            sfreq = raw.info['sfreq']
            info = raw.info
        elif method == 'wavelet':
            # Wavelet power averaged across the band's frequencies.
            wav = mne_tf_wavelet(mne_cont_epoch(raw),
                                 freqs=np.arange(frange[0], frange[1] + 1),
                                 n_freq=smoothing / 1000 * raw.info['sfreq'],
                                 zero_mean=False,
                                 n_cycles=10)
            df = pd.DataFrame(np.squeeze(wav.data.mean(axis=2)).transpose(),
                              columns=raw.ch_names)
            sfreq = wav.info['sfreq']
            info = wav.info
        if common_zscore:
            # Z-score each channel against its own mean/std.
            df = (df - df.mean()) / df.std()
        bursts = OrderedDict()
        if common_threshold:
            # One percentile threshold across all channels.
            bthresh = np.percentile(df, threshold)
        for ch in raw.ch_names:
            bdata = df.loc[:, ch]
            if not common_threshold:
                bthresh = np.percentile(bdata, threshold)
            bursts.update(
                {ch: rox_burst_duration(bdata, bthresh, sfreq, min_length)})
            bdf.loc[ch, fband + '_threshold'] = bthresh
            if bursts[ch]['n']:
                if bursts[ch]['n'] > 10:
                    # Slope of duration-vs-amplitude fit (k-fold linear
                    # model); needs enough bursts to be meaningful.
                    mdl = stats.fitlm_kfold(bursts[ch]['bdur'],
                                            bursts[ch]['bamp'], 5)
                    bdf.loc[ch, fband + '_slope'] = np.mean(mdl[1])
                else:
                    bdf.loc[ch, fband + '_slope'] = 0
                bdf.loc[ch, fband + '_mdur'] = bursts[ch]['bdur'].mean()
                # Burst rate normalized by recording length.
                bdf.loc[ch, fband + '_n'] = bursts[ch]['n'] / raw._last_time
                bdf.loc[ch, fband + '_mamp'] = bursts[ch]['bamp'].mean()
                bdf.loc[ch, fband + '_mpow'] = bdata.mean()
            else:
                # No bursts detected: zero all statistics for this band.
                for s in ['_slope', '_mdur', '_n', '_mamp', '_mpow']:
                    bdf.loc[ch, fband + s] = 0
    bdf.to_csv(csvfile, sep='\t')
    burst_settings = {
        'threshold[%]': threshold,
        'common_threshold': common_threshold,
        'common_zscore': common_zscore,
        'sfreq[Hz]': sfreq,
        'method': method,
        'smoothing[ms]': smoothing,
        'min_length[ms]': min_length,
        'freqranges[Hz]': freqranges
    }
    tb.json_write(tb.fileparts(raw.filenames[0], '_bursts.json'),
                  burst_settings)
    bdf._metadata = burst_settings
    # NOTE(review): 'setttings' typo is preserved — it is a runtime value
    # returned to callers, so fixing it could break label-based lookups.
    return ['df', 'setttings', 'info',
            'chanwise'], bdf, burst_settings, info, bursts
def mne_eeg_inspect_data_quality(raw, overwrite=False):
    """Interactively inspect EEG quality and write a data-quality JSON.

    Shows PSD, raw-browser and ICA-source plots (blocking), then prompts the
    reviewer for a quality assessment via stdin, computes the fraction of
    un-annotated ('good') samples, and saves everything to
    '<raw>_dataquality.json'. Returns ``raw``.

    NOTE(review): the ``overwrite`` parameter is unused in this body —
    presumably kept for signature symmetry with the other pipeline steps.
    """
    events, event_id = mne.events_from_annotations(raw)
    psd_fig = raw.plot_psd(picks='eeg', fmin=2, fmax=40,
                           n_fft=int(3 * raw.info['sfreq']),
                           reject_by_annotation=True)
    psd_fig.savefig(tb.fileparts(raw.filenames[0], '_raw_psd.png'))
    # Decimate the browser display to ~100 Hz for responsiveness.
    decim = int(raw.info['sfreq'] / 100)
    rawfig = raw.plot(duration=60, block=True, events=events,
                      event_id=event_id, decim=decim, n_channels=32)
    rawfig.savefig(tb.fileparts(raw.filenames[0], '_raw_marked.png'))
    ica = mne_plot_ica(raw)
    ica.plot_sources(raw, block=True)
    ica.save(tb.fileparts(raw.filenames[0], '_visual_ica.fif'))
    print('Please check the data quality and oscillations.')
    # Keys and prompts are index-aligned.
    assessment = [
        'usable', 'rating', 'central_theta', 'occipital_alpha', 'central_mu',
        'frontal_lowbeta', 'frontal_highbeta'
    ]
    questions = [
        'Is this file usable for your purpose?',
        'Please rate the overall data quality [0-10]',
        'What is the central theta [4 - 8] peak frequency? [0 if none]',
        'What is the occipital alpha [8 - 12] peak frequency? [0 if none]',
        'What is the central mu [8 - 12] peak frequency? [0 if none]',
        'What is the frontal low beta [13 - 19] peak frequency? [0 if none]',
        'What is the frontal high beta [20 - 35] peak frequency? [0 if none]'
    ]
    vals = []
    for n, a in enumerate(assessment):
        answer = input(questions[n])
        vals.append(answer)
    dataquality = dict(zip(assessment, vals))
    # Mark every sample covered by a BAD annotation as 0 ('not good').
    good_samples = np.ones(raw.n_times)
    bad = mne_annotations_get_bad(raw.annotations)
    for n, a in enumerate(bad):
        good_samples[int(raw.time_as_index(a['onset'])
                         ):int(raw.time_as_index(a['onset'] +
                               a['duration']))] = 0
    dataquality.update({
        'good_seconds':
        np.round(np.sum(good_samples) / raw.info['sfreq'], 2)
    })
    dataquality.update({
        'pct_good_samples':
        np.round(100 * np.sum(good_samples) / raw.n_times, 2)
    })
    dataquality.update(
        {'good_channels': raw.info['nchan'] - len(raw.info['bads'])})
    dataquality.update({'bad_channels': raw.info['bads']})
    tb.json_write(tb.fileparts(raw.filenames[0], '_dataquality.json'),
                  dataquality)
    plt.close('all')
    return raw
def _plot_hemisphere_rpow(wav, rpow, chan_prefix, title):
    """Plot per-channel relative power plus the channel mean for one side.

    ``chan_prefix`` ('LFPR'/'LFPL') selects channels via tb.ci; the mean
    trace is drawn in black on top of the individual channels.
    """
    i = tb.ci(chan_prefix, wav.ch_names)
    for a in i:
        plt.plot(wav.freqs, rpow[a, :], linewidth=.5)
    plt.legend([wav.ch_names[ir] for ir in i])
    plt.plot(wav.freqs, rpow[i, :].mean(axis=0), linewidth=2, color='k')
    plt.ylim((0, 10))
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Relative spectral power [%]')
    plt.xlim((0, 45))
    plt.title(title)


def plot_wavelet_spectra(filename, typefield='all'):
    """Plot wavelet power spectra for each time-domain field of a report.

    For every requested field, computes wavelet spectra, plots right/left
    hemisphere relative power side by side, saves the figure and exports
    relative ('rpow'), standardized ('spow') and mean ('mpow') spectra as
    TSV tables under '<dir>/figures'.
    """
    data = read_file(filename)
    fpath = tb.fileparts(filename)
    if typefield == 'all':
        flist = get_TimeDomainFieldNames(filename)
    else:
        flist = typefield
    if isinstance(flist, str):
        flist = [flist]
    for f in flist:
        raw = import_rawdata(filename, typefield=f)
        if raw is not None:
            epochs = ephys.mne_cont_epoch(raw)
            wav = ephys.mne_tf_wavelet(epochs)
            # Average power over time -> (channels, freqs).
            mpow = wav.data[0, :, :, :].mean(axis=2)
            rpow, spow = ephys.normalize_spectrum(mpow, wav.freqs)
            pfig = plt.figure(figsize=(11.17, 4.05))
            plt.subplot(1, 2, 1)
            _plot_hemisphere_rpow(wav, rpow, 'LFPR', 'Right hemisphere')
            plt.subplot(1, 2, 2)
            # (Removed leftover debug plt.title(a) that was overwritten by
            # the hemisphere title on every iteration.)
            _plot_hemisphere_rpow(wav, rpow, 'LFPL', 'Left hemisphere')
            outname = data['SessionDate'][:-1].replace('-', '').replace(
                ':', '') + '_' + f
            plt.suptitle(outname)
            if not os.path.isdir(pathlib.Path(fpath[0], 'figures')):
                os.makedirs(pathlib.Path(fpath[0], 'figures'))
            pfig.savefig(str(
                pathlib.Path(fpath[0], 'figures', outname + '_rpow.png')),
                         dpi=300)
            # Export all three spectra variants with identical layout.
            for tag, pw in (('rpow', rpow), ('spow', spow), ('mpow', mpow)):
                pd.DataFrame(np.transpose(pw), index=wav.freqs,
                             columns=wav.ch_names).to_csv(
                                 pathlib.Path(fpath[0], 'figures',
                                              outname + '_' + tag + '.tsv'),
                                 sep='\t')
def plot_LFPMontage_spectra(filename):
    """Plot LFP survey (montage) spectra per hemisphere and export a TSV.

    Channels flagged as artifact-contaminated are drawn dashed; peak
    frequency/magnitude markers are added. The per-channel spectra are
    collected into a frequency-indexed table saved next to the figure.
    Returns ``(None, None)`` when the file has no LFPMontage data.
    """
    data = read_file(filename)
    fpath = tb.fileparts(filename)
    if 'LFPMontage' in data.keys():
        chans = list()
        nchans = len(data.LFPMontage)
        hemisphere = list()
        for a in np.arange(0, nchans):
            # First letter of the hemisphere label: 'R' or 'L'.
            hemisphere.append(
                data.LFPMontage[a]['Hemisphere'].split('.')[-1][0])
            chans.append(reformat_LFPMontage_channelname(data.LFPMontage[a]))
        ir, il = tb.ci('R', hemisphere), tb.ci('L', hemisphere)
        right_chans = [chans[i] for i in ir]
        left_chans = [chans[i] for i in il]
        columns = [chans[i] for i in ir + il]
        spectra = pd.DataFrame(columns=columns,
                               index=data.LFPMontage[0]['LFPFrequency'])
        spectra.index.name = 'Frequency'
        spectra.columns.name = 'Channels'
        pfig = plt.figure(figsize=(11.17, 4.05))
        plt.subplot(1, 2, 1)
        for a in ir:
            lfp = data.LFPMontage[a]
            linestyle = ('-', '--')  # solid = clean, dashed = artifact
            artefact = lfp['ArtifactStatus'].split(
                '.')[-1] != 'ARTIFACT_NOT_PRESENT'
            plt.plot(lfp['LFPFrequency'], lfp['LFPMagnitude'],
                     linewidth=0.5, linestyle=linestyle[artefact])
            if not artefact:
                plt.scatter(lfp['PeakFrequencyInHertz'],
                            lfp['PeakMagnitudeInMicroVolt'])
            spectra[chans[a]] = lfp['LFPMagnitude']
        spectra.loc[:, right_chans].mean(axis=1).plot(linewidth=3, color='k')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Spectral power [uV]')
        plt.legend(right_chans)
        plt.title('Right hemisphere')
        plt.subplot(1, 2, 2)
        for a in il:
            lfp = data.LFPMontage[a]
            linestyle = ('-', '--')
            # NOTE(review): the left side tests equality against
            # 'ArtifactStatusDef.ARTIFACT_PRESENT' while the right side
            # tests `!= ARTIFACT_NOT_PRESENT`, and the peak scatter is
            # unconditional here but artifact-gated above — possibly an
            # unintended asymmetry; confirm which predicate is intended.
            artefact = lfp[
                'ArtifactStatus'] == 'ArtifactStatusDef.ARTIFACT_PRESENT'
            plt.plot(lfp['LFPFrequency'], lfp['LFPMagnitude'],
                     linewidth=0.5, linestyle=linestyle[artefact])
            plt.scatter(lfp['PeakFrequencyInHertz'],
                        lfp['PeakMagnitudeInMicroVolt'])
            spectra[chans[a]] = lfp['LFPMagnitude']
        spectra.loc[:, left_chans].mean(axis=1).plot(linewidth=3, color='k')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Spectral power [uV]')
        plt.legend(left_chans)
        plt.title('Left hemisphere')
        outname = data['SessionDate'][:-1].replace('-', '').replace(':', '')
        plt.suptitle(outname)
        if not os.path.isdir(pathlib.Path(fpath[0], 'figures')):
            os.makedirs(pathlib.Path(fpath[0], 'figures'))
        pfig.savefig(str(
            pathlib.Path(fpath[0], 'figures',
                         outname + '_LFPMontage_raw.png')),
                     dpi=300)
        spectra.to_csv(pathlib.Path(fpath[0], 'figures',
                                    outname + '_LFPMontage_raw.tsv'),
                       sep='\t')
    else:
        # NOTE(review): '+ filename' would raise TypeError for Path inputs.
        print('No LFPMontage in file ' + filename)
        return None, None
def bids_get_participant_id_from_filename(filename):
    """Return the 'sub-<id>' directory component of a BIDS file path.

    Walks the directory part of ``filename`` and returns the first path
    component starting with 'sub-', or ``None`` when no such component
    exists.
    """
    fdir = tb.fileparts(filename)[0]
    for part in pathlib.Path(fdir).parts:
        if part.startswith('sub-'):
            return part
    return None
def bids_backup_participants_tsv(bids_folder):
    """Copy participants.tsv to a '_backup' sibling in the same directory."""
    fdir, fname, ext = tb.fileparts(
        bids_get_participants_tsv_filename(bids_folder))
    source = pathlib.Path(fdir, fname + ext)
    backup = pathlib.Path(fdir, fname + '_backup' + ext)
    shutil.copyfile(str(source), str(backup))