def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
    picks = np.arange(len(raw.ch_names))
    picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False,
                           interp='zero', use_cps=True)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname, interp='zero', use_cps=True)
    # test cHPI indication
    hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(raw.info)
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg[:3], picks_eeg[:3]]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert_true(
                (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all())
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi, t_step_min=10.)
    quats = read_head_pos(pos_fname)
    _assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5)
def test_psd_welch_average_kwarg(kind):
    """Test `average` kwarg of psd_welch()."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = -0.5, 0.5
    fmin, fmax = 0, np.inf
    n_fft = 256
    n_per_seg = 128
    n_overlap = 0
    event_id = 2

    events = read_events(event_fname)
    events[:, 0] -= first_samp

    kws = dict(fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, n_fft=n_fft,
               n_per_seg=n_per_seg, n_overlap=n_overlap, picks=picks_psd)

    if kind == 'raw':
        inst = raw
    elif kind == 'epochs':
        inst = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                      proj=False, preload=True, baseline=None)
    elif kind == 'evoked':
        inst = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                      proj=False, preload=True, baseline=None).average()
    else:
        raise ValueError('Unknown parametrization passed to test, check test '
                         'for typos.')

    psds_mean, freqs_mean = psd_welch(inst=inst, average='mean', **kws)
    psds_median, freqs_median = psd_welch(inst=inst, average='median', **kws)
    psds_unagg, freqs_unagg = psd_welch(inst=inst, average=None, **kws)

    # Frequencies should be equal across all "average" types, as we feed in
    # the exact same data.
    assert_allclose(freqs_mean, freqs_median)
    assert_allclose(freqs_mean, freqs_unagg)

    # For `average=None`, the last dimension contains the un-aggregated
    # segments.
    assert psds_mean.shape == psds_median.shape
    assert psds_mean.shape == psds_unagg.shape[:-1]
    assert_allclose(psds_mean, psds_unagg.mean(axis=-1))

    # Compare with manual median calculation
    assert_allclose(psds_median, np.median(psds_unagg, axis=-1))
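# The same `average` semantics can be checked on a plain NumPy array with
# psd_array_welch. A minimal sketch, assuming an MNE version where
# psd_array_welch supports the `average` kwarg; the signal frequency, sfreq
# and segment lengths below are arbitrary illustrative choices, not taken
# from the test above.
import numpy as np
from mne.time_frequency import psd_array_welch

sfreq = 256.
t = np.arange(0, 10, 1. / sfreq)
x = np.sin(2 * np.pi * 10. * t)[np.newaxis, :]  # one channel, 10 Hz sine

psd_mean, freqs = psd_array_welch(x, sfreq, n_fft=256, n_per_seg=128,
                                  n_overlap=0, average='mean')
psd_segs, _ = psd_array_welch(x, sfreq, n_fft=256, n_per_seg=128,
                              n_overlap=0, average=None)
# averaging the un-aggregated segments reproduces average='mean'
np.testing.assert_allclose(psd_mean, psd_segs.mean(axis=-1))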
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
    picks = np.arange(len(raw.ch_names))
    picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0']) + (sphere.radius,)
    src = setup_volume_source_space(sphere=sphere_vol, pos=70.,
                                    sphere_units='m')
    stcs = [_make_stc(raw, src)] * 15
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw.info, stcs, None, src, sphere,
                           head_pos=pos_fname, interp='zero',
                           first_samp=raw.first_samp)
    # need to trim extra samples off this one
    raw_chpi = add_chpi(raw_sim.copy(), head_pos=pos_fname, interp='zero')
    # test cHPI indication
    hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(raw.info, on_missing='raise')
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg[:3], picks_eeg[:3]]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all()
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    chpi_amplitudes = compute_chpi_amplitudes(raw, t_step_min=10.)
    coil_locs = compute_chpi_locs(raw.info, chpi_amplitudes)
    quats_sim = compute_head_pos(raw_chpi.info, coil_locs)
    quats = read_head_pos(pos_fname)
    _assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5,
                  vel_atol=0.03)  # velocity huge because of t_step_min above
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI"""
    with warnings.catch_warnings(record=True):  # MaxShield
        raw = Raw(raw_chpi_fname, allow_maxshield=True)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname)
    # test cHPI indication
    hpi_freqs, _, hpi_pick, hpi_on, _ = _get_hpi_info(raw.info)
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_on)
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg, picks_eeg]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert_true(
                (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all())
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    trans_sim, rot_sim, t_sim = _calculate_chpi_positions(raw_chpi)
    trans, rot, t = get_chpi_positions(pos_fname)
    t -= raw.first_samp / raw.info['sfreq']
    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
                       max_dist=0.005)
def welch_PSD(epochs_eyes_closed):
    from mne.time_frequency import psd_welch
    fig, ax = plt.subplots(1, 1, figsize=(8, 5))
    kwargs = dict(fmin=0, fmax=50, n_jobs=4)
    CTL_psds_welch_mean, freqs_mean = psd_welch(epochs_eyes_closed,
                                                average='mean', **kwargs)
    CTL_psds_welch_mean = 10 * np.log10(CTL_psds_welch_mean)
    CTL_psds_welch_mean = CTL_psds_welch_mean.mean(0).mean(0)
    # np.save('PD_PSD_mean_eyesclosed.npy', CTL_psds_welch_mean)
    PD_psds_welch_mean = np.load('PD_PSD_mean_eyesclosed.npy')
    ax.plot(freqs_mean, CTL_psds_welch_mean, color='red', ls='-',
            label='CTL mean of segments')
    ax.plot(freqs_mean, PD_psds_welch_mean, color='green', ls='-',
            label='PD mean of segments')
    y_location = np.round(np.arange(-15, 15, 5), 2)
    ax.set_yticks(y_location)
    y_labelname = y_location
    ax.set_yticklabels(y_labelname)
    ax.set(title='Welch PSD - Eyes closed', xlabel='Frequency (Hz)',
           ylabel='Power Spectral Density (dB)')
    ax.legend(loc='upper right')
    plt.show()
def eeg_power_band(epochs):
    """EEG relative power band feature extraction.

    This function takes an ``mne.Epochs`` object and creates EEG features
    based on relative power in specific frequency bands that are compatible
    with scikit-learn.

    Parameters
    ----------
    epochs : Epochs
        The data.

    Returns
    -------
    X : numpy array of shape [n_samples, 5 * n_channels]
        Transformed data.
    """
    # specific frequency bands
    FREQ_BANDS = {"delta": [0.5, 4.5],
                  "theta": [4.5, 8.5],
                  "alpha": [8.5, 11.5],
                  "sigma": [11.5, 15.5],
                  "beta": [15.5, 30]}

    psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)
    # Normalize the PSDs
    psds /= np.sum(psds, axis=-1, keepdims=True)

    X = []
    for fmin, fmax in FREQ_BANDS.values():
        psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)
        X.append(psds_band.reshape(len(psds), -1))

    return np.concatenate(X, axis=1)
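# Since the docstring advertises scikit-learn compatibility, a hedged usage
# sketch: wrap eeg_power_band in a FunctionTransformer so the band powers can
# feed a classifier. `epochs_train` and `epochs_test` are assumed to exist
# with labels encoded in their event codes; the RandomForestClassifier is an
# illustrative choice, not mandated by the function above.
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer

pipe = make_pipeline(FunctionTransformer(eeg_power_band, validate=False),
                     RandomForestClassifier(n_estimators=100,
                                            random_state=42))
pipe.fit(epochs_train, epochs_train.events[:, 2])  # labels from event codes
y_pred = pipe.predict(epochs_test)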
def _compute_and_save_psd(data_fname, fmin=0, fmax=120,
                          method='welch', is_epoched=False,
                          n_fft=256, n_overlap=0,
                          picks=None, proj=False, n_jobs=1, verbose=None):
    """Load epochs/raw from file, compute psd and save the result."""
    if is_epoched:
        epochs = read_epochs(data_fname)
    else:
        epochs = read_raw_fif(data_fname, preload=True)

    epochs_meg = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)

    if method == 'welch':
        from mne.time_frequency import psd_welch
        psds, freqs = psd_welch(epochs_meg, fmin=fmin, fmax=fmax)
    elif method == 'multitaper':
        from mne.time_frequency import psd_multitaper
        psds, freqs = psd_multitaper(epochs_meg, fmin=fmin, fmax=fmax)
    else:
        raise Exception('nonexistent method for psd computation')

    _get_raw_array(data_fname, save_data=False)
    psds_fname = _save_psd(data_fname, psds, freqs)
    _save_psd_img(data_fname, psds, freqs, is_epoched, method)

    return psds_fname
def test_spectrum():
    with tempfile.TemporaryDirectory() as dirpath:
        sample_folder = mne.datasets.sample.data_path()
        sample_fname = os.path.join(sample_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
        raw = mne.io.read_raw_fif(sample_fname, preload=True)

        psds, freqs = psd_welch(raw, fmin=1, fmax=40, tmin=1, tmax=10)
        ch_names = raw.info['ch_names']

        name = 'TestSpectrum'
        cond_name = '1'
        spectrum_dir = os.path.join(dirpath, 'spectrums')

        # one meggie-Spectrum can hold many spectrums, thus content is
        # dict-like
        content = {cond_name: psds}
        params = {'conditions': [cond_name]}

        # Create meggie-Spectrum object with spectrum array stored within
        # and save it to spectrum directory
        spectrum = Spectrum(name, spectrum_dir, params, content=content,
                            freqs=freqs, info=raw.info)
        ensure_folders([spectrum_dir])
        spectrum.save_content()

        # Creating meggie-Spectrum object with same name and folder should
        # allow accessing the saved content
        loaded_spectrum = Spectrum(name, spectrum_dir, params)
        assert (list(loaded_spectrum.content.keys())[0] == cond_name)
def _calc_psd_welch(self, raw, **kwargs):
    """Compute the Welch PSD for a raw instance.

    :param raw: mne.io.Raw instance
    :return: psd, freqs
    """
    self._update_from_kwargs(**kwargs)

    if self.check_dead_channels:
        self.picks = jb.picks.check_dead_channels(raw, picks=self.picks,
                                                  verbose=self.verbose)
    elif not self.picks.shape:
        self.picks = jb.picks.meg_and_ref_nobads(raw)
        # self.picks = jb.picks.meg_nobads(raw)

    self.tmax = self.tmax if self.tmax else raw.times[-1]
    self.fmax = self.fmax if self.fmax else raw.info.get("sfreq", 1000.0) / 2.0

    return psd_welch(raw, picks=self.picks, fmin=self.fmin, fmax=self.fmax,
                     tmin=self.tmin, tmax=self.tmax, n_fft=self.n_fft,
                     n_jobs=self.n_jobs, proj=self.proj)
def _run_fooof(raw, fmin=0.001, fmax=1, tmin=0, tmax=None, n_overlap=200,
               n_fft=400, peak_width_limits=(0.5, 12.0)):
    """Prepare data for FOOOF including welch and scaling, then apply."""
    from fooof import FOOOF
    spectra, freqs = psd_welch(raw, fmin=fmin, fmax=fmax, tmin=tmin,
                               tmax=tmax, n_overlap=n_overlap, n_fft=n_fft)

    # FOOOF doesn't like low frequencies, so multiply by 10.
    # This is corrected for in the output below.
    freqs = freqs * 10  # Remember these values are really 0.001 to 1.2 Hz
    fm = FOOOF(peak_width_limits=peak_width_limits)
    # And these values are really 0.0001 to 1 Hz
    freq_range = [0.001, 10]
    fm.fit(freqs, np.mean(spectra, axis=0), freq_range)
    return fm
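# Because the frequency axis is scaled by 10 before fitting, anything read
# back from the FOOOF model has to be divided by 10 to recover true Hz. A
# hedged sketch, assuming fooof >= 1.0 where fitted peaks live in
# ``fm.peak_params_`` as rows of [center frequency, power, bandwidth], and
# `raw` is an mne.io.Raw instance:
fm = _run_fooof(raw)
true_peak_freqs = fm.peak_params_[:, 0] / 10.  # undo the x10 scaling
true_bandwidths = fm.peak_params_[:, 2] / 10.
print('peaks (Hz):', true_peak_freqs, 'bandwidths (Hz):', true_bandwidths)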
def exec_thread(raw, len_readed):
    # structure to store the values for each electrode/band type
    results = {
        'beta': {},
        'alpha': {},
        'gamma': {},
        'theta': {}
    }
    # structure to store the frequency interval for each band
    interval_frenquency = {
        'beta': {
            'begin': 12,
            'end': 30
        },
        'alpha': {
            'begin': 8,
            'end': 12
        },
        'gamma': {
            'begin': 25,
            'end': 100
        },
        'theta': {
            'begin': 5,
            'end': 7
        }
    }
    for i in range(0, int(len_readed / 256)):
        # step through the signal with a pre-defined buffer, once per second
        psdw, freq = psd_welch(raw, fmin=5, fmax=50, tmin=i,
                               tmax=i + buffer_size, verbose=False)
        for j in results.keys():
            results[j] = max(
                np.mean(
                    psdw[:, interval_frenquency[j]['begin']:
                         interval_frenquency[j]['end']],
                    axis=1
                )
            )
        if (max(results.values()) == results['alpha']):
            routine(results)
def compute_and_save_psd(epochs_fname, fmin=0, fmax=120,
                         method='welch', n_fft=256, n_overlap=0,
                         picks=None, proj=False, n_jobs=1, verbose=None):
    """
    Load epochs from file, compute psd and save the result in numpy arrays
    """
    import numpy as np
    import os
    from mne import read_epochs

    epochs = read_epochs(epochs_fname)
    epochs_meg = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)

    if method == 'welch':
        from mne.time_frequency import psd_welch
        psds, freqs = psd_welch(epochs_meg)
    elif method == 'multitaper':
        from mne.time_frequency import psd_multitaper
        psds, freqs = psd_multitaper(epochs_meg)
    else:
        raise Exception('nonexistent method for psd computation')

    path, name = os.path.split(epochs_fname)
    base, ext = os.path.splitext(name)
    psds_fname = base + '-psds.npz'
    # freqs_fname = base + '-freqs.npy'
    psds_fname = os.path.abspath(psds_fname)
    # print(psds.shape)
    np.savez(psds_fname, psds=psds, freqs=freqs)
    # np.save(freqs_file, freqs)
    return psds_fname
def compute_psd_from_epochs(epochs: Epochs) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes power spectral density (PSD) from Epochs instance using MNE.

    Returns the power spectral densities (psds) in a shape of
    (n_epochs, n_channels, n_freqs) and the frequencies (freqs) in a shape
    of (n_freqs,).

    Args:
        Epochs epochs: Epochs instance to be used for PSD estimation

    :return: np.ndarray psds, np.ndarray freqs

    See Also
    --------
    mne.time_frequency.psd_welch : Computation of PSD using Welch's method.
    mne.time_frequency.psd_multitaper : Computation of PSD using multitapers.
    """
    epoch_length = epochs.get_data().shape[-1]
    if METHOD == 'welch':
        psds, freqs = psd_welch(epochs, fmin=F_MIN, fmax=F_MAX,
                                n_fft=epoch_length)
    elif METHOD == 'multitaper':
        psds, freqs = psd_multitaper(epochs, fmin=F_MIN, fmax=F_MAX)
    else:
        logging.error('Not a valid method for computing PSD, '
                      'valid methods are: welch, multitaper.')
        raise ValueError('Not a valid method for computing PSD, '
                         'valid methods are: welch, multitaper.')

    return psds, freqs
def feature_extract(self, method=None, plot=True):
    '''
    :param method: Feature extraction method. Currently only the PSD method
        is supported; time-domain features, frequency-domain features,
        entropy, and combined features may be added later. Since the current
        results are already good, leave it like this for now.
    :param plot: Whether to visualize the features.
    :return:
    '''
    self.all_feature, self.frequencies = psd_welch(self.epochs_filter,
                                                   fmin=self.fmin,
                                                   fmax=self.fmax,
                                                   verbose=False,
                                                   n_per_seg=128)
    # flatten the EEG features of all channels into one dimension
    self.all_feature = self.all_feature.reshape(self.all_feature.shape[0], -1)
    # self.all_feature = np.mean(self.all_feature, axis=1)
    # (alternative: average over the channel dimension to reduce it)
    if plot == True:
        for label in np.unique(self.all_label):
            _, ax = plt.subplots(1, 1)
            ax.set_title(list(self.trial_list.keys())[label])
            # plt.show()
            self.epochs_filter.copy().drop(
                indices=(self.all_label != label)).plot_psd(
                    dB=False, fmin=0.5, fmax=32., color='blue', ax=ax)
def get_psds_from_epochs(epochs):
    """Extracts power spectrum densities from epochs.

    Returns
    -------
    psds with associated frequencies calculated with the welch method.
    """
    psds, freqs = psd_welch(epochs, fmin=0.5, fmax=30., verbose=False)
    return psds, freqs
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
    picks = np.arange(len(raw.ch_names))
    picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
    raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space(sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    with pytest.deprecated_call():
        raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None,
                               head_pos=pos_fname, interp='zero')
    # need to trim extra samples off this one
    with pytest.deprecated_call():
        raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None,
                                chpi=True, head_pos=pos_fname, interp='zero')
    # test cHPI indication
    hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(raw.info)
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg[:3], picks_eeg[:3]]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all()
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi, t_step_min=10.)
    quats = read_head_pos(pos_fname)
    _assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5)
def get_avg_psd(raw):
    events = mne.find_events(raw, verbose=False)
    epochs = mne.epochs.Epochs(raw, events=events, event_id=event_id,
                               tmin=1, tmax=5, baseline=None, preload=True,
                               verbose=False, picks=[0, 1, 2, 3, 4, 5, 6, 7])
    psd1, freq = psd_welch(epochs['13'], n_fft=1028, n_per_seg=256 * 3,
                           verbose=0)
    psd2, _ = psd_welch(epochs['17'], n_fft=1028, n_per_seg=256 * 3, verbose=0)
    psd3, _ = psd_welch(epochs['21'], n_fft=1028, n_per_seg=256 * 3, verbose=0)
    psd4, _ = psd_welch(epochs['rest'], n_fft=1028, n_per_seg=256 * 3,
                        verbose=0)
    psd1_mean = (10 * np.log10(psd1)).mean(0)
    psd2_mean = (10 * np.log10(psd2)).mean(0)
    psd3_mean = (10 * np.log10(psd3)).mean(0)
    psd4_mean = (10 * np.log10(psd4)).mean(0)
    return freq, psd1_mean, psd2_mean, psd3_mean, psd4_mean
def segments_freq(eeg, win_len=2., win_step=0.5, n_fft=None, n_overlap=None,
                  picks=None, progress=True):
    from mne.io import _BaseRaw
    from mne.epochs import _BaseEpochs
    from mne.utils import _get_inst_data
    from mne.time_frequency import psd_welch

    sfreq = eeg.info['sfreq']
    t_min = eeg.times[0]
    time_length = len(eeg.times) / eeg.info['sfreq']
    n_win = int(np.floor((time_length - win_len) / win_step) + 1.)
    win_samples = int(np.floor(win_len * sfreq))

    # check and set n_fft and n_overlap
    if n_fft is None:
        n_fft = int(np.floor(sfreq))
    if n_overlap is None:
        n_overlap = int(np.floor(n_fft / 4.))
    if n_fft > win_samples:
        n_fft = win_samples
        n_overlap = 0
    if picks is None:
        picks = range(_get_inst_data(eeg).shape[-2])

    n_freqs = int(np.floor(n_fft / 2)) + 1
    if isinstance(eeg, _BaseRaw):
        n_channels, _ = _get_inst_data(eeg).shape
        psd = np.zeros((n_win, len(picks), n_freqs))
    elif isinstance(eeg, _BaseEpochs):
        n_epochs, n_channels, _ = _get_inst_data(eeg).shape
        psd = np.zeros((n_win, n_epochs, len(picks), n_freqs))
    else:
        raise TypeError('unsupported data type - has to be epochs or '
                        'raw, got {}.'.format(type(eeg)))

    # BTW: doing this with n_jobs=2 is about 100 times slower than with one
    # job
    p_bar = progressbar.ProgressBar(max_value=n_win)
    for w in range(n_win):
        psd_temp, freqs = psd_welch(eeg, tmin=t_min + w * win_step,
                                    tmax=t_min + w * win_step + win_len,
                                    n_fft=n_fft, n_overlap=n_overlap,
                                    n_jobs=1, picks=picks, verbose=False,
                                    proj=True)
        psd[w, :] = psd_temp
        if progress:
            p_bar.update(w)
    return psd.swapaxes(0, 1), freqs
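# A hedged usage sketch for segments_freq: `raw` is assumed to be a preloaded
# mne.io.Raw instance and the `progressbar` package must be importable even
# with progress=False. With Raw input the returned array is
# (n_picks, n_windows, n_freqs) because of the final swapaxes; with Epochs
# input it gains a leading epochs axis.
psd, freqs = segments_freq(raw, win_len=2., win_step=0.5, progress=False)
print(psd.shape, freqs.shape)  # (n_picks, n_windows, n_freqs), (n_freqs,)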
def welch_PSD(epochs_eyes_open, subject, participant):
    kwargs = dict(fmin=0, fmax=50, n_jobs=4)
    psds_welch_mean, freqs_mean = psd_welch(epochs_eyes_open, **kwargs)
    psds_welch_mean = 10 * np.log10(psds_welch_mean)
    psds_welch_mean = psds_welch_mean.mean(0).mean(0)
    psd_mean_subjs = f'/home/senthil/caesar/MNE/scripts/data/{subject}_{participant}_PSD_mean_EO.npy'
    freq_subjs = f'/home/senthil/caesar/MNE/scripts/data/{subject}_{participant}_freq_EO.npy'
    np.save(psd_mean_subjs, psds_welch_mean)
    np.save(freq_subjs, freqs_mean)
    return (psd_mean_subjs, freq_subjs)
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI"""
    with warnings.catch_warnings(record=True):  # MaxShield
        raw = Raw(raw_chpi_fname, allow_maxshield=True)
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname)
    # test cHPI indication
    hpi_freqs, _, hpi_pick, hpi_ons = _get_hpi_info(raw.info)[:4]
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg, picks_eeg]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert_true(
                (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all())
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi)
    trans_sim, rot_sim, t_sim = head_pos_to_trans_rot_t(quats_sim)
    trans, rot, t = head_pos_to_trans_rot_t(read_head_pos(pos_fname))
    t -= raw.first_samp / raw.info['sfreq']
    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
                       max_dist=0.005)
def PSD(epochs, fmin, fmax, time_resolved):
    """
    Computes the Power Spectral Density (PSD) on Epochs for a condition.

    Arguments:
        epochs: Epochs for a condition, for a subject (can result from the
          concatenation of epochs from different occurrences of the condition
          across experiments).
          Epochs are MNE objects (data are stored in arrays of shape
          (n_epochs, n_channels, n_times) and info is a dictionary).
        fmin, fmax: minimum and maximum frequencies-of-interest for power
          spectral density calculation, floats in Hz.
        time_resolved: whether to collapse the time course, boolean.
          If False, PSD won't be averaged over epochs and the time course is
          maintained.
          If True, PSD values are averaged over epochs.

    Note:
        The function can be iterated on the group and/or on conditions:
        for epochs in epochs['epochs_%s_%s_%s' % (subj, group, cond_name)],
        you can then visualize PSD distribution on the group with the toolbox
        visualization to check normality for statistics for example.

    Returns:
        freqs_mean: list of frequencies in the frequency-band-of-interest
          actually used for power spectral density calculation.
        PSD_welch: PSD value in epochs for each channel and each frequency,
          ndarray (n_epochs, n_channels, n_frequencies).
          Note that if time_resolved == True, PSD values are averaged
          across epochs.
    """
    # dropping EOG channels (incompatible with connectivity map model in
    # stats)
    for ch in epochs.info['chs']:
        if ch['kind'] == 202:  # FIFFV_EOG_CH
            epochs.drop_channels([ch['ch_name']])

    # computing power spectral density on epochs signal
    # average in the 1-second window around event (mean, but can choose
    # 'median')
    kwargs = dict(fmin=fmin, fmax=fmax, n_jobs=1)
    psds_welch, freqs_mean = psd_welch(epochs, **kwargs, average='mean',
                                       picks='all')  # or median

    if time_resolved is True:
        # averaging power across epochs for each ch and each f
        PSD_welch = np.mean(psds_welch, axis=0)
    else:
        PSD_welch = psds_welch

    PSDTuple = namedtuple('PSD', ['freqs_mean', 'PSD_welch'])

    return PSDTuple(freqs_mean=freqs_mean, PSD_welch=PSD_welch)
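# A hedged usage sketch for PSD(): `epochs` is assumed to be an mne.Epochs
# object for one condition; the alpha-band limits below are illustrative.
psd_tuple = PSD(epochs, fmin=8., fmax=12., time_resolved=True)
freqs = psd_tuple.freqs_mean
power = psd_tuple.PSD_welch  # (n_channels, n_frequencies) when averaged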
def welch_PSD(epochs_eyes_closed, subject, participant):
    kwargs = dict(fmin=0, fmax=50, n_jobs=4)
    PD_psds_welch_mean, freqs_mean = psd_welch(epochs_eyes_closed,
                                               average='mean', **kwargs)
    PD_psds_welch_mean = 10 * np.log10(PD_psds_welch_mean)
    PD_psds_welch_mean = PD_psds_welch_mean.mean(0).mean(0)
    psd_mean_subjs = f'/Users/senthilp/Desktop/mne_tutorial/scripts/data/{subject}_{participant}_PSD_mean_EC.npy'
    freq_subjs = f'/Users/senthilp/Desktop/mne_tutorial/scripts/data/{subject}_{participant}_freq_EC.npy'
    np.save(psd_mean_subjs, PD_psds_welch_mean)
    np.save(freq_subjs, freqs_mean)
    return (psd_mean_subjs, freq_subjs)
def test_compares_psd():
    """Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
    raw = io.Raw(raw_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       exclude=exclude)[:2]

    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
    n_fft = 2048

    # Compute psds with the new implementation using Welch
    psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax,
                                        fmin=fmin, fmax=fmax,
                                        proj=False, picks=picks,
                                        n_fft=n_fft, n_jobs=1)

    # Compute psds with plt.psd
    start, stop = raw.time_as_index([tmin, tmax])
    data, times = raw[picks, start:(stop + 1)]
    from matplotlib.pyplot import psd
    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
    freqs_mpl = out[0][1]
    psds_mpl = np.array([o[0] for o in out])

    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
    freqs_mpl = freqs_mpl[mask]
    psds_mpl = psds_mpl[:, mask]

    assert_array_almost_equal(psds_welch, psds_mpl)
    assert_array_almost_equal(freqs_welch, freqs_mpl)

    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))

    assert_true(np.sum(freqs_welch < 0) == 0)
    assert_true(np.sum(freqs_mpl < 0) == 0)

    assert_true(np.sum(psds_welch < 0) == 0)
    assert_true(np.sum(psds_mpl < 0) == 0)
def create_animation(epoch_arr, xy_pts, im):
    psds, freqs = psd_welch(epoch_arr, 32, 100, verbose=True)
    electrode_averages = [np.mean(psd, 1) for psd in psds]
    mean_average = np.mean(electrode_averages)
    stdev_average = np.std(electrode_averages)
    electrode_averages -= mean_average
    electrode_averages /= stdev_average
    map = cm.ScalarMappable(Normalize(-1, 1))
    colors = [map.to_rgba(x) for x in electrode_averages]
    times_corr = np.load('corr_arr.npy')
    ColorAnimator(xy_pts, colors, times_corr[0], times_corr[1],
                  im).create_animation(
                      os.path.join(os.getcwd(), 'new_brain_anim.mp4'))
def test_simulate_raw_chpi():
    """Test simulation of raw data with cHPI."""
    raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
    sphere = make_sphere_model('auto', 'auto', raw.info)
    # make sparse spherical source space
    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
    stc = _make_stc(raw, src)
    # simulate data with cHPI on
    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
    # need to trim extra samples off this one
    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
                            head_pos=pos_fname)
    # test cHPI indication
    hpi_freqs, _, hpi_pick, hpi_ons = _get_hpi_info(raw.info)[:4]
    assert_allclose(raw_sim[hpi_pick][0], 0.)
    assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
    # test that the cHPI signals make some reasonable values
    picks_meg = pick_types(raw.info, meg=True, eeg=False)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    for picks in [picks_meg, picks_eeg]:
        psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
        psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
        assert_array_equal(freqs_sim, freqs_chpi)
        freq_idx = np.sort(
            [np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
        if picks is picks_meg:
            assert_true(
                (psd_chpi[:, freq_idx] > 100 * psd_sim[:, freq_idx]).all())
        else:
            assert_allclose(psd_sim, psd_chpi, atol=1e-20)
    # test localization based on cHPI information
    quats_sim = _calculate_chpi_positions(raw_chpi)
    quats = read_head_pos(pos_fname)
    _assert_quats(quats, quats_sim, dist_tol=0.006, angle_tol=4)
def poweeg(data, tmin, tmax):
    """Extract Power Spectrum from EEG data and timing."""
    psds, freqs = psd_welch(data, n_fft=1024, n_overlap=512, tmin=tmin,
                            tmax=tmax, fmin=fmin, fmax=fmax, proj=False,
                            picks=picks, n_jobs=1, verbose=False)
    return psds, freqs
def plot_psd_welch(epochs):
    kwargs = dict(fmin=2, fmax=50, n_jobs=1, n_per_seg=10)
    psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
    psds_welch_median, freqs_median = psd_welch(epochs, average='median',
                                                **kwargs)

    psds_welch_mean = 10 * np.log10(psds_welch_mean)
    psds_welch_median = 10 * np.log10(psds_welch_median)

    ch_name = '0'
    ch_idx = epochs.info['ch_names'].index(ch_name)
    epo_idx = 0

    _, ax = plt.subplots()
    ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
            ls='-', label='mean of segments')
    ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
            ls='--', label='median of segments')

    ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
           xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
    ax.legend(loc='upper right')
    plt.show()
def get_spectrum(trigger):
    intervals = []
    for idx, (time, start, id_) in enumerate(events):
        if id_ == trigger:
            start = (time - raw.first_samp) / raw.info['sfreq']
            end = (events[idx + 1][0] - raw.first_samp) / raw.info['sfreq']
            intervals.append((start, end))

    spectra = [psd_welch(raw, tmin=ival[0], tmax=ival[1], n_jobs=3,
                         fmin=1, fmax=40, n_fft=2048)
               for ival in intervals]

    freqs = spectra[0][1]
    return freqs, np.average([spectrum[0] for spectrum in spectra], axis=0)
def generate(self):
    psds, freqs = psd_welch(
        self.epochs,
        fmin=self.spectrogram_min_freq,
        fmax=self.spectrogram_max_freq,
        verbose=False,
    )
    psds_db = self._convert_amplitudes_to_decibel(psds)

    spectrogram = {'frequencies': freqs.tolist()}
    for index, eeg_channel in enumerate(EEG_CHANNELS):
        spectrogram[eeg_channel.lower()] = psds_db[:, index, :].tolist()

    return spectrogram
def calculate_power(data, pick_list):
    psds_mean_array = np.empty([len(data), 40])
    psds_std_array = np.empty([len(data), 40])
    for i in range(len(data)):
        data[i] = data[i].pick(pick_list)
        psds, freqs = psd_welch(data[i], fmin=1, fmax=40, n_per_seg=100)
        # psds = 10. * np.log10(psds)
        psds_mean = psds.mean(0).mean(0)
        psds_std = psds.mean(0).std(0)
        # psds_std = psds.mean(0).std(0)
        psds_mean_array[i] = psds_mean
        psds_std_array[i] = psds_std
        # psds_std_array.append(psds_std)
    return psds_mean_array, psds_std_array, freqs
def psd(sepochs=epochs):
    from mne.time_frequency import psd_array_welch
    from mne.time_frequency import psd_welch
    psds, freqs = psd_welch(epochs, fmin=1, fmax=40, n_fft=128, n_overlap=.5,
                            n_per_seg=128, proj=False, n_jobs=1,
                            reject_by_annotation=True, average='mean',
                            verbose=None)
    psds /= np.sum(psds, axis=-1, keepdims=True)
    psds_rs = psds.reshape(len(psds), -1)
def welch(self):
    if self.data is not None:
        psds, freqs = psd_welch(self.data, fmin=self.fmin, fmax=self.fmax,
                                tmin=self.tmin, tmax=self.tmax, n_fft=256,
                                n_overlap=0, n_per_seg=None, picks=None,
                                proj=False, n_jobs=1,
                                reject_by_annotation=True, verbose=None)
        self.welch_result = (psds, freqs)
def extract(self, raw):
    feature = []
    for band in self.bands:
        min_freq, max_freq = self.get_freq_range(band)
        psd, _ = psd_welch(raw, fmin=min_freq, fmax=max_freq, verbose=False)
        band_all_channels = np.average(psd, axis=1)
        if self.average:
            feature.append([np.average(band_all_channels)])
        else:
            feature.append(band_all_channels)
    if self.standard_scaler:
        return StandardScaler().fit_transform(np.array(feature)).flatten()
    return feature
def compute_peaks(epochs, condition, chs, FREQ_BANDS, tmin=None, tmax=None,
                  precision=0.125, sf=1000):
    import functools
    # epochs are cropped as desired (tmin could be before '0', e.g. -1.5,
    # depending on the values used during epoching)
    epochs = epochs.apply_baseline((-1.5, -0.1))
    epochs = epochs.crop(tmin, tmax)
    print(epochs.get_data().shape)

    EOG_chs = ['E1', 'E8', 'E25', 'E32', 'E126', 'E127']
    Unwanted = ['E43', 'E48', 'E49', 'E128', 'E113', 'E120', 'E125', 'E119',
                'E129']
    All_chs = epochs.info['ch_names'][0:129]
    EEG_chs = [ele for ele in All_chs if ele not in Unwanted]
    EEG_chs = [ele for ele in EEG_chs if ele not in EOG_chs]

    # Find number of samples
    n_samples = len(epochs.get_data()[0][0]) - 1
    print(n_samples)
    precision = precision / (sf / n_samples)
    fft_size = int(n_samples / precision)
    print(fft_size)

    evoked = epochs[condition].average(chs)

    FREQs = []
    # This loop iterates over each frequency band
    for min, max in FREQ_BANDS:
        # psds, freqs = function(epochs[t], fmin=min, fmax=max, bandwidth=4,
        #                        picks=EEG_chs)
        # PSDs are calculated with this function, giving power values and
        # corresponding frequency bins as output
        psds, freqs = psd_welch(evoked, fmin=min, fmax=max, n_fft=fft_size)
        # PSD values are transformed to log scale to compensate for the
        # natural 1/f slope
        psds = 10. * np.log10(psds)
        index_max = np.argmax(np.array(psds[13][:]))
        freq = freqs[index_max]
        print(index_max)
        # psds_mean = np.average(psds, axis=1)  # Average across bins to
        # obtain one value for the entire frequency range
        FREQs.append(freq)
    FREQs = np.array(FREQs)
    return FREQs
def psd_mne(ML, eeg_data_label):
    '''
    '''
    from mne.time_frequency import psd_welch
    from ml_eeg.eeg_prepro import mne_eeg

    fs = ML['fs']
    raw_data, raw_label = mne_eeg.creat_mne_object(eeg_data_label, fs)

    # label
    psd_label = pd.DataFrame(np.array(raw_label), columns=['Condition'])

    # psd data
    fmin = ML['locutoff']
    fmax = ML['hicutoff']
    n_fft = fs / ML['f_resolution']
    psd, freqs = psd_welch(raw_data,
                           fmin=fmin, fmax=fmax, n_fft=n_fft,
                           n_overlap=25, picks=None)
    psd_freqs = {'psd_freqs': freqs}
    ML.update(psd_freqs)  # update ML

    def combine_data_label(data, label, freqs):
        data = pd.DataFrame(psd)
        # label = psd_label
        column = []
        for i in freqs:  # max(freqs) < 100
            if i < 10:
                column.append('psd_0' + str(i))
            else:
                column.append('psd_' + str(i))
        data.columns = list(column)
        feature_data_label = label.join(data, how='outer')
        return feature_data_label

    # data and label
    feature_data_label = combine_data_label(psd, psd_label, freqs)

    if ML['PSD_log']:
        # a log transform is applied later during feature scaling, so it is
        # not needed here
        psds = 20 * np.log10(psd)  # scale to dB
        feature_data_label = combine_data_label(psds, psd_label, freqs)

    return feature_data_label
def test_compares_psd():
    """Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
    raw = io.read_raw_fif(raw_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       exclude=exclude)[:2]

    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
    n_fft = 2048

    # Compute psds with the new implementation using Welch
    psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax,
                                        fmin=fmin, fmax=fmax,
                                        proj=False, picks=picks,
                                        n_fft=n_fft, n_jobs=1)

    # Compute psds with plt.psd
    start, stop = raw.time_as_index([tmin, tmax])
    data, times = raw[picks, start:(stop + 1)]
    from matplotlib.pyplot import psd
    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
    freqs_mpl = out[0][1]
    psds_mpl = np.array([o[0] for o in out])

    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
    freqs_mpl = freqs_mpl[mask]
    psds_mpl = psds_mpl[:, mask]

    assert_array_almost_equal(psds_welch, psds_mpl)
    assert_array_almost_equal(freqs_welch, freqs_mpl)

    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))

    assert_true(np.sum(freqs_welch < 0) == 0)
    assert_true(np.sum(freqs_mpl < 0) == 0)

    assert_true(np.sum(psds_welch < 0) == 0)
    assert_true(np.sum(psds_mpl < 0) == 0)
def eeg_power_band(epochs):
    # feature extraction: use relative power in a few frequency bands as
    # features
    FREQ_BANDS = {"delta": [0.5, 4.5],
                  "theta": [4.5, 8.5],
                  "alpha": [8.5, 11.5],
                  "sigma": [11.5, 15.5],
                  "beta": [15.5, 30]}

    psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)
    # normalize the PSDs
    psds /= np.sum(psds, axis=-1, keepdims=True)

    X = []
    for fmin, fmax in FREQ_BANDS.values():
        psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)
        X.append(psds_band.reshape(len(psds), -1))

    return np.concatenate(X, axis=1)
import numpy as np
import matplotlib.pyplot as plt

import mne
from mne import io
from mne.viz import iter_topography
from mne.time_frequency import psd_welch
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')

picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120  # use the first 120s of data
fmin, fmax = 2, 20  # look at frequencies between 2 and 20Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
                        fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds)  # scale to dB


def my_callback(ax, ch_idx):
    """
    This block of code is executed once you click on one of the channel axes
    in the plot. To work with the viz internals, this function should only
    take two parameters, the axis and the channel or data index.
    """
    ax.plot(freqs, psds[ch_idx], color='red')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power (dB)')


for ax, idx in iter_topography(raw.info, fig_facecolor='white',
                               axis_facecolor='white',
                               axis_spinecolor='white',
                               on_pick=my_callback):
    ax.plot(psds[idx], color='red')

plt.gcf().suptitle('Power spectral densities')
plt.show()
# if meg, drop magnetometers
if MEG:
    for raw in raws:
        raw.drop_channels([ch_name for ch_name in raw.info['ch_names']
                           if 'MEG' not in ch_name or ch_name.endswith('1')])

raw = remove_bad_parts(raws[0])

picks = mne.pick_types(raw.info, eeg=True)

# tmin, tmax = 1, 90
tmin, tmax = 1, 270
fmin, fmax = 1, 40
# fmin, fmax = 1, 20

rest_psds, rest_freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
                                  fmin=fmin, fmax=fmax, n_fft=N_FFT)
rest_psds = 20 * np.log10(rest_psds)

# read and preprocess mindfulness
# raw = get_raw(SUBJECT, 'med')
# raw = mne.io.Raw('/home/zairex/Code/cibr/data/clean/' + SUBJECT +
#                  '-raw.fif', preload=True)
raw = remove_bad_parts(raws[1])

# crop wandering thoughts
# ...

picks = mne.pick_types(raw.info, eeg=True)
fmin, fmax = 1, 40
# fmin, fmax = 1, 20
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()

# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
                       stim=False, include=[], exclude=bads)

n_fft = 256  # the FFT size. Ideally a power of 2
n_samples = 2048  # time window on which to compute FFT

# make sure at least one epoch is available
time.sleep(n_samples / raw_info['sfreq'])

for ii in range(20):
    epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
    psd, freqs = psd_welch(epoch, fmin=2, fmax=200, n_fft=n_fft)

    cmap = 'RdBu_r'
    freq_mask = freqs < 150
    freqs = freqs[freq_mask]
    log_psd = 10 * np.log10(psd[0])

    tmin = epoch.events[0][0] / raw_info['sfreq']
    tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']

    if ii == 0:
        im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
                       origin='lower', cmap=cmap)

        ax.set_yticks(np.arange(0, len(freqs), 10))
        ax.set_yticklabels(freqs[::10].round(1))
def plot_denoising(fname_raw, fmin=0, fmax=300, tmin=0.0, tmax=60.0,
                   proj=False, n_fft=4096, color='blue',
                   stim_name=None, event_id=1,
                   tmin_stim=-0.2, tmax_stim=0.5,
                   area_mode='range', area_alpha=0.33, n_jobs=1,
                   title1='before denoising', title2='after denoising',
                   info=None, show=True, fnout=None):
    """Plot the power spectral density across channels to show denoising.

    Parameters
    ----------
    fname_raw : list or str
        List of raw files, without denoising and with for comparison.
    tmin : float
        Start time for calculations.
    tmax : float
        End time for calculations.
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across
        channels) will be plotted. If 'range', the min and max (across
        channels) will be plotted. Bad channels will be excluded from these
        calculations. If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    info : bool
        Display information in the figure.
    show : bool
        Show figure.
    fnout : str
        Name of the saved output figure. If none, no figure will be saved.
    title1, title2 : str
        Title for two psd plots.
    n_jobs : int
        Number of jobs to use for parallel computation.
    stim_name : str
        Name of the stim channel. If stim_name is set, the plot of epochs
        average is also shown alongside the PSD plots.
    event_id : int
        ID of the stim event. (only when stim_name is set)

    Example Usage
    -------------
    plot_denoising(['orig-raw.fif', 'orig,nr-raw.fif'], fnout='example')
    """
    from matplotlib import gridspec as grd
    import matplotlib.pyplot as plt
    from mne.time_frequency import psd_welch

    fnraw = get_files_from_list(fname_raw)

    # ---------------------------------
    # estimate power spectrum
    # ---------------------------------
    psds_all = []
    freqs_all = []

    # loop across all filenames
    for fname in fnraw:
        # read in data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info, meg='mag', eeg=False,
                               stim=False, eog=False, exclude='bads')

        if area_mode not in [None, 'std', 'range']:
            raise ValueError('"area_mode" must be "std", "range", or None')

        psds, freqs = psd_welch(raw, picks=picks, fmin=fmin, fmax=fmax,
                                tmin=tmin, tmax=tmax, n_fft=n_fft,
                                n_jobs=n_jobs, proj=proj)
        psds_all.append(psds)
        freqs_all.append(freqs)

    if stim_name:
        n_xplots = 2
        # get some infos
        events = mne.find_events(raw, stim_channel=stim_name,
                                 consecutive=True)
    else:
        n_xplots = 1

    fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))
    gs = grd.GridSpec(n_xplots, int(len(psds_all)))

    # loop across all filenames
    for idx in range(int(len(psds_all))):

        # ---------------------------------
        # plot power spectrum
        # ---------------------------------
        p1 = plt.subplot(gs[0, idx])

        # Convert PSDs to dB
        psds = 10 * np.log10(psds_all[idx])
        psd_mean = np.mean(psds, axis=0)
        if area_mode == 'std':
            psd_std = np.std(psds, axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
        else:  # area_mode is None
            hyp_limits = None

        p1.plot(freqs_all[idx], psd_mean, color=color)
        if hyp_limits is not None:
            p1.fill_between(freqs_all[idx], hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)

        if idx == 0:
            p1.set_title(title1)
            ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]
        else:
            p1.set_title(title2)

        p1.set_xlabel('Freq (Hz)')
        p1.set_ylabel('Power Spectral Density (dB/Hz)')
        p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])
        p1.set_ylim(ylim[0], ylim[1])

        # ---------------------------------
        # plot signal around stimulus onset
        # ---------------------------------
        if stim_name:
            raw = mne.io.Raw(fnraw[idx], preload=True)
            epochs = mne.Epochs(raw, events, event_id, proj=False,
                                tmin=tmin_stim, tmax=tmax_stim, picks=picks,
                                preload=True, baseline=(None, None))
            evoked = epochs.average()
            if idx == 0:
                ymin = np.min(evoked.data)
                ymax = np.max(evoked.data)
            times = evoked.times * 1e3
            p2 = plt.subplot(gs[1, idx])
            p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)
            p2.set_xlim(times[0], times[len(times) - 1])
            p2.set_ylim(1.1 * ymin, 1.1 * ymax)

            if (idx == 1) and info:
                plt.text(times[0], 0.9 * ymax, ' ICs: ' + str(info))

    # save image
    if fnout:
        fig.savefig(fnout + '.png', format='png')

    # show image if requested
    if show:
        plt.show()

    plt.close('denoising')
    plt.ion()
def test_psd():
    """Tests the welch and multitaper PSD."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
    n_fft = 128

    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        psds, freqs = func(raw, proj=False, **kws)
        psds_proj, freqs_proj = func(raw, proj=True, **kws)

        assert_equal(psds.shape, (len(kws['picks']), len(freqs)))
        assert_equal(np.sum(freqs < 0), 0)
        assert_equal(np.sum(psds < 0), 0)

        # Is power found where it should be
        ixs_max = np.argmax(psds, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj)

        # Array input shouldn't work
        assert_raises(ValueError, func, raw[:3, :20][0])

    # test n_per_seg in psd_welch (and padding)
    psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
                              **kws_psd)
    psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
                              **kws_psd)
    assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
    assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))

    # tests ValueError when n_per_seg=None and n_fft > signal length
    kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
    assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
                  **kws_psd)
    # ValueError when n_overlap > n_per_seg
    kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
    assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)

    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                    proj=False, preload=True, baseline=None)
    evoked = epochs.average()

    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
                         picks=picks_psd, proj=False, preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]

    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)

        psds, freqs = func(epochs[:1], proj=False, **kws)
        psds_proj, freqs_proj = func(epochs[:1], proj=True, **kws)
        psds_f, freqs_f = func(epochs_full[:1], proj=False, **kws)

        # this one will fail if you add for example 0.1 to tmin
        assert_array_almost_equal(psds, psds_f, 27)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj, 27)

        # Is power found where it should be
        ixs_max = np.argmax(psds.mean(0), axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
        assert_true(np.sum(freqs < 0) == 0)
        assert_true(np.sum(psds < 0) == 0)

        # Array input shouldn't work
        assert_raises(ValueError, func, epochs.get_data())

        # Testing evoked (doesn't work w/ compute_epochs_psd)
        psds_ev, freqs_ev = func(evoked, proj=False, **kws)
        psds_ev_proj, freqs_ev_proj = func(evoked, proj=True, **kws)

        # Is power found where it should be
        ixs_max = np.argmax(psds_ev, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
            assert_true(np.abs(ixmax - ixtrue) < 2)

        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
        assert_true(psds_ev.shape == (len(kws['picks']), len(freqs_ev)))