Code example #1
def test_csd_morlet():
    """Test computing cross-spectral density using Morlet wavelets."""
    epochs = _generate_coherence_data()
    sfreq = epochs.info['sfreq']

    # Compute CSDs by a variety of methods
    freqs = [10, 15, 22]
    n_cycles = [20, 30, 44]
    times = [(None, None), (1, 9)]
    as_arrays = [False, True]
    parameters = product(times, as_arrays)
    for (tmin, tmax), as_array in parameters:
        if as_array:
            csd = csd_array_morlet(epochs.get_data(),
                                   sfreq,
                                   freqs,
                                   t0=epochs.tmin,
                                   n_cycles=n_cycles,
                                   tmin=tmin,
                                   tmax=tmax,
                                   ch_names=epochs.ch_names)
        else:
            csd = csd_morlet(epochs,
                             frequencies=freqs,
                             n_cycles=n_cycles,
                             tmin=tmin,
                             tmax=tmax)
        if tmin is None and tmax is None:
            assert csd.tmin == 0 and csd.tmax == 9.98
        else:
            assert csd.tmin == tmin and csd.tmax == tmax
        _test_csd_matrix(csd)

    # CSD diagonals should contain PSD
    tfr = tfr_morlet(epochs, freqs, n_cycles, return_itc=False)
    power = np.mean(tfr.data, 2)
    csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles)
    assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)

    # Test using plain convolution instead of FFT
    csd = csd_morlet(epochs,
                     frequencies=freqs,
                     n_cycles=n_cycles,
                     use_fft=False)
    assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)

    # Test baselining warning
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        epochs_nobase = epochs.copy()
        epochs_nobase.baseline = None
        epochs_nobase.info['highpass'] = 0
        csd = csd_morlet(epochs_nobase, frequencies=[10], decim=20)
    assert len(w) == 1
Code example #2
def _gen_dics(active_win, baseline_win, epochs):
    freqs = np.logspace(np.log10(12), np.log10(30), 9)
    csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
    csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0],
                              tmax=baseline_win[1], decim=20)
    csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1],
                         decim=20)
    filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power',
                        reduce_rank=True, real_filter=True)
    stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters)
    stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters)
    stc_act /= stc_base
    return stc_act
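The _gen_dics helper above relies on a module-level fwd and expects its two windows as (tmin, tmax) tuples. A purely hypothetical call is sketched below; epochs and fwd are assumed to already exist, and the window values are illustrative rather than taken from the original script.

# Hypothetical usage sketch for _gen_dics (not part of the original code);
# the windows are illustrative and `fwd` must already be defined at module level.
stc_relative = _gen_dics(active_win=(0.5, 1.5), baseline_win=(-1.0, 0.0),
                         epochs=epochs)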
Code example #3
def _simulate_data(fwd, idx):  # Somewhere on the frontal lobe by default
    """Simulate an oscillator on the cortex."""
    source_vertno = fwd['src'][0]['vertno'][idx]

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch (weird baseline but shouldn't matter)
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        baseline=(0., 0.),
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    labels = mne.read_labels_from_annot('sample',
                                        hemi='lh',
                                        subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]
    ]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
Code example #4
File: test_csd.py  Project: Eric89GXL/mne-python
def test_csd_morlet():
    """Test computing cross-spectral density using Morlet wavelets."""
    epochs = _generate_coherence_data()
    sfreq = epochs.info['sfreq']

    # Compute CSDs by a variety of methods
    freqs = [10, 15, 22]
    n_cycles = [20, 30, 44]
    times = [(None, None), (1, 9)]
    as_arrays = [False, True]
    parameters = product(times, as_arrays)
    for (tmin, tmax), as_array in parameters:
        if as_array:
            csd = csd_array_morlet(epochs.get_data(), sfreq, freqs,
                                   t0=epochs.tmin, n_cycles=n_cycles,
                                   tmin=tmin, tmax=tmax,
                                   ch_names=epochs.ch_names)
        else:
            csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles,
                             tmin=tmin, tmax=tmax)
        if tmin is None and tmax is None:
            assert csd.tmin == 0 and csd.tmax == 9.98
        else:
            assert csd.tmin == tmin and csd.tmax == tmax
        _test_csd_matrix(csd)

    # CSD diagonals should contain PSD
    tfr = tfr_morlet(epochs, freqs, n_cycles, return_itc=False)
    power = np.mean(tfr.data, 2)
    csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles)
    assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)

    # Test using plain convolution instead of FFT
    csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles,
                     use_fft=False)
    assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)

    # Test baselining warning
    epochs_nobase = epochs.copy()
    epochs_nobase.baseline = None
    epochs_nobase.info['highpass'] = 0
    with pytest.warns(RuntimeWarning, match='baseline'):
        csd = csd_morlet(epochs_nobase, frequencies=[10], decim=20)
Code example #5
def _simulate_data(fwd_fixed, source_vertno1, source_vertno2):
    """Simulate two oscillators on the cortex."""

    sfreq = 50.  # Hz.
    base_freq = 10
    t_rand = 0.001
    std = 0.1
    times = np.arange(10. * sfreq) / sfreq  # 10 seconds of data
    n_times = len(times)
    # Generate an oscillator with varying frequency and phase lag.
    iflaw = base_freq / sfreq + t_rand * np.random.randn(n_times)
    signal1 = np.exp(1j * 2.0 * np.pi * np.cumsum(iflaw))
    signal1 *= np.conj(signal1[0])
    signal1 = signal1.real

    # Add some random fluctuations to the signal.
    signal1 += std * np.random.randn(n_times)
    signal1 *= 1e-7

    # Make identical signal
    signal2 = signal1.copy()

    # Add random fluctuations
    signal1 += 1e-8 * np.random.randn(len(times))
    signal2 += 1e-8 * np.random.randn(len(times))

    # Construct a SourceEstimate object
    stc = mne.SourceEstimate(
        np.vstack((signal1[np.newaxis, :], signal2[np.newaxis, :])),
        vertices=[np.array([source_vertno1]), np.array([source_vertno2])],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd_fixed['info']['ch_names'], sfreq,
                           ch_types='grad')
    info.update(fwd_fixed['info'])  # Merge in sensor position information

    # Simulated sensor data.
    raw = mne.apply_forward_raw(fwd_fixed, stc, info)

    # Add noise (the original snippet uses an undefined `random`; a seeded
    # RandomState, matching the other examples here, is assumed)
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, np.array([[0, 0, 1]]), event_id=1, tmin=0,
                        tmax=raw.times[-1], preload=True, baseline=(0, 0))

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20])

    return csd
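The function above returns only the CSD of the two coherent simulated sources. As a self-contained sketch (random data stands in for real epochs; none of this appears in the original snippet), the coherence between a pair of channels can be read directly off a CSD matrix as |S_xy| / sqrt(S_xx * S_yy):

import numpy as np
from mne.time_frequency import csd_array_morlet

# Random data standing in for real epochs: (n_epochs, n_channels, n_times).
rng = np.random.RandomState(0)
data = rng.randn(8, 2, 500)
csd = csd_array_morlet(data, sfreq=50., frequencies=[10.], n_cycles=5)

# Coherence between the two channels at 10 Hz: |S_xy| / sqrt(S_xx * S_yy).
s = csd.get_data()  # only one frequency in this CSD, so no argument is needed
coh = np.abs(s[0, 1]) / np.sqrt(s[0, 0].real * s[1, 1].real)
print('coherence at 10 Hz:', coh)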
Code example #6
File: test_dics.py  Project: jnvandermeer/mne-python
def _simulate_data(fwd):
    """Simulate an oscillator on the cortex."""
    source_vertno = 146374  # Somewhere on the frontal lobe

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    return epochs, evoked, csd, source_vertno
Code example #7
File: test_dics.py  Project: jhouck/mne-python
def _simulate_data(fwd):
    """Simulate an oscillator on the cortex."""
    source_vertno = 146374  # Somewhere on the frontal lobe

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    return epochs, evoked, csd, source_vertno
Code example #8
                        dir=proc_dir, sub=sub, run=run, wav=wav_name)
                    epo = mne.read_epochs(epo_name)
                    all_bads += epo.info["bads"]
                    epos.append(epo)
                    wav_epos.append(epo)
                    epo_names.append("{}_{}".format(run, wav_name))
                epo_conds.append(mne.concatenate_epochs(wav_epos))
                epo_cond_names.append(run)

        for x in epos:
            x.info["bads"] = all_bads
            x.info["dev_head_t"] = epos[0].info["dev_head_t"]
        epo = mne.concatenate_epochs(epos)
        csd = csd_morlet(epo,
                         frequencies=freqs,
                         n_jobs=n_jobs,
                         n_cycles=c,
                         decim=3)
        #csd = csd.mean()
        fwd_name = "{dir}nc_{sub}_{sp}-fwd.fif".format(dir=proc_dir,
                                                       sub=sub,
                                                       sp=spacing)
        fwd = mne.read_forward_solution(fwd_name)
        filters = make_dics(epo.info,
                            fwd,
                            csd,
                            real_filter=True,
                            weight_norm="nai",
                            reduce_rank=False,
                            pick_ori="max-power")
        del epo, csd, fwd
Code example #9
# Read in the simulated data
stc_signal = mne.read_source_estimate(
    fname.stc_signal(noise=config.noise, vertex=config.vertex))
epochs = mne.read_epochs(
    fname.simulated_epochs(noise=config.noise, vertex=config.vertex))
fwd = mne.read_forward_solution(fname.fwd)

# For pick_ori='normal', the fwd needs to be in surface orientation
fwd = mne.convert_forward_solution(fwd, surf_ori=True)

# The DICS beamformer currently only uses one sensor type
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')

# Make CSD matrix
csd = csd_morlet(epochs, [config.signal_freq])

# Compute the settings grid
regs = [0.05, 0.1, 0.5]
sensor_types = ['grad', 'mag']
pick_oris = [None, 'normal', 'max-power']
inversions = ['single', 'matrix']
weight_norms = ['unit-noise-gain', 'nai', None]
normalize_fwds = [True, False]
real_filters = [True, False]
settings = list(
    product(regs, sensor_types, pick_oris, inversions, weight_norms,
            normalize_fwds, real_filters))

# Compute DICS beamformer with all possible settings
dists = []
Code example #10
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks,
                    preload=True)

# Read forward operator
fwd = mne.read_forward_solution(fname_fwd)

###############################################################################
# We are interested in the beta band. Define a range of frequencies, using a
# log scale, from 12 to 30 Hz.
freqs = np.logspace(np.log10(12), np.log10(30), 9)

###############################################################################
# Computing the cross-spectral density matrix for the beta frequency band, for
# different time intervals. We use a decim value of 20 to speed up the
# computation in this example at the loss of accuracy.
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)

###############################################################################
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power')

###############################################################################
# Applying DICS spatial filters separately to the CSD computed using the
# baseline and the CSD computed during the ERS activity.
baseline_source_power, freqs = apply_dics_csd(csd_baseline.mean(), filters)
beta_source_power, freqs = apply_dics_csd(csd_ers.mean(), filters)
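A natural next step, not part of the excerpt above, is to express the ERS source power relative to baseline, mirroring the in-place ratio taken in code example #2:

# Hedged follow-up: relative (ERS / baseline) source power, reusing the
# variables computed above and the division pattern from code example #2.
relative_power = beta_source_power.copy()
relative_power /= baseline_source_power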
Code example #11
File: plot_dics.py  Project: xiaorong108/mne-python
# other sources as much as possible.
#
# The :func:`mne.beamformer.make_dics` function has many switches that offer
# precise control
# over the way the filter weights are computed. Currently, there is no clear
# consensus regarding the best approach. This is why we will demonstrate two
# approaches here:
#
#  1. The approach as described in [2]_, which first normalizes the forward
#     solution and computes a vector beamformer.
#  2. The scalar beamforming approach based on [3]_, which uses weight
#     normalization instead of normalizing the forward solution.

# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])

# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(info,
                              fwd,
                              csd_signal,
                              reg=0.05,
                              pick_ori='max-power',
                              normalize_fwd=True,
                              inversion='single',
                              weight_norm=None)
print(filters_approach1)

filters_approach2 = make_dics(info,
                              fwd,
                              csd_signal,
Code example #12
###############################################################################
# When computing the CSD with Morlet wavelets, you specify the exact
# frequencies at which to compute it. For each frequency, a corresponding
# wavelet will be constructed and convolved with the signal, resulting in a
# time-frequency decomposition.
#
# The CSD is constructed by computing the correlation between the
# time-frequency representations of all sensor-to-sensor pairs. The
# time-frequency decomposition originally has the same sampling rate as the
# signal, in our case ~600 Hz. This means the decomposition is over-specified in
# time and we may not need to use all samples during our CSD computation, just
# enough to get a reliable correlation statistic. By specifying ``decim=10``,
# we use every 10th sample, which will greatly speed up the computation and
# will have a minimal effect on the CSD.
frequencies = [16, 17, 18, 19, 20]
csd_wav = csd_morlet(epochs, frequencies, decim=10, n_jobs=n_jobs)

###############################################################################
# The resulting :class:`mne.time_frequency.CrossSpectralDensity` objects have a
# plotting function we can use to compare the results of the different methods.
# We're plotting the mean CSD across frequencies.
csd_fft.mean().plot()
plt.suptitle('short-term Fourier transform')

csd_mt.mean().plot()
plt.suptitle('adaptive multitapers')

csd_wav.mean().plot()
plt.suptitle('Morlet wavelet transform')
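Beyond plotting, the wavelet CSD can also be inspected and reduced numerically. A hedged follow-up, assuming the csd_wav object computed in this example:

# Hedged follow-up, assuming the csd_wav object from above.
print(csd_wav.frequencies)                  # [16, 17, 18, 19, 20]
csd_beta = csd_wav.mean(fmin=16, fmax=20)   # collapse the band into one matrix
beta = csd_beta.get_data()                  # (n_channels, n_channels), complex
print(beta.shape)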
Code example #13
# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()

# Read forward operator
forward = mne.read_forward_solution(fname_fwd)

# Computing the data and noise cross-spectral density matrices
# The time-frequency window was chosen on the basis of spectrograms from
# example time_frequency/plot_time_frequency.py
# We use Morlet wavelets to estimate the CSD for two specific frequencies.
data_csds = csd_morlet(epochs, tmin=0.04, tmax=0.15, frequencies=[18, 27])
noise_csds = csd_morlet(epochs, tmin=-0.11, tmax=-0.001, frequencies=[18, 27])

# Compute DICS spatial filter and estimate source power
stc = dics_source_power(epochs.info, forward, noise_csds, data_csds)

# Plot the power maps at both frequencies
for i, freq in enumerate(data_csds.frequencies):
    message = 'DICS source power at %0.1f Hz' % freq
    brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
                     time_label=message, figure=i)
    brain.set_data_time_index(i)
    brain.show_view('lateral')
    # Uncomment line below to save images
    # brain.save_image('DICS_source_power_freq_%d.png' % csd.freqs[0])
Code example #14
 rest = mne.read_epochs("{dir}nc_{sub}_1_ica-epo.fif".format(dir=meg_dir,
                                                             sub=meg))
 ton = mne.read_epochs("{dir}nc_{sub}_2_ica-epo.fif".format(dir=meg_dir,
                                                            sub=meg))
 # override the head position transform so the epochs can be concatenated (just for calculating the CSD!)
 rest.info['dev_head_t'] = ton.info['dev_head_t']
 epo_bas = mne.concatenate_epochs([rest, ton])
 epo_exp = mne.read_epochs("{dir}nc_{sub}_exp-epo.fif".format(dir=meg_dir,
                                                              sub=meg))
 #separate positive and negative condition epochs
 pos = epo_exp['positive']
 neg = epo_exp['negative']
 # calculate & save CSDs
 csd_bas_gamma_high = csd_morlet(epo_bas,
                                 frequencies=freqs["gamma_high"],
                                 n_jobs=8,
                                 n_cycles=cycles["gamma_high"],
                                 decim=1)
 csd_bas_gamma_high.save("{dir}nc_{meg}-csd_bas_gamma_high.h5".format(
     dir=meg_dir, meg=meg))
 csd_exp_gamma_high = csd_morlet(epo_exp,
                                 frequencies=freqs["gamma_high"],
                                 n_jobs=8,
                                 n_cycles=cycles["gamma_high"],
                                 decim=1)
 csd_exp_gamma_high.save("{dir}nc_{meg}-csd_exp_gamma_high.h5".format(
     dir=meg_dir, meg=meg))
 csd_rest_gamma_high = csd_morlet(rest,
                                  frequencies=freqs["gamma_high"],
                                  n_jobs=8,
                                  n_cycles=cycles["gamma_high"],
Code example #15
 rest = mne.read_epochs("{dir}nc_{sub}_1_ica-epo.fif".format(dir=meg_dir,
                                                             sub=meg))
 ton = mne.read_epochs("{dir}nc_{sub}_2_ica-epo.fif".format(dir=meg_dir,
                                                            sub=meg))
 epo_exp = mne.read_epochs("{dir}nc_{sub}_exp-epo.fif".format(dir=meg_dir,
                                                              sub=meg))
 neg = epo_exp['negative']
 pos = epo_exp['positive']
 # override the head position transform so the epochs can be concatenated (just for calculating the CSD!)
 rest.info['dev_head_t'] = ton.info['dev_head_t']
 epo_bas = mne.concatenate_epochs([rest, ton])
 # make and save CSDs for 1-90 Hz range
 frequencies = np.linspace(1, 90, num=90)
 csd_exp = csd_morlet(epo_exp,
                      frequencies=frequencies,
                      n_jobs=8,
                      n_cycles=7,
                      decim=1)
 csd_exp.save("{dir}nc_{meg}-csd_exp_1-90.h5".format(dir=meg_dir, meg=meg))
 csd_bas = csd_morlet(epo_bas,
                      frequencies=frequencies,
                      n_jobs=8,
                      n_cycles=7,
                      decim=1)
 csd_bas.save("{dir}nc_{meg}-csd_bas_1-90.h5".format(dir=meg_dir, meg=meg))
 csd_ton = csd_morlet(ton,
                      frequencies=frequencies,
                      n_jobs=8,
                      n_cycles=7,
                      decim=1)
 csd_ton.save("{dir}nc_{meg}-csd_ton_1-90.h5".format(dir=meg_dir, meg=meg))
Code example #16
true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]

del info, fwd_disc_true, er_raw

epochs = create_epochs(raw)

###############################################################################
# Sensor level analysis
###############################################################################

epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)

# Make CSD matrix
csd = csd_morlet(epochs, [config.signal_freq], tmin=0, tmax=1)
noise_csd = csd_morlet(epochs, [config.signal_freq], tmin=-1, tmax=0)

# Compute evokeds
evoked_grad = epochs_grad.average()
evoked_mag = epochs_mag.average()
evoked_joint = epochs_joint.average()

###############################################################################
# Compute DICS beamformer results
###############################################################################

# Read in forward solution
fwd_disc_man = mne.read_forward_solution(fname.fwd_discrete_man)

dists = []
Code example #17
File: 06_CSD.py  Project: TinnErlangen/MEMO
# CSD over all conditions to calculate shared filters
for sub in subjs:
    # read in the epo files
    epo = mne.read_epochs("{}{}-analysis-epo.fif".format(proc_dir, sub))

    # # calculate "big" alpha CSDs for common filters
    # csd = csd_morlet(epo, frequencies=freqs, n_jobs=8, n_cycles=cycs, decim=1)
    # csd.save("{}{}_alpha-csd.h5".format(proc_dir,sub))
    #
    # # calculate "small" alpha CSDs for each condition and time window
    # for i, (tmin, tmax) in enumerate(zip(tmins,tmaxs)):
    #     epo_win = epo.copy()
    #     epo_win.crop(tmin=tmin, tmax=tmax)
    #     for cond in conds:
    #         csd_ct = csd_morlet(epo_win[cond], frequencies=freqs, n_jobs=8, n_cycles=cycs, decim=1)
    #         csd_ct.save("{}{}_alpha_{}_TW{}-csd.h5".format(proc_dir,sub,cond,i))

    # calculate new "small" alpha CSDs for each condition and time window (before, after)
    for (tmin, tmax, tw) in zip(new_tmins, new_tmaxs, new_tws):
        epo_win = epo.copy()
        epo_win.crop(tmin=tmin, tmax=tmax)
        for cond in conds:
            csd_ct = csd_morlet(epo_win[cond],
                                frequencies=freqs,
                                n_jobs=8,
                                n_cycles=cycs,
                                decim=1)
            csd_ct.save("{}{}_alpha_{}_{}-csd.h5".format(
                proc_dir, sub, cond, tw))
Code example #18
File: dics_indfreq.py  Project: TinnErlangen/ATT
        for wav_idx, wav_name in enumerate(wavs):
            epo_name = "{dir}nc_{sub}_{run}_{wav}_hand-epo.fif".format(
              dir=proc_dir, sub=sub, run=run, wav=wav_name)
            epo = mne.read_epochs(epo_name)
            all_bads += epo.info["bads"]
            epos.append(epo)
            wav_epos.append(epo)
            epo_names.append("{}_{}".format(run,wav_name))
        epo_conds.append(mne.concatenate_epochs(wav_epos))
        epo_cond_names.append(run)

    for x in epos:
        x.info["bads"] = all_bads
        x.info["dev_head_t"] = epos[0].info["dev_head_t"]
    epo = mne.concatenate_epochs(epos)
    csd = csd_morlet(epo, frequencies=freqs, n_jobs=n_jobs, n_cycles=cycles, decim=3)
    csd = csd.mean()
    fwd_name = "{dir}nc_{sub}_{sp}-fwd.fif".format(dir=proc_dir, sub=sub, sp=spacing)
    fwd = mne.read_forward_solution(fwd_name)
    filters = make_dics(epo.info, fwd, csd, real_filter=True)
    del epo, csd, fwd

    print("\n\n")
    print("\n\n")
    if not doTones:
        epos = epo_conds
        epo_names = epo_cond_names
    for epo,epo_name in zip(epos,epo_names):
        epo_csd = csd_morlet(epo, frequencies=freqs,
                               n_jobs=n_jobs, n_cycles=cycles, decim=3)
        epo_csd = epo_csd.mean()
Code example #19
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()

# Read forward operator
forward = mne.read_forward_solution(fname_fwd)

###############################################################################
# Computing the cross-spectral density matrix at 4 evenly spaced frequencies
# from 6 to 10 Hz. We use a decim value of 20 to speed up the computation in
# this example at the loss of accuracy.
#
# .. warning:: The use of several sensor types with the DICS beamformer is
#              not heavily tested yet. Here we use verbose='error' to
#              suppress a warning along these lines.
csd = csd_morlet(epochs, tmin=0, tmax=0.5, decim=20,
                 frequencies=np.linspace(6, 10, 4),
                 n_cycles=2.5)  # short signals, must live with few cycles

# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, forward, csd, reg=0.5, verbose='error')
print(filters)
stc, freqs = apply_dics_csd(csd, filters)

message = 'DICS source power in the 6-10 Hz frequency band'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
                 time_label=message)
Code example #20
# Individual frequencies to estimate the CSD for
fmin = freq_bands[0][0]
fmax = freq_bands[-1][1]
frequencies = np.arange(fmin, fmax + 1, 2)

# Compute CSD matrices for each frequency and each condition.
for condition in conditions:
    print('Condition:', condition)
    # Remove the mean during the time interval for which we compute the CSD
    epochs_baselined = epochs[condition].apply_baseline((csd_tmin, csd_tmax))

    # Compute CSD for the desired time interval
    csd = csd_morlet(epochs_baselined,
                     frequencies=frequencies,
                     tmin=csd_tmin,
                     tmax=csd_tmax,
                     decim=20,
                     n_jobs=n_jobs,
                     verbose=True)

    # Save the CSD matrices
    csd.save(fname.csd(condition=condition, subject=subject))
    report.add_figs_to_section(csd.plot(show=False),
                               ['CSD for %s' % condition],
                               section='Sensor-level')

# Also compute the CSD for the baseline period (use all epochs for this,
# regardless of condition). This way, we can compare the change in power caused
# by the presentation of the stimulus.
epochs = epochs.apply_baseline((-0.2, 0))  # Make sure data is zero-mean
csd_baseline = csd_morlet(epochs,
Code example #21
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True,
                    reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()

# Read forward operator
forward = mne.read_forward_solution(fname_fwd)

###############################################################################
# Computing the cross-spectral density matrix at 4 evenly spaced frequencies
# from 6 to 10 Hz. We use a decim value of 20 to speed up the computation in
# this example at the loss of accuracy.
csd = csd_morlet(epochs,
                 tmin=0,
                 tmax=0.5,
                 decim=20,
                 frequencies=np.linspace(6, 10, 4))

# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, forward, csd, reg=0.5)
stc, freqs = apply_dics_csd(csd, filters)

message = 'DICS source power in the 6-10 Hz frequency band'
brain = stc.plot(surface='inflated',
                 hemi='rh',
                 subjects_dir=subjects_dir,
                 time_label=message)
Code example #22
File: plot_dics.py  Project: jdammers/mne-python
# pass activity originating from the vertex, while dampening activity from
# other sources as much as possible.
#
# The :func:`make_dics` function has many switches that offer precise control
# over the way the filter weights are computed. Currently, there is no clear
# consensus regarding the best approach. This is why we will demonstrate two
# approaches here:
#
#  1. The approach as described in [2]_, which first normalizes the forward
#     solution and computes a vector beamformer.
#  2. The scalar beamforming approach based on [3]_, which uses weight
#     normalization instead of normalizing the forward solution.

# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])

# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
    info, fwd, csd_signal, reg=0.05, pick_ori='max-power', normalize_fwd=True,
    inversion='single', weight_norm=None)
filters_approach2 = make_dics(
    info, fwd, csd_signal, reg=0.05, pick_ori='max-power', normalize_fwd=False,
    inversion='matrix', weight_norm='unit-noise-gain')

# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)

# Plot the DICS power maps for both approaches.
for approach, power in enumerate([power_approach1, power_approach2], 1):
Code example #23
def test_tf_dics(_load_forward, idx, mat_tol, vol_tol):
    """Test 5D time-frequency beamforming based on DICS."""
    fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
    epochs, _, _, source_vertno, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    reg = 1  # Lots of regularization for our toy dataset

    tmin = 0
    tmax = 9
    tstep = 4
    win_lengths = [5, 5]
    frequencies = [10, 20]
    freq_bins = [(8, 12), (18, 22)]

    with pytest.raises(RuntimeError, match='several sensor types'):
        stcs = tf_dics(epochs,
                       fwd_surf,
                       None,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins=freq_bins,
                       frequencies=frequencies,
                       decim=10,
                       reg=reg,
                       label=label)
    epochs.pick_types(meg='grad')
    # Compute DICS for two time windows and two frequencies
    for mode in ['fourier', 'multitaper', 'cwt_morlet']:
        stcs = tf_dics(epochs,
                       fwd_surf,
                       None,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       mode=mode,
                       freq_bins=freq_bins,
                       frequencies=frequencies,
                       decim=10,
                       reg=reg,
                       label=label)

        # Did we find the true source at 20 Hz?
        dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=0)
        assert dist == 0
        dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=1)
        assert dist == 0

        # 20 Hz power should decrease over time
        assert stcs[1].data[source_ind, 0] > stcs[1].data[source_ind, 1]

        # 20 Hz power should be more than 10 Hz power at the true source
        assert stcs[1].data[source_ind, 0] > stcs[0].data[source_ind, 0]

    # Manually compute source power and compare with the last tf_dics result.
    source_power = []
    time_windows = [(0, 5), (4, 9)]
    for time_window in time_windows:
        csd = csd_morlet(epochs,
                         frequencies=[frequencies[1]],
                         tmin=time_window[0],
                         tmax=time_window[1],
                         decim=10)
        csd = csd.sum()
        csd._data /= csd.n_fft
        filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label)
        stc_source_power, _ = apply_dics_csd(csd, filters)
        source_power.append(stc_source_power.data)

    # Comparing tf_dics results with dics_source_power results
    assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0)

    # Test using noise csds. We're going to use identity matrices. That way,
    # since we're using unit-noise-gain weight normalization, there should be
    # no effect.
    stcs = tf_dics(epochs,
                   fwd_surf,
                   None,
                   tmin,
                   tmax,
                   tstep,
                   win_lengths,
                   mode='cwt_morlet',
                   frequencies=frequencies,
                   decim=10,
                   reg=reg,
                   label=label,
                   normalize_fwd=False,
                   weight_norm='unit-noise-gain')
    noise_csd = csd.copy()
    inds = np.triu_indices(csd.n_channels)
    # Using [:, :] syntax for in-place broadcasting
    noise_csd._data[:, :] = 2 * np.eye(csd.n_channels)[inds][:, np.newaxis]
    noise_csd.n_fft = 2  # Dividing by n_fft should yield an identity CSD
    noise_csds = [noise_csd, noise_csd]  # Two frequency bins
    stcs_norm = tf_dics(epochs,
                        fwd_surf,
                        noise_csds,
                        tmin,
                        tmax,
                        tstep,
                        win_lengths,
                        mode='cwt_morlet',
                        frequencies=frequencies,
                        decim=10,
                        reg=reg,
                        label=label,
                        normalize_fwd=False,
                        weight_norm='unit-noise-gain')
    assert_allclose(stcs_norm[0].data, stcs[0].data, atol=0)
    assert_allclose(stcs_norm[1].data, stcs[1].data, atol=0)

    # Test invalid parameter combinations
    with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep,
                win_lengths,
                mode='fourier',
                freq_bins=None)
    with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep,
                win_lengths,
                mode='cwt_morlet',
                frequencies=None)

    # Test if incorrect number of noise CSDs is detected
    with pytest.raises(ValueError, match='One noise CSD object expected per'):
        tf_dics(epochs,
                fwd_surf, [noise_csds[0]],
                tmin,
                tmax,
                tstep,
                win_lengths,
                freq_bins=freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    with pytest.raises(ValueError, match='One time window length expected'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep,
                win_lengths=[0, 1, 2],
                freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    with pytest.raises(ValueError, match='Time step should not be larger'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep=0.15,
                win_lengths=[0.2, 0.1],
                freq_bins=freq_bins)

    # Test if incorrect number of n_ffts is detected
    with pytest.raises(ValueError, match='When specifying number of FFT'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep,
                win_lengths,
                freq_bins=freq_bins,
                n_ffts=[1])

    # Test if incorrect number of mt_bandwidths is detected
    with pytest.raises(ValueError, match='When using multitaper mode and'):
        tf_dics(epochs,
                fwd_surf,
                None,
                tmin,
                tmax,
                tstep,
                win_lengths=win_lengths,
                freq_bins=freq_bins,
                mode='multitaper',
                mt_bandwidths=[20])

    # Test if subtracting evoked responses yields NaN's, since we only have one
    # epoch. Suppress division warnings.
    assert len(epochs) == 1, len(epochs)
    with np.errstate(invalid='ignore'):
        stcs = tf_dics(epochs,
                       fwd_surf,
                       None,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       mode='cwt_morlet',
                       frequencies=frequencies,
                       subtract_evoked=True,
                       reg=reg,
                       label=label,
                       decim=20)
    assert np.all(np.isnan(stcs[0].data))
Code example #24
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
                    'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')

fwd = mne.read_forward_solution(fname_fwd)

###############################################################################
# We are interested in the beta band. Define a range of frequencies, using a
# log scale, from 12 to 30 Hz.
freqs = np.logspace(np.log10(12), np.log10(30), 9)

###############################################################################
# Computing the cross-spectral density matrix for the beta frequency band, for
# different time intervals. We use a decim value of 20 to speed up the
# computation in this example at the loss of accuracy.
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)

###############################################################################
# To compute the source power for a frequency band, rather than each frequency
# separately, we average the CSD objects across frequencies.
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()

###############################################################################
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
filters = make_dics(epochs.info,
Code example #25
    ('sLORETA', [3, 5, 7]),
    ('eLORETA', [0.75, 1.25, 1.75]),
)):

    #    surfer_kwargs['clim']['lims'] = lims
    stc1 = apply_inverse(evoked,
                         inverse_operator,
                         lambda2,
                         method=method,
                         pick_ori=None)
    brain = stc1.plot(figure=mi, **surfer_kwargs)
    brain.add_text(0.1, 0.9, method, 'title', font_size=20)

#%%
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd

csd = csd_morlet(epochs, tmin=0, tmax=0.25, frequencies=np.linspace(2, 4))
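# Note: np.linspace(2, 4) above uses its default num=50, so the CSD is
# computed at 50 evenly spaced frequencies between 2 and 4 Hz.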

# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, fwd, csd, real_filter=True)

stc, freqs = apply_dics_csd(csd, filters)

message = 'DICS source power in the 2-4 Hz frequency band'
brain = stc.plot(surface='inflated',
                 hemi='rh',
                 subjects_dir=subjects_dir,
                 time_label=message,
                 time_viewer=True)
Code example #26
File: test_dics.py  Project: jhouck/mne-python
def test_tf_dics():
    """Test 5D time-frequency beamforming based on DICS."""
    fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward()
    epochs, evoked, _, source_vertno = _simulate_data(fwd_fixed)
    vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    reg = 1  # Lots of regularization for our toy dataset

    tmin = 0
    tmax = 9
    tstep = 4
    win_lengths = [5, 5]
    frequencies = [10, 20]
    freq_bins = [(8, 12), (18, 22)]

    # Compute DICS for two time windows and two frequencies
    for mode in ['fourier', 'multitaper', 'cwt_morlet']:
        stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                       mode=mode, freq_bins=freq_bins, frequencies=frequencies,
                       decim=10, reg=reg, label=label)

        # Did we find the true source at 20 Hz?
        assert np.argmax(stcs[1].data[:, 0]) == source_ind
        assert np.argmax(stcs[1].data[:, 1]) == source_ind

        # 20 Hz power should decrease over time
        assert stcs[1].data[source_ind, 0] > stcs[1].data[source_ind, 1]

        # 20 Hz power should be more than 10 Hz power at the true source
        assert stcs[1].data[source_ind, 0] > stcs[0].data[source_ind, 0]

    # Manually compute source power and compare with the last tf_dics result.
    source_power = []
    time_windows = [(0, 5), (4, 9)]
    for time_window in time_windows:
        csd = csd_morlet(epochs, frequencies=[frequencies[1]],
                         tmin=time_window[0], tmax=time_window[1], decim=10)
        csd = csd.sum()
        csd._data /= csd.n_fft
        filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label)
        stc_source_power, _ = apply_dics_csd(csd, filters)
        source_power.append(stc_source_power.data)

    # Comparing tf_dics results with dics_source_power results
    assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0)

    # Test using noise csds. We're going to use identity matrices. That way,
    # since we're using unit-noise-gain weight normalization, there should be
    # no effect.
    stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                   mode='cwt_morlet', frequencies=frequencies, decim=10,
                   reg=reg, label=label, normalize_fwd=False,
                   weight_norm='unit-noise-gain')
    noise_csd = csd.copy()
    inds = np.triu_indices(csd.n_channels)
    # Using [:, :] syntax for in-place broadcasting
    noise_csd._data[:, :] = 2 * np.eye(csd.n_channels)[inds][:, np.newaxis]
    noise_csd.n_fft = 2  # Dividing by n_fft should yield an identity CSD
    noise_csds = [noise_csd, noise_csd]  # Two frequency bins
    stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep,
                        win_lengths, mode='cwt_morlet',
                        frequencies=frequencies, decim=10, reg=reg,
                        label=label, normalize_fwd=False,
                        weight_norm='unit-noise-gain')
    assert_allclose(stcs_norm[0].data, stcs[0].data, atol=0)
    assert_allclose(stcs_norm[1].data, stcs[1].data, atol=0)

    # Test invalid parameter combinations
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, mode='fourier', freq_bins=None)
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, mode='cwt_morlet', frequencies=None)

    # Test if incorrect number of noise CSDs is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, [noise_csds[0]], tmin, tmax,
           tstep, win_lengths, freq_bins=freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths=[0, 1, 2], freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep=0.15,
           win_lengths=[0.2, 0.1], freq_bins=freq_bins)

    # Test if incorrect number of n_ffts is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, freq_bins=freq_bins, n_ffts=[1])

    # Test if incorrect number of mt_bandwidths is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper',
           mt_bandwidths=[20])

    # Test if subtracting evoked responses yields NaN's, since we only have one
    # epoch. Suppress division warnings.
    with pytest.warns(RuntimeWarning, match='[invalid|empty]'):
        stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                       mode='cwt_morlet', frequencies=frequencies,
                       subtract_evoked=True, reg=reg, label=label, decim=20)
    assert np.all(np.isnan(stcs[0].data))
Code example #27
# the frequencies passed as lists (for CSD calculation)
# take some time to choose appropriate values for your analysis, esp. for cycle number: what are your goals? what is the window length? etc.
freqs_n = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46]
cycs_n = [5, 5, 5, 5, 5, 7, 7,  7,  7,  7,  7,  9,  9,  9,  9,  9,  9,  9,  9, 11, 11, 11, 11, 11, 11, 11, 11, 11, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13]

freqs_g = [65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95]
cycs_g = [15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]
# fmins = [3, 8, 14, 22, 31]
# fmaxs = [7, 13, 21, 30, 46]

# for the Beamforming analysis, you need a CSD over all conditions to calculate shared filters
# for each subject, calc csd_all& csd_g_all
for sub in subjs:
    epo = mne.read_epochs("{}{}_2-epo.fif".format(proc_dir,sub))

    csd_n = csd_morlet(epo, frequencies=freqs_n, n_jobs=8, n_cycles=cycs_n, decim=1)
    csd_n.save("{}{}-csd.h5".format(proc_dir,sub))

    csd_g = csd_morlet(epo, frequencies=freqs_g, n_jobs=8, n_cycles=cycs_g, decim=1)
    csd_g.save("{}{}-gamma-csd.h5".format(proc_dir,sub))

# then, you need CSDs for the single conditions you want to pass through the filters and compare
# for each subject, calc csd & csd_g for each condition
for sub in subjs:
    epo = mne.read_epochs("{}{}_2-epo.fif".format(proc_dir,sub))

    for cond,c in conditions.items():

        csd_n = csd_morlet(epo[c], frequencies=freqs_n, n_jobs=8, n_cycles=cycs_n, decim=1)
        csd_n.save("{}{}_{}-csd.h5".format(proc_dir,sub,cond))
Code example #28
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                       stim=False, exclude='bads')

# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()

# Read forward operator
forward = mne.read_forward_solution(fname_fwd)

###############################################################################
# Computing the cross-spectral density matrix at 4 evenly spaced frequencies
# from 6 to 10 Hz. We use a decim value of 20 to speed up the computation in
# this example at the loss of accuracy.
csd = csd_morlet(epochs, tmin=0, tmax=0.5, decim=20,
                 frequencies=np.linspace(6, 10, 4))

# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, forward, csd, reg=0.5)
stc, freqs = apply_dics_csd(csd, filters)

message = 'DICS source power in the 6-10 Hz frequency band'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
                 time_label=message)
Code example #29
                   size=400)

# Indicate the true locations of the source activity on the plot.
brain.add_foci(source_vert1, coords_as_verts=True, hemi='lh')
brain.add_foci(source_vert2, coords_as_verts=True, hemi='rh')

# Rotate the view and add a title.
mlab.view(0, 0, 550, [0, 0, 0])
mlab.title('MNE-dSPM inverse (RMS)', height=0.9)

###############################################################################
# Computing a cortical power map at 10 Hz using a DICS beamformer:

# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])

# Compute the DICS powermap. For this simulated dataset, we need a lot of
# regularization for the beamformer to behave properly. For real recordings,
# this amount of regularization is probably too much.
filters = make_dics(epochs.info, fwd, csd_signal, reg=1, pick_ori='max-power')
power, f = apply_dics_csd(csd_signal, filters)

# Plot the DICS power map.
brain = power.plot('sample',
                   subjects_dir=subjects_dir,
                   hemi='both',
                   figure=2,
                   size=400)

# Indicate the true locations of the source activity on the plot.
Code example #30
    # Create epochs
    ###############################################################################

    title = 'Simulated evoked for two signal vertices'
    epochs = create_epochs(raw2,
                           title=title,
                           fn_simulated_epochs=None,
                           fn_report_h5=fn_report_h5)

    epochs_grad = epochs.copy().pick_types(meg='grad')
    epochs_mag = epochs.copy().pick_types(meg='mag')
    epochs_joint = epochs.copy().pick_types(meg=True)

    # Make CSDs
    csd = csd_morlet(epochs, [config.signal_freq, config.signal_freq2],
                     tmin=0,
                     tmax=1,
                     decim=5)
    noise_csd = csd_morlet(epochs, [config.signal_freq, config.signal_freq2],
                           tmin=-1,
                           tmax=0,
                           decim=5)

    ###############################################################################
    # Compute DICS beamformer results
    ###############################################################################

    # Speed things up by restricting the forward solution to only the two
    # relevant source points.
    src_sel = np.sort(np.array([config.vertex, nb_vertex]))
    fwd = _restrict_forward_to_src_sel(fwd_disc_man, src_sel)
Code example #31
                    picks=picks,
                    baseline=(None, 0),
                    preload=True,
                    reject=dict(grad=4000e-13))
evoked = epochs.average()

# Read forward operator
forward = mne.read_forward_solution(fname_fwd)

###############################################################################
# Computing the cross-spectral density matrix at 4 evenly spaced frequencies
# from 6 to 10 Hz. We use a decim value of 20 to speed up the computation in
# this example at the loss of accuracy.
csd = csd_morlet(epochs,
                 tmin=0,
                 tmax=0.5,
                 decim=20,
                 frequencies=np.linspace(6, 10, 4),
                 n_cycles=2.5)  # short signals, must live with few cycles

# Compute DICS spatial filter and estimate source power.
filters = make_dics(epochs.info, forward, csd, reg=0.5)
print(filters)
stc, freqs = apply_dics_csd(csd, filters)

message = 'DICS source power in the 6-10 Hz frequency band'
brain = stc.plot(surface='inflated',
                 hemi='rh',
                 subjects_dir=subjects_dir,
                 time_label=message)
Code example #32
File: test_dics.py  Project: shrahimim/mne-python
def test_tf_dics():
    """Test 5D time-frequency beamforming based on DICS."""
    fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward()
    epochs, evoked, _, source_vertno = _simulate_data(fwd_fixed)
    vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    reg = 1  # Lots of regularization for our toy dataset

    tmin = 0
    tmax = 9
    tstep = 4
    win_lengths = [5, 5]
    frequencies = [10, 20]
    freq_bins = [(8, 12), (18, 22)]

    # Compute DICS for two time windows and two frequencies
    for mode in ['fourier', 'multitaper', 'cwt_morlet']:
        stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                       mode=mode, freq_bins=freq_bins, frequencies=frequencies,
                       decim=10, reg=reg, label=label)

        # Did we find the true source at 20 Hz?
        assert np.argmax(stcs[1].data[:, 0]) == source_ind
        assert np.argmax(stcs[1].data[:, 1]) == source_ind

        # 20 Hz power should decrease over time
        assert stcs[1].data[source_ind, 0] > stcs[1].data[source_ind, 1]

        # 20 Hz power should be more than 10 Hz power at the true source
        assert stcs[1].data[source_ind, 0] > stcs[0].data[source_ind, 0]

    # Manually compute source power and compare with the last tf_dics result.
    source_power = []
    time_windows = [(0, 5), (4, 9)]
    for time_window in time_windows:
        csd = csd_morlet(epochs, frequencies=[frequencies[1]],
                         tmin=time_window[0], tmax=time_window[1], decim=10)
        csd = csd.sum()
        csd._data /= csd.n_fft
        filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label)
        stc_source_power, _ = apply_dics_csd(csd, filters)
        source_power.append(stc_source_power.data)

    # Comparing tf_dics results with dics_source_power results
    assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0)

    # Test using noise csds. We're going to use identity matrices. That way,
    # since we're using unit-noise-gain weight normalization, there should be
    # no effect.
    stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                   mode='cwt_morlet', frequencies=frequencies, decim=10,
                   reg=reg, label=label, normalize_fwd=False,
                   weight_norm='unit-noise-gain')
    noise_csd = csd.copy()
    inds = np.triu_indices(csd.n_channels)
    # Using [:, :] syntax for in-place broadcasting
    noise_csd._data[:, :] = 2 * np.eye(csd.n_channels)[inds][:, np.newaxis]
    noise_csd.n_fft = 2  # Dividing by n_fft should yield an identity CSD
    noise_csds = [noise_csd, noise_csd]  # Two frequency bins
    stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep,
                        win_lengths, mode='cwt_morlet',
                        frequencies=frequencies, decim=10, reg=reg,
                        label=label, normalize_fwd=False,
                        weight_norm='unit-noise-gain')
    assert_allclose(stcs_norm[0].data, stcs[0].data, atol=0)
    assert_allclose(stcs_norm[1].data, stcs[1].data, atol=0)

    # Test invalid parameter combinations
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, mode='fourier', freq_bins=None)
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, mode='cwt_morlet', frequencies=None)

    # Test if incorrect number of noise CSDs is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, [noise_csds[0]], tmin, tmax,
           tstep, win_lengths, freq_bins=freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths=[0, 1, 2], freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep=0.15,
           win_lengths=[0.2, 0.1], freq_bins=freq_bins)

    # Test if incorrect number of n_ffts is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths, freq_bins=freq_bins, n_ffts=[1])

    # Test if incorrect number of mt_bandwidths is detected
    raises(ValueError, tf_dics, epochs, fwd_surf, None, tmin, tmax, tstep,
           win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper',
           mt_bandwidths=[20])

    # Test if subtracting evoked responses yields NaN's, since we only have one
    # epoch. Suppress division warnings.
    with pytest.warns(RuntimeWarning, match='[invalid|empty]'):
        stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
                       mode='cwt_morlet', frequencies=frequencies,
                       subtract_evoked=True, reg=reg, label=label, decim=20)
    assert np.all(np.isnan(stcs[0].data))
Code example #33
File: source_prep_II.py  Project: TinnErlangen/NEMO
                s=meg,
                n1=len(neg['r1']),
                n2=len(neg['r2']),
                n3=len(neg['s1']),
                n4=len(neg['s2']),
                p1=len(pos['r1']),
                p2=len(pos['r2']),
                p3=len(pos['s1']),
                p4=len(pos['s2'])))

        # calculate CSD matrix for each all, base, exp, rest
        ## alternative with multitaper - template:  csd = csd_multitaper(epo,fmin=7,fmax=14,bandwidth=1)

        csd_all_alpha = csd_morlet(epo_all,
                                   frequencies=freqs["alpha"],
                                   n_jobs=8,
                                   n_cycles=cycles["alpha"],
                                   decim=1)
        csd_all_alpha.save("{dir}nc_{meg}-csd_all_alpha.h5".format(dir=meg_dir,
                                                                   meg=meg))
        csd_all_theta = csd_morlet(epo_all,
                                   frequencies=freqs["theta"],
                                   n_jobs=8,
                                   n_cycles=cycles["theta"],
                                   decim=1)
        csd_all_theta.save("{dir}nc_{meg}-csd_all_theta.h5".format(dir=meg_dir,
                                                                   meg=meg))
        csd_all_beta_low = csd_morlet(epo_all,
                                      frequencies=freqs["beta_low"],
                                      n_jobs=8,
                                      n_cycles=cycles["beta_low"],