Example 1
def test_resample():
    """Test resample (with I/O and multiple files)
    """
    raw = Raw(fif_fname, preload=True).crop(0, 3, False)
    raw_resamp = raw.copy()
    sfreq = raw.info['sfreq']
    # test parallel on upsample
    raw_resamp.resample(sfreq * 2, n_jobs=2)
    assert_true(raw_resamp.n_times == len(raw_resamp._times))
    raw_resamp.save(op.join(tempdir, 'raw_resamp.fif'))
    raw_resamp = Raw(op.join(tempdir, 'raw_resamp.fif'), preload=True)
    assert_true(sfreq == raw_resamp.info['sfreq'] / 2)
    assert_true(raw.n_times == raw_resamp.n_times / 2)
    assert_true(raw_resamp._data.shape[1] == raw_resamp.n_times)
    assert_true(raw._data.shape[0] == raw_resamp._data.shape[0])
    # test non-parallel on downsample
    raw_resamp.resample(sfreq, n_jobs=1)
    assert_true(raw_resamp.info['sfreq'] == sfreq)
    assert_true(raw._data.shape == raw_resamp._data.shape)
    assert_true(raw.first_samp == raw_resamp.first_samp)
    assert_true(raw.last_samp == raw_resamp.last_samp)
    # upsampling then downsampling doubles resampling error, but this still
    # works (hooray). Note that the stim channels had to be sub-sampled
    # without filtering to be accurately preserved
    # note we have to treat MEG and EEG+STIM channels differently (tols)
    assert_allclose(raw._data[:306, 200:-200],
                    raw_resamp._data[:306, 200:-200],
                    rtol=1e-2,
                    atol=1e-12)
    assert_allclose(raw._data[306:, 200:-200],
                    raw_resamp._data[306:, 200:-200],
                    rtol=1e-2,
                    atol=1e-7)

    # now check multiple file support w/resampling, as order of operations
    # (concat, resample) should not affect our data
    raw1 = raw.copy()
    raw2 = raw.copy()
    raw3 = raw.copy()
    raw4 = raw.copy()
    raw1 = concatenate_raws([raw1, raw2])
    raw1.resample(10)
    raw3.resample(10)
    raw4.resample(10)
    raw3 = concatenate_raws([raw3, raw4])
    assert_array_equal(raw1._data, raw3._data)
    assert_array_equal(raw1._first_samps, raw3._first_samps)
    assert_array_equal(raw1._last_samps, raw3._last_samps)
    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
    assert_equal(raw1.first_samp, raw3.first_samp)
    assert_equal(raw1.last_samp, raw3.last_samp)
    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
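
All of the test snippets in this listing come from the same MNE-Python test module and rely on a shared preamble that is not shown. Below is a minimal sketch of what that preamble might look like; the exact import paths differ across MNE releases (`mne.fiff` in older ones, `mne.io` later), and `fif_fname` and `tempdir` are placeholders, not the original values.

import warnings
import os.path as op

import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                           assert_allclose)
from nose.tools import assert_true, assert_equal

from mne import pick_types, equalize_channels
from mne.fiff import Raw, concatenate_raws  # `mne.io` in newer releases

fif_fname = op.join('data', 'test_raw.fif')  # placeholder: path to a sample raw FIF file
tempdir = '/tmp'                             # placeholder: scratch directory for saved output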
Example 2
def test_raw_copy():
    """ Test Raw copy"""
    raw = Raw(fif_fname, preload=True)
    data, _ = raw[:, :]
    copied = raw.copy()
    copied_data, _ = copied[:, :]
    assert_array_equal(data, copied_data)
    assert_equal(sorted(raw.__dict__.keys()), sorted(copied.__dict__.keys()))

    raw = Raw(fif_fname, preload=False)
    data, _ = raw[:, :]
    copied = raw.copy()
    copied_data, _ = copied[:, :]
    assert_array_equal(data, copied_data)
    assert_equal(sorted(raw.__dict__.keys()), sorted(copied.__dict__.keys()))
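
As a side note on why these comparisons are meaningful: `Raw.copy` returns a fully independent (deep) copy, so mutating the copy leaves the original untouched. A minimal sketch, assuming the preamble sketched above:

raw = Raw(fif_fname, preload=True)
orig = raw._data.copy()
raw_copy = raw.copy()
raw_copy._data[:] = 0.0                 # mutate only the copy
assert_array_equal(raw._data, orig)     # the original buffer is unchanged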
Example 3
def test_io_complex():
    """Test IO with complex data types
    """
    dtypes = [np.complex64, np.complex128]

    raw = Raw(fif_fname, preload=True)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])

    data_orig, _ = raw[picks, start:stop]

    for di, dtype in enumerate(dtypes):
        imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
                             data_orig.shape[1]), dtype)

        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        # saving complex data should trigger a warning
        with warnings.catch_warnings(record=True) as w:
            raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5)
            # warning only gets thrown on first instance
            assert_equal(len(w), 1 if di == 0 else 0)

        raw2 = Raw(op.join(tempdir, 'raw.fif'))
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_array_almost_equal(raw2_data[:, :n_samp],
                                  raw_cp._data[picks, :n_samp])
        # with preloading
        raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_array_almost_equal(raw2_data[:, :n_samp],
                                  raw_cp._data[picks, :n_samp])
Example 4
def test_hilbert():
    """ Test computation of analytic signal using hilbert """
    raw = Raw(fif_fname, preload=True)
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]

    raw2 = raw.copy()
    raw.apply_hilbert(picks)
    raw2.apply_hilbert(picks, envelope=True, n_jobs=2)

    env = np.abs(raw._data[picks, :])
    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
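
For context, the quantity compared here is the amplitude envelope, i.e. the magnitude of the analytic signal. The same relationship can be reproduced with SciPy alone; the sketch below is an independent illustration, not part of the MNE test:

import numpy as np
from scipy.signal import hilbert

t = np.linspace(0.0, 1.0, 1000, endpoint=False)
modulation = 1.0 + 0.5 * np.cos(2 * np.pi * 2 * t)   # slowly varying amplitude
x = modulation * np.sin(2 * np.pi * 50 * t)          # amplitude-modulated carrier

analytic = hilbert(x)          # roughly what apply_hilbert(picks) stores per channel
envelope = np.abs(analytic)    # roughly what apply_hilbert(picks, envelope=True) stores
# away from the edges the envelope tracks the modulation closely
assert np.allclose(envelope[100:-100], modulation[100:-100], atol=0.05)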
Example 5
def test_hilbert():
    """ Test computation of analytic signal using hilbert """
    raw = Raw(fif_fname, preload=True)
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]

    raw2 = raw.copy()
    raw.apply_hilbert(picks)
    raw2.apply_hilbert(picks, envelope=True, n_jobs=2)

    env = np.abs(raw._data[picks, :])
    assert_array_almost_equal(env, raw2._data[picks, :])
Example 6
def test_equalize_channels():
    """Test equalization of channels
    """
    raw1 = Raw(fif_fname)

    raw2 = raw1.copy()
    ch_names = raw1.ch_names[2:]
    raw1.drop_channels(raw1.ch_names[:1])
    raw2.drop_channels(raw2.ch_names[1:2])
    my_comparison = [raw1, raw2]
    equalize_channels(my_comparison)
    for e in my_comparison:
        assert_equal(ch_names, e.ch_names)
Example 7
def test_crop():
    """Test cropping raw files
    """
    # split a concatenated file to test a difficult case
    raw = Raw([fif_fname, fif_fname], preload=True)
    split_size = 10.  # in seconds
    sfreq = raw.info['sfreq']
    nsamp = (raw.last_samp - raw.first_samp + 1)

    # do an annoying case (off-by-one splitting)
    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
    tmins = np.sort(tmins)
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.crop(tmin, tmax, True)
    all_raw_2 = concatenate_raws(raws, preload=True)
    assert_true(raw.first_samp == all_raw_2.first_samp)
    assert_true(raw.last_samp == all_raw_2.last_samp)
    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])

    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
    tmaxs /= sfreq
    tmins /= sfreq

    # going in reverse order so the last fname is the first file (need it later)
    raws = [None] * len(tmins)
    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
        raws[ri] = raw.copy()
        raws[ri].crop(tmin, tmax, False)
    # test concatenation of split file
    all_raw_1 = concatenate_raws(raws, preload=True)

    all_raw_2 = raw.crop(0, None, True)
    for ar in [all_raw_1, all_raw_2]:
        assert_true(raw.first_samp == ar.first_samp)
        assert_true(raw.last_samp == ar.last_samp)
        assert_array_equal(raw[:, :][0], ar[:, :][0])
Example 8
def test_io_complex():
    """Test IO with complex data types
    """
    dtypes = [np.complex64, np.complex128]

    raw = Raw(fif_fname, preload=True)
    picks = np.arange(5)
    start, stop = raw.time_as_index([0, 5])

    data_orig, _ = raw[picks, start:stop]

    for di, dtype in enumerate(dtypes):
        imag_rand = np.array(
            1j * np.random.randn(data_orig.shape[0], data_orig.shape[1]),
            dtype)

        raw_cp = raw.copy()
        raw_cp._data = np.array(raw_cp._data, dtype)
        raw_cp._data[picks, start:stop] += imag_rand
        # saving complex data should trigger a warning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            raw_cp.save(op.join(tempdir, 'raw.fif'),
                        picks,
                        tmin=0,
                        tmax=5,
                        overwrite=True)
            # warning gets thrown on every instance b/c simplefilter('always')
            assert_equal(len(w), 1)

        raw2 = Raw(op.join(tempdir, 'raw.fif'))
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
        # with preloading
        raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
        raw2_data, _ = raw2[picks, :]
        n_samp = raw2_data.shape[1]
        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
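
The only substantive difference between the two `test_io_complex` versions above is the warnings filter: with the default filter a repeated warning from the same location is reported once, whereas `simplefilter('always')` reports every occurrence. A self-contained illustration with a hypothetical warning (not MNE's actual message):

import warnings

def might_warn():
    warnings.warn("complex data stored", RuntimeWarning)

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always')    # as in the second version
    might_warn()
    might_warn()
assert len(w) == 2                     # every occurrence is recorded

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('default')   # close to the first version's behaviour
    might_warn()
    might_warn()
assert len(w) == 1                     # the repeat from the same location is suppressed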
Example 9
def test_filter():
    """ Test filtering (FIR and IIR) and Raw.apply_function interface """
    raw = Raw(fif_fname, preload=True).crop(0, 10, False)
    sig_dec = 11
    sig_dec_notch = 12
    sig_dec_notch_fit = 12
    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
    picks = picks_meg[:4]

    raw_lp = raw.copy()
    raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)

    raw_hp = raw.copy()
    raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)

    raw_bp = raw.copy()
    raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)

    raw_bs = raw.copy()
    raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)

    data, _ = raw[picks, :]

    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]

    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)

    raw_lp_iir = raw.copy()
    raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
    raw_hp_iir = raw.copy()
    raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
    raw_bp_iir = raw.copy()
    raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
    lp_data_iir, _ = raw_lp_iir[picks, :]
    hp_data_iir, _ = raw_hp_iir[picks, :]
    bp_data_iir, _ = raw_bp_iir[picks, :]
    summation = lp_data_iir + hp_data_iir + bp_data_iir
    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
                              sig_dec)

    # make sure we didn't touch other channels
    data, _ = raw[picks_meg[4:], :]
    bp_data, _ = raw_bp[picks_meg[4:], :]
    assert_array_equal(data, bp_data)
    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
    assert_array_equal(data, bp_data_iir)

    # do a very simple check on line filtering
    raw_bs = raw.copy()
    with warnings.catch_warnings(record=True) as w:
        raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
        data_bs, _ = raw_bs[picks, :]
        raw_notch = raw.copy()
        raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
    data_notch, _ = raw_notch[picks, :]
    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)

    # now use the sinusoidal fitting
    raw_notch = raw.copy()
    raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
    data_notch, _ = raw_notch[picks, :]
    data, _ = raw[picks, :]
    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
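
The `sig_dec` constants above are the `decimal` argument of `assert_array_almost_equal`, which checks `abs(desired - actual) < 1.5 * 10 ** -decimal` elementwise. A quick reminder of what that tolerance means, in isolation:

import numpy as np
from numpy.testing import assert_array_almost_equal

a = np.array([1.0, 2.0])
assert_array_almost_equal(a, a + 1e-12, decimal=11)   # passes: differences stay below 1.5e-11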
Example 10
# Setting the noise covariance and whitened data covariance regularization
# parameters
noise_reg = 0.03
data_reg = 0.001

# Subtract evoked response prior to computation?
subtract_evoked = False
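
# NOTE (editor's sketch): the full example defines the following earlier on;
# placeholder values are shown here so the snippet is self-contained, they are
# not the values from the original example. `compute_covariance` comes from
# mne and `tf_lcmv` from mne.beamformer; `raw_noise`, `epochs`, `epochs_noise`,
# `forward` and `event_id` are the usual MNE objects loaded/created beforehand.
freq_bins = [(4, 12), (12, 30), (30, 55)]   # (l_freq, h_freq) pairs in Hz
win_lengths = [0.3, 0.2, 0.15]              # one analysis window length per bin, in s
tmin, tmax, tstep = -0.5, 0.75, 0.05        # analysis interval and window step, in s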

# Calculating covariance from empty room noise. To use baseline data as noise
# substitute raw for raw_noise, epochs for epochs_noise, and 0 for tmax.
# Note, if using baseline data, the averaged evoked response in the baseline
# epoch should be flat.
noise_covs = []
for (l_freq, h_freq) in freq_bins:
    raw_band = raw_noise.copy()
    raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir', n_jobs=1)
    epochs_band = mne.Epochs(raw_band, epochs_noise.events, event_id,
                             tmin=tmin, tmax=tmax, picks=epochs.picks,
                             proj=True)
    noise_cov = compute_covariance(epochs_band)
    noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=noise_reg,
                                   grad=noise_reg, eeg=noise_reg, proj=True)
    noise_covs.append(noise_cov)
    del raw_band  # to save memory

# Computing LCMV solutions for time-frequency windows in a label in source
# space (for faster computation); use label=None for the full solution
stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
               freq_bins=freq_bins, subtract_evoked=subtract_evoked,
               reg=data_reg)  # the original example also restricts the solution to a label