Example #1
    def transform(self, epochs):
        from mne import EpochsArray
        from mne.time_frequency import single_trial_power
        sfreq = epochs.info['sfreq']

        # Time Frequency decomposition
        tfr = single_trial_power(epochs._data, sfreq=sfreq, **self.tfr_kwargs)

        # Consider frequencies as if they were different time points
        n_trial, n_chan, n_freq, n_time = tfr.shape
        tfr = np.reshape(tfr, [n_trial, n_chan, n_freq * n_time])

        # Make pseudo epochs
        sfreq = epochs.info['sfreq']
        decim = self.tfr_kwargs.get('decim', None)
        if isinstance(decim, slice):
            decim = decim.step

        if decim is not None and decim > 1:
            sfreq /= decim
        info = epochs.info.copy()
        info['sfreq'] = sfreq
        self._tfr_epochs = EpochsArray(data=tfr,
                                       info=info,
                                       events=epochs.events)
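A note on the reshape above: with NumPy's default C ordering, frequency varies slowest along the flattened pseudo-time axis, so column f * n_time + t of the pseudo epochs corresponds to frequency index f and time index t. A minimal, self-contained sketch (the sizes are made up for illustration):

n_freq, n_time = 4, 100            # hypothetical sizes
flat_index = 257                   # some column of the reshaped array
freq_index, time_index = divmod(flat_index, n_time)   # -> (2, 57)
assert flat_index == freq_index * n_time + time_index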
Example #2
    def transform(self, epochs):
        from mne import EpochsArray
        from mne.time_frequency import single_trial_power
        sfreq = epochs.info['sfreq']

        # Time Frequency decomposition
        tfr = single_trial_power(epochs._data, sfreq=sfreq,
                                 **self.tfr_kwargs)

        # Consider frequencies as if they were different time points
        n_trial, n_chan, n_freq, n_time = tfr.shape
        tfr = np.reshape(tfr, [n_trial, n_chan, n_freq * n_time])

        # Make pseudo epochs
        sfreq = epochs.info['sfreq']
        decim = self.tfr_kwargs.get('decim', None)
        if isinstance(decim, slice):
            decim = decim.step

        if decim is not None and decim > 1:
            sfreq /= decim
        info = epochs.info.copy()
        info['sfreq'] = sfreq
        self._tfr_epochs = EpochsArray(data=tfr, info=info,
                                       events=epochs.events)
Example #3
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times

    frequencies = np.arange(6, 20, 5)  # define frequencies of interest
    Fs = raw.info['sfreq']  # sampling in Hz
    n_cycles = frequencies / float(4)
    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
                                      n_cycles=n_cycles, use_fft=True)

    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
    assert_true(power.shape == phase_lock.shape)
    assert_true(np.sum(phase_lock >= 1) == 0)
    assert_true(np.sum(phase_lock <= 0) == 0)

    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
                                      n_cycles=2, use_fft=False)

    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
    assert_true(power.shape == phase_lock.shape)
    assert_true(np.sum(phase_lock >= 1) == 0)
    assert_true(np.sum(phase_lock <= 0) == 0)

    tfr = cwt_morlet(data[0], Fs, frequencies, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(frequencies), len(times)))

    single_power = single_trial_power(data, Fs, frequencies, use_fft=False,
                                      n_cycles=2)

    assert_array_almost_equal(np.mean(single_power, axis=0), power)
Example #4
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = fiff.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = fiff.pick_types(raw.info, meg='grad', eeg=False,
                            stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times

    frequencies = np.arange(6, 20, 5)  # define frequencies of interest
    Fs = raw.info['sfreq']  # sampling in Hz
    n_cycles = frequencies / float(4)
    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
                                      n_cycles=n_cycles, use_fft=True)

    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
    assert_true(power.shape == phase_lock.shape)
    assert_true(np.sum(phase_lock >= 1) == 0)
    assert_true(np.sum(phase_lock <= 0) == 0)

    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
                                       n_cycles=2, use_fft=False)

    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
    assert_true(power.shape == phase_lock.shape)
    assert_true(np.sum(phase_lock >= 1) == 0)
    assert_true(np.sum(phase_lock <= 0) == 0)

    tfr = cwt_morlet(data[0], Fs, frequencies, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(frequencies), len(times)))

    single_power = single_trial_power(data, Fs, frequencies, use_fft=False,
                                      n_cycles=2)

    assert_array_almost_equal(np.mean(single_power, axis=0), power)
Example #5
    def transform(self, epochs):
        from mne import EpochsArray
        from mne.time_frequency import single_trial_power
        # Time Frequency decomposition
        tfr = single_trial_power(epochs._data, sfreq=epochs.info['sfreq'],
                                 **self.tfr_kwargs)

        # Consider frequencies as if they were different time points
        n_trial, n_chan, n_freq, n_time = tfr.shape
        tfr = np.reshape(tfr, [n_trial, n_chan, n_freq * n_time])

        # Make pseudo epochs
        sfreq = epochs.info['sfreq']
        if 'decim' in self.tfr_kwargs.keys():
            sfreq /= self.tfr_kwargs['decim']
        info = epochs.info.copy()
        info['sfreq'] = sfreq
        self._tfr_epochs = EpochsArray(data=tfr, info=info,
                                       events=epochs.events)
# Time vector
times = 1e3 * epochs.times  # change unit to ms

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
n_cycles = frequencies / frequencies[0]
baseline_mask = times[::decim] < 0

# now create TFR representations for all conditions
epochs_power = []
for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
    this_power = single_trial_power(condition, sfreq=sfreq,
                                    frequencies=frequencies, n_cycles=n_cycles,
                                    decim=decim)
    this_power = this_power[:, 0, :, :]  # we only have one channel.
    # Compute ratio with baseline power (be sure to correct time vector with
    # decimation factor)
    epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
    this_power /= epochs_baseline[..., np.newaxis]
    epochs_power.append(this_power)

###############################################################################
# Setup repeated measures ANOVA

n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
# we will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list of
# the number of levels for each factor
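The snippet stops before the ANOVA call itself. A hedged sketch of how the factor levels are typically passed to mne.stats.f_mway_rm, assuming a 2 x 2 within-subject design (the design, the variable names below, and the stacking of epochs_power are assumptions, not part of the example above):

import numpy as np
from mne.stats import f_mway_rm

factor_levels = [2, 2]    # assumed: number of levels for each of two factors
effects = 'A*B'           # main effects and interaction
# Stack the per-condition power arrays into
# (n_replications, n_conditions, n_freqs * n_times).
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
data = data.reshape(n_replications, n_conditions, -1)
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)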
ch_name = raw.info['ch_names'][picks[0]]
times = 1e3 * epochs_condition_1.times  # change unit to ms

# Factor to downsample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
n_cycles = 1.5

epochs_power_1 = single_trial_power(data_condition_1,
                                    sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles,
                                    decim=decim)

epochs_power_2 = single_trial_power(data_condition_2,
                                    sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles,
                                    decim=decim)

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 0
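The baseline correction itself is cut off at this point; judging from the identical snippet near the end of this page, it presumably continues as:

epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
epochs_power_2 /= epochs_baseline_2[..., np.newaxis]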
Example #8
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info,
                       meg='grad',
                       eeg=False,
                       stim=False,
                       include=include,
                       exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw,
                            events,
                            event_id,
                            tmin,
                            tmax,
                            baseline=(None, 0))

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0],
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked,
                              freqs,
                              n_cycles,
                              use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)
    # Test picks argument
    power_picks, itc_picks = tfr_morlet(epochs_nopicks,
                                        freqs=freqs,
                                        n_cycles=n_cycles,
                                        use_fft=True,
                                        return_itc=True,
                                        picks=picks)
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test subtract

    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(
        gave.data.shape,
        (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=2,
                            use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))

    single_power = single_trial_power(data,
                                      Fs,
                                      freqs,
                                      use_fft=False,
                                      n_cycles=2)

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)
# Time vector
times = 1e3 * epochs.times  # change unit to ms

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
Fs = raw.info['sfreq']  # sampling in Hz
n_cycles = frequencies / frequencies[0]
baseline_mask = times[::decim] < 0

# now create TFR representations for all conditions
epochs_power = []
for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
    this_power = single_trial_power(condition, Fs=Fs, frequencies=frequencies,
                                    n_cycles=n_cycles, use_fft=False,
                                    decim=decim)
    this_power = this_power[:, 0, :, :]  # we only have one channel.
    # Compute ratio with baseline power (be sure to correct time vector with
    # decimation factor)
    epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
    this_power /= epochs_baseline[..., np.newaxis]
    epochs_power.append(this_power)

###############################################################################
# Setup repeated measures ANOVA

n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
# we will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list of
# the number of levels for each factor
Example #10
def test_compute_tfr():
    """Test _compute_tfr function"""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=[], exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    sfreq = epochs.info['sfreq']
    freqs = np.arange(10, 20, 3).astype(float)

    # Check all combination of options
    for method, use_fft, zero_mean, output in product(
        ('multitaper', 'morlet'), (False, True), (False, True),
        ('complex', 'power', 'phase',
         'avg_power_itc', 'avg_power', 'itc')):
        # Check exception
        if (method == 'multitaper') and (output == 'phase'):
            assert_raises(NotImplementedError, _compute_tfr, data, freqs,
                          sfreq, method=method, output=output)
            continue

        # Check runs
        out = _compute_tfr(data, freqs, sfreq, method=method,
                           use_fft=use_fft, zero_mean=zero_mean,
                           n_cycles=2., output=output)
        # Check shapes
        shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
        if ('avg' in output) or ('itc' in output):
            assert_array_equal(shape[1:], out.shape)
        else:
            assert_array_equal(shape, out.shape)

        # Check types
        if output in ('complex', 'avg_power_itc'):
            assert_equal(np.complex128, out.dtype)
        else:
            assert_equal(np.float64, out.dtype)
        assert_true(np.all(np.isfinite(out)))

    # Check that functions are equivalent to
    # i) single_trial_power: X, shape (n_signals, n_chans, n_times)
    old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.)
    new_power = _compute_tfr(data, freqs, sfreq, n_cycles=2.,
                             method='morlet', output='power')
    assert_array_almost_equal(old_power, new_power)
    old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.,
                                   times=epochs.times, baseline=(-.100, 0),
                                   baseline_mode='ratio')
    new_power = rescale(new_power, epochs.times, (-.100, 0), 'ratio')

    # ii) cwt_morlet: X, shape (n_signals, n_times)
    old_complex = cwt_morlet(data[0], sfreq, freqs, n_cycles=2.)
    new_complex = _compute_tfr(data[[0]], freqs, sfreq, n_cycles=2.,
                               method='morlet', output='complex')
    assert_array_almost_equal(old_complex, new_complex[0])

    # Check errors params
    for _data in (None, 'foo', data[0]):
        assert_raises(ValueError, _compute_tfr, _data, freqs, sfreq)
    for _freqs in (None, 'foo', [[0]]):
        assert_raises(ValueError, _compute_tfr, data, _freqs, sfreq)
    for _sfreq in (None, 'foo'):
        assert_raises(ValueError, _compute_tfr, data, freqs, _sfreq)
    for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
        for value in (None, 'foo'):
            kwargs = {key: value}  # FIXME pep8
            assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                          **kwargs)

    # No time_bandwidth param in morlet
    assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                  method='morlet', time_bandwidth=1)
    # No phase in multitaper XXX Check ?
    assert_raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
                  method='multitaper', output='phase')

    # Inter-trial coherence tests
    out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
    assert_true(np.sum(out >= 1) == 0)
    assert_true(np.sum(out <= 0) == 0)

    # Check decim shapes
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
        _decim = slice(None, None, decim) if isinstance(decim, int) else decim
        n_time = len(np.arange(data.shape[2])[_decim])
        shape = np.r_[data.shape[:2], len(freqs), n_time]
        for method in ('multitaper', 'morlet'):
            # Single trials
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, n_cycles=2.)
            assert_array_equal(shape, out.shape)
            # Averages
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, output='avg_power',
                               n_cycles=2.)
            assert_array_equal(shape[1:], out.shape)
Example #11
def test_time_frequency():
    """Test the to-be-deprecated time frequency transform (PSD and ITC)"""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
                            baseline=(None, 0))

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                              use_fft=True, return_itc=True, decim=slice(0, 2))
    # Test picks argument and average parameter
    assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, return_itc=True, average=False)

    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)

    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_picks_avg.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test subtract

    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(power_.data.shape == (len(picks), len(freqs), 2))
    assert_true(power_.data.shape == itc_.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    tfr2 = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2,
                      decim=slice(0, 2))
    assert_true(tfr2.shape == (len(picks), len(freqs), 2))

    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)
    single_power2 = single_trial_power(data, Fs, freqs, use_fft=False,
                                       n_cycles=2, decim=slice(0, 2))
    single_power3 = single_trial_power(data, Fs, freqs, use_fft=False,
                                       n_cycles=2, decim=slice(1, 3))
    single_power4 = single_trial_power(data, Fs, freqs, use_fft=False,
                                       n_cycles=2, decim=slice(2, 4))

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
    assert_array_almost_equal(np.mean(single_power2, axis=0),
                              power.data[:, :, :2])
    assert_array_almost_equal(np.mean(single_power3, axis=0),
                              power.data[:, :, 1:3])
    assert_array_almost_equal(np.mean(single_power4, axis=0),
                              power.data[:, :, 2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
                                    use_fft=use_fft, return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = range(50, 55)
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = cwt_morlet(data[0, :, :], sfreq=epochs.info['sfreq'],
                     freqs=freqs, decim=decim)
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError, cwt, data[0, :, :], Ws,
                              use_fft=use_fft, mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)

    # Test decim parameter checks
    assert_raises(TypeError, single_trial_power, data, Fs, freqs,
                  use_fft=False, n_cycles=2, decim=None)
    assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, use_fft=True, return_itc=True,
                  decim='decim')
    def transform(self, X):
        return single_trial_power(X, self.sfreq, self.frequencies,
                                  n_cycles=self.n_cycles, decim=self.decim,
                                  n_jobs=self.n_jobs)
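The two-line transform above presumably belongs to a scikit-learn-style transformer. A minimal, self-contained sketch of such a wrapper (the class name and constructor are assumptions; single_trial_power is the legacy MNE API used throughout these examples):

from sklearn.base import BaseEstimator, TransformerMixin
from mne.time_frequency import single_trial_power


class SingleTrialPowerTransformer(BaseEstimator, TransformerMixin):
    """Hypothetical wrapper computing single-trial power for each epoch."""

    def __init__(self, sfreq, frequencies, n_cycles=4., decim=1, n_jobs=1):
        self.sfreq = sfreq
        self.frequencies = frequencies
        self.n_cycles = n_cycles
        self.decim = decim
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        return self  # stateless, nothing to learn

    def transform(self, X):
        # X is an array of shape (n_epochs, n_channels, n_times)
        return single_trial_power(X, self.sfreq, self.frequencies,
                                  n_cycles=self.n_cycles, decim=self.decim,
                                  n_jobs=self.n_jobs)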
Example #13
# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
n_cycles = frequencies / frequencies[0]
baseline_mask = times[::decim] < 0

###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
    this_power = single_trial_power(condition,
                                    sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles,
                                    decim=decim)
    this_power = this_power[:, 0, :, :]  # we only have one channel.
    # Compute ratio with baseline power (be sure to correct time vector with
    # decimation factor)
    epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
    this_power /= epochs_baseline[..., np.newaxis]
    epochs_power.append(this_power)

###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument which is a list of the number
# of levels for each factor.
# evoked_data = np.mean(data, 0)

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2)  # define frequencies of interest
sfreq = raw.info["sfreq"]  # sampling in Hz
epochs_power = single_trial_power(
    data,
    sfreq=sfreq,
    frequencies=frequencies,
    n_cycles=4,
    use_fft=False,
    n_jobs=1,
    baseline=(-100, 0),
    times=times,
    baseline_mode="ratio",
    decim=decim,
)

# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]
# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2)  # define frequencies of interest
Fs = raw.info['sfreq']  # sampling in Hz
epochs_power = single_trial_power(data,
                                  Fs=Fs,
                                  frequencies=frequencies,
                                  n_cycles=4,
                                  use_fft=False,
                                  n_jobs=1,
                                  baseline=(-100, 0),
                                  times=times,
                                  baseline_mode='ratio',
                                  decim=decim)

# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]
Example #16
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)

    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc = itc + power  # test add
    itc = itc - power  # test subtract
    itc -= power
    itc += power

    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))

    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)
evoked_data = np.mean(data, 0)

# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
epochs_power = single_trial_power(data, sfreq=sfreq, frequencies=frequencies,
                                  n_cycles=4, n_jobs=1,
                                  baseline=(-100, 0), times=times,
                                  baseline_mode='ratio', decim=decim)

# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]

epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should now be 0
                                reject=reject)
data_condition_2 = epochs_condition_2.get_data()  # as 3D matrix
data_condition_2 *= 1e13  # change unit to fT / cm

# Take only one channel
data_condition_1 = data_condition_1[:, 97:98, :]
data_condition_2 = data_condition_2[:, 97:98, :]

# Time vector
times = 1e3 * epochs_condition_1.times  # change unit to ms

frequencies = np.arange(7, 30, 3)  # define frequencies of interest
Fs = raw.info['sfreq']  # sampling in Hz
n_cycles = 1.5
epochs_power_1 = single_trial_power(data_condition_1, Fs=Fs,
                                   frequencies=frequencies,
                                   n_cycles=n_cycles, use_fft=False)

epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs,
                                   frequencies=frequencies,
                                   n_cycles=n_cycles, use_fft=False)

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# do ratio with baseline power:
epochs_power_1 /= np.mean(epochs_power_1[:, :, times < 0], axis=2)[:, :, None]
epochs_power_2 /= np.mean(epochs_power_2[:, :, times < 0], axis=2)[:, :, None]

###############################################################################
# Compute statistic
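The statistic itself is not shown in this excerpt. A hedged sketch of how the two baseline-normalized power arrays are typically compared with a cluster-level permutation test (threshold and n_permutations below are placeholder values):

from mne.stats import permutation_cluster_test

threshold = 6.0   # placeholder F threshold
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([epochs_power_1, epochs_power_2],
                             n_permutations=100, threshold=threshold, tail=0)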
Example #19
def test_compute_tfr():
    """Test _compute_tfr function"""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=[], exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    sfreq = epochs.info['sfreq']
    freqs = np.arange(10, 20, 3).astype(float)

    # Check all combination of options
    for method, use_fft, zero_mean, output in product(
        ('multitaper', 'morlet'), (False, True), (False, True),
        ('complex', 'power', 'phase',
         'avg_power_itc', 'avg_power', 'itc')):
        # Check exception
        if (method == 'multitaper') and (output == 'phase'):
            assert_raises(NotImplementedError, _compute_tfr, data, freqs,
                          sfreq, method=method, output=output)
            continue

        # Check runs
        out = _compute_tfr(data, freqs, sfreq, method=method,
                           use_fft=use_fft, zero_mean=zero_mean,
                           n_cycles=2., output=output)
        # Check shapes
        shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
        if ('avg' in output) or ('itc' in output):
            assert_array_equal(shape[1:], out.shape)
        else:
            assert_array_equal(shape, out.shape)

        # Check types
        if output in ('complex', 'avg_power_itc'):
            assert_equal(np.complex128, out.dtype)
        else:
            assert_equal(np.float64, out.dtype)
        assert_true(np.all(np.isfinite(out)))

    # Check that functions are equivalent to
    # i) single_trial_power: X, shape (n_signals, n_chans, n_times)
    old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.)
    new_power = _compute_tfr(data, freqs, sfreq, n_cycles=2.,
                             method='morlet', output='power')
    assert_array_almost_equal(old_power, new_power)
    old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.,
                                   times=epochs.times, baseline=(-.100, 0),
                                   baseline_mode='ratio')
    new_power = rescale(new_power, epochs.times, (-.100, 0), 'ratio')

    # ii) cwt_morlet: X, shape (n_signals, n_times)
    old_complex = cwt_morlet(data[0], sfreq, freqs, n_cycles=2.)
    new_complex = _compute_tfr(data[[0]], freqs, sfreq, n_cycles=2.,
                               method='morlet', output='complex')
    assert_array_almost_equal(old_complex, new_complex[0])

    # Check errors params
    for _data in (None, 'foo', data[0]):
        assert_raises(ValueError, _compute_tfr, _data, freqs, sfreq)
    for _freqs in (None, 'foo', [[0]]):
        assert_raises(ValueError, _compute_tfr, data, _freqs, sfreq)
    for _sfreq in (None, 'foo'):
        assert_raises(ValueError, _compute_tfr, data, freqs, _sfreq)
    for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
        for value in (None, 'foo'):
            kwargs = {key: value}  # FIXME pep8
            assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                          **kwargs)

    # No time_bandwidth param in morlet
    assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                  method='morlet', time_bandwidth=1)
    # No phase in multitaper XXX Check ?
    assert_raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
                  method='multitaper', output='phase')

    # Inter-trial coherence tests
    out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
    assert_true(np.sum(out >= 1) == 0)
    assert_true(np.sum(out <= 0) == 0)

    # Check decim shapes
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
        _decim = slice(None, None, decim) if isinstance(decim, int) else decim
        n_time = len(np.arange(data.shape[2])[_decim])
        shape = np.r_[data.shape[:2], len(freqs), n_time]
        for method in ('multitaper', 'morlet'):
            # Single trials
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, n_cycles=2.)
            assert_array_equal(shape, out.shape)
            # Averages
            out = _compute_tfr(data, freqs, sfreq, method=method,
                               decim=decim, output='avg_power',
                               n_cycles=2.)
            assert_array_equal(shape[1:], out.shape)
evoked_data = np.mean(data, 0)

# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)

# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
epochs_power = single_trial_power(data, sfreq=sfreq, frequencies=frequencies,
                                  n_cycles=4, n_jobs=1,
                                  baseline=(-100, 0), times=times,
                                  baseline_mode='ratio', decim=decim)

# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]

epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should now be 0
# Time vector
times = 1e3 * epochs.times  # change unit to ms

# Take only one channel
ch_name = raw.info['ch_names'][97]
data = data[:, 97:98, :]

evoked_data = np.mean(data, 0)

# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)

frequencies = np.arange(8, 40, 2)  # define frequencies of interest
Fs = raw.info['sfreq']  # sampling in Hz
epochs_power = single_trial_power(data, Fs=Fs, frequencies=frequencies,
                                  n_cycles=4, use_fft=False, n_jobs=1,
                                  baseline=(-100, 0), times=times,
                                  baseline_mode='ratio')

# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
epochs_power = epochs_power[:, :, :, time_mask]
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]

epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should now be 0

###############################################################################
# Compute statistic
threshold = 2.5
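The cluster test call is cut off after the threshold is set. A hedged sketch of how threshold and the log-ratio epochs_power are typically fed to a one-sample cluster permutation test (n_permutations is a placeholder):

from mne.stats import permutation_cluster_1samp_test

T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=100,
                                   threshold=threshold, tail=0)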
# Time vector
times = 1e3 * epochs_condition_1.times  # change unit to ms

# Factor to downsample the temporal dimension of the PSD computed by
# single_trial_power.  Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
frequencies = np.arange(7, 30, 3)  # define frequencies of interest
sfreq = raw.info['sfreq']  # sampling in Hz
n_cycles = 1.5

epochs_power_1 = single_trial_power(data_condition_1, sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles, decim=decim)

epochs_power_2 = single_trial_power(data_condition_2, sfreq=sfreq,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles, decim=decim)

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 0
epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
Example #23
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5

    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
                            baseline=(None, 0))

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Test picks argument
    power_picks, itc_picks = tfr_morlet(epochs_nopicks, freqs=freqs,
                                        n_cycles=n_cycles, use_fft=True,
                                        return_itc=True, picks=picks)
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test subtract

    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))

    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation
    for decim in [2, 3]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
                                    use_fft=use_fft, return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError, cwt, data[0, :, :], Ws,
                              use_fft=use_fft, mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)