Example #1
n = 0  # harmonic number; assumed module-level counter used by data_fun


def data_fun(times):
    """Generate time-staggered sinusoids at harmonics of 10 Hz."""
    # Assumed preamble following the standard MNE sparse-source simulation
    # pattern; n_dipoles is defined earlier in the original script.
    global n
    n_samp = len(times)
    window = np.zeros(n_samp)
    start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
                   for ii in (2 * n, 2 * n + 1)]
    window[start:stop] = 1.
    n += 1
    data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
    data *= window
    return data

times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
src = read_source_spaces(src_fname)
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
                          data_fun=data_fun, random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
fig.show()

##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
                       iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
                       n_jobs=2, verbose=True)
raw_sim.plot()

##############################################################################
# Plot evoked data
events = find_events(raw_sim)  # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical')  # quick calc
evoked = epochs.average()
evoked.plot_white(cov)
Example #2
events = find_events(raw, shortest_event=0, stim_channel='STI 014')

picks = pick_types(raw.info,
                   meg=False,
                   eeg=True,
                   stim=False,
                   eog=False,
                   exclude='bads')

# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=True,
                picks=picks,
                baseline=None,
                preload=True,
                add_eeg_ref=False)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2

###############################################################################
# Classification with linear discriminant analysis

from sklearn.lda import LDA  # noqa
from sklearn.cross_validation import ShuffleSplit  # noqa

# Assemble a classifier
svc = LDA()
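# A hedged continuation sketch, not part of the original snippet: it evaluates
# the classifier with CSP spatial filtering and cross-validation, using the
# current scikit-learn module paths (sklearn.lda and sklearn.cross_validation
# above were removed long ago). `epochs_train` and `labels` come from above.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline
from mne.decoding import CSP

csp = CSP(n_components=4, reg=None, log=True)  # spatial patterns as features
lda = LinearDiscriminantAnalysis()  # modern replacement for sklearn.lda.LDA
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_train.get_data(), labels, cv=cv, n_jobs=1)
print('Cross-validated accuracy: %0.3f' % scores.mean())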
Example #3
def test_render_report():
    """Test rendering -*.fif files for mne report.
    """
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = Raw(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir)
    assert_true(len(w) >= 1)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'),
                open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
Example #4
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
                out_numbers, must_match, decim, run_indices):
    """Generate epochs from raw data based on events

    Can only complete after preprocessing is complete.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    in_names : list of str
        Names of input events.
    in_numbers : list of list of int
        Event numbers (in scored event files) associated with each name.
    analyses : list of str
        Lists of analyses of interest.
    out_names : list of list of str
        Event types to make out of old ones.
    out_numbers : list of list of int
        Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
        three event types, where the first two and last two event types from
        the original list get collapsed over).
    must_match : list of int
        Indices from the original in_names that must match in event counts
        before collapsing. Should eventually be expanded to allow for
        ratio-based collapsing.
    decim : int | list of int
        Amount to decimate.
    run_indices : array-like | None
        Run indices to include.
    """
    in_names = np.asanyarray(in_names)
    old_dict = dict()
    for n, e in zip(in_names, in_numbers):
        old_dict[n] = e

    # let's do some sanity checks
    if len(in_names) != len(in_numbers):
        raise RuntimeError('in_names (%d) must have same length as '
                           'in_numbers (%d)' %
                           (len(in_names), len(in_numbers)))
    if np.any(np.array(in_numbers) <= 0):
        raise ValueError('in_numbers must all be > 0')
    if len(out_names) != len(out_numbers):
        raise RuntimeError('out_names must have same length as out_numbers')
    for name, num in zip(out_names, out_numbers):
        num = np.array(num)
        if len(name) != len(np.unique(num[num > 0])):
            raise RuntimeError('each entry in out_names must have length '
                               'equal to the number of unique elements in the '
                               'corresponding entry in out_numbers:\n%s\n%s' %
                               (name, np.unique(num[num > 0])))
        if len(num) != len(in_names):
            raise RuntimeError('each entry in out_numbers must have the same '
                               'length as in_names')
        if (np.array(num) == 0).any():
            raise ValueError('no element of out_numbers can be zero')

    ch_namess = list()
    drop_logs = list()
    sfreqs = set()
    for si, subj in enumerate(subjects):
        if p.disp_files:
            print('  Loading raw files for subject %s.' % subj)
        epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
        if not op.isdir(epochs_dir):
            os.mkdir(epochs_dir)
        evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
        if not op.isdir(evoked_dir):
            os.mkdir(evoked_dir)
        # read in raw files
        raw_names = get_raw_fnames(p, subj, 'pca', False, False,
                                   run_indices[si])
        first_samps = []
        last_samps = []
        for raw_fname in raw_names:
            raw = read_raw_fif(raw_fname, preload=False)
            first_samps.append(raw._first_samps[0])
            last_samps.append(raw._last_samps[-1])
        raw = [read_raw_fif(fname, preload=False) for fname in raw_names]
        _fix_raw_eog_cals(raw)  # EOG epoch scales might be bad!
        raw = concatenate_raws(raw)
        # read in events
        events = _read_events(p, subj, run_indices[si], raw)
        new_sfreq = raw.info['sfreq'] / decim[si]
        if p.disp_files:
            print('    Epoching data (decim=%s -> sfreq=%0.1f Hz).' %
                  (decim[si], new_sfreq))
        if new_sfreq not in sfreqs:
            if len(sfreqs) > 0:
                warnings.warn('resulting new sampling frequency %s not equal '
                              'to previous values %s' % (new_sfreq, sfreqs))
            sfreqs.add(new_sfreq)
        epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(
            p, subj, analyses)
        mat_file, fif_file = epochs_fnames
        if p.autoreject_thresholds:
            assert len(p.autoreject_types) > 0
            assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
                       for a in p.autoreject_types)
            from autoreject import get_rejection_threshold
            print('    Computing autoreject thresholds', end='')
            rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
            temp_epochs = Epochs(raw,
                                 events,
                                 event_id=None,
                                 tmin=rtmin,
                                 tmax=rtmax,
                                 baseline=_get_baseline(p),
                                 proj=True,
                                 reject=None,
                                 flat=None,
                                 preload=True,
                                 decim=decim[si],
                                 reject_by_annotation=p.reject_epochs_by_annot)
            kwargs = dict()
            if 'verbose' in get_args(get_rejection_threshold):
                kwargs['verbose'] = False
            new_dict = get_rejection_threshold(temp_epochs, **kwargs)
            use_reject = dict()
            msgs = list()
            for k in p.autoreject_types:
                msgs.append('%s=%d %s' % (k, DEFAULTS['scalings'][k] *
                                          new_dict[k], DEFAULTS['units'][k]))
                use_reject[k] = new_dict[k]
            print(': ' + ', '.join(msgs))
            hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
            assert hdf5_file.endswith('.h5')
            write_hdf5(hdf5_file, use_reject, overwrite=True)
        else:
            use_reject = _handle_dict(p.reject, subj)
        # create epochs
        flat = _handle_dict(p.flat, subj)
        use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
        epochs = Epochs(raw,
                        events,
                        event_id=old_dict,
                        tmin=p.tmin,
                        tmax=p.tmax,
                        baseline=_get_baseline(p),
                        reject=use_reject,
                        flat=use_flat,
                        proj=p.epochs_proj,
                        preload=True,
                        decim=decim[si],
                        on_missing=p.on_missing,
                        reject_tmin=p.reject_tmin,
                        reject_tmax=p.reject_tmax,
                        reject_by_annotation=p.reject_epochs_by_annot)
        del raw
        if epochs.events.shape[0] < 1:
            epochs.plot_drop_log()
            raise ValueError('No valid epochs')
        drop_logs.append(epochs.drop_log)
        ch_namess.append(epochs.ch_names)
        # epochs now only contains trials that were not dropped
        sfreq = epochs.info['sfreq']
        # now deal with conditions to save evoked
        if p.disp_files:
            print('    Matching trial counts and saving data to disk.')
        for var, name in ((out_names, 'out_names'), (out_numbers,
                                                     'out_numbers'),
                          (must_match, 'must_match'), (evoked_fnames,
                                                       'evoked_fnames')):
            if len(var) != len(analyses):
                raise ValueError('len(%s) (%s) != len(analyses) (%s)' %
                                 (name, len(var), len(analyses)))
        for analysis, names, numbers, match, fn in zip(analyses, out_names,
                                                       out_numbers, must_match,
                                                       evoked_fnames):
            # do matching
            numbers = np.asanyarray(numbers)
            nn = numbers[numbers >= 0]
            new_numbers = []
            for num in numbers:
                if num > 0 and num not in new_numbers:
                    # Eventually we could relax this requirement, but not
                    # having it in place is likely to cause people pain...
                    if any(num < n for n in new_numbers):
                        raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
                    new_numbers.append(num)
            new_numbers = np.array(new_numbers)
            in_names_match = in_names[match]
            # use some variables to allow safe name re-use
            offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
            safety_str = '__mnefun_copy__'
            assert len(new_numbers) == len(names)  # checked above
            if p.match_fun is None:
                # first, equalize trial counts (this will make a copy)
                e = epochs[list(in_names[numbers > 0])]
                if len(in_names_match) > 1:
                    e.equalize_event_counts(in_names_match)

                # second, collapse relevant types
                for num, name in zip(new_numbers, names):
                    collapse = [
                        x for x in in_names[num == numbers] if x in e.event_id
                    ]
                    combine_event_ids(e,
                                      collapse,
                                      {name + safety_str: num + offset},
                                      copy=False)
                for num, name in zip(new_numbers, names):
                    e.events[e.events[:, 2] == num + offset, 2] -= offset
                    e.event_id[name] = num
                    del e.event_id[name + safety_str]
            else:  # custom matching
                e = p.match_fun(epochs.copy(), analysis, nn, in_names_match,
                                names)

            # now make evoked for each out type
            evokeds = list()
            n_standard = 0
            kinds = ['standard']
            if p.every_other:
                kinds += ['even', 'odd']
            for kind in kinds:
                for name in names:
                    this_e = e[name]
                    if kind == 'even':
                        this_e = this_e[::2]
                    elif kind == 'odd':
                        this_e = this_e[1::2]
                    else:
                        assert kind == 'standard'
                    if len(this_e) > 0:
                        ave = this_e.average(picks='all')
                        stde = this_e.standard_error(picks='all')
                        if kind != 'standard':
                            ave.comment += ' %s' % (kind, )
                            stde.comment += ' %s' % (kind, )
                        evokeds.append(ave)
                        evokeds.append(stde)
                        if kind == 'standard':
                            n_standard += 2
            write_evokeds(fn, evokeds)
            naves = [
                str(n) for n in sorted(
                    set([evoked.nave for evoked in evokeds[:n_standard]]))
            ]
            naves = ', '.join(naves)
            if p.disp_files:
                print('      Analysis "%s": %s epochs / condition' %
                      (analysis, naves))

        if p.disp_files:
            print('    Saving epochs to disk.')
        if 'mat' in p.epochs_type:
            spio.savemat(mat_file,
                         dict(epochs=epochs.get_data(),
                              events=epochs.events,
                              sfreq=sfreq,
                              drop_log=epochs.drop_log),
                         do_compression=True,
                         oned_as='column')
        if 'fif' in p.epochs_type:
            epochs.save(fif_file, **_get_epo_kwargs())

    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
Example #5
def test_csd_degenerate(evoked_csd_sphere):
    """Test degenerate conditions."""
    evoked, csd, sphere = evoked_csd_sphere
    warn_evoked = evoked.copy()
    warn_evoked.info['bads'].append(warn_evoked.ch_names[3])
    with pytest.raises(ValueError, match='Either drop.*or interpolate'):
        compute_current_source_density(warn_evoked)

    with pytest.raises(TypeError, match='must be an instance of'):
        compute_current_source_density(None)

    fail_evoked = evoked.copy()
    with pytest.raises(ValueError, match='Zero or infinite position'):
        for ch in fail_evoked.info['chs']:
            ch['loc'][:3] = np.array([0, 0, 0])
        compute_current_source_density(fail_evoked, sphere=sphere)

    with pytest.raises(ValueError, match='Zero or infinite position'):
        fail_evoked.info['chs'][3]['loc'][:3] = np.inf
        compute_current_source_density(fail_evoked, sphere=sphere)

    with pytest.raises(ValueError, match='No EEG channels found.'):
        fail_evoked = evoked.copy()
        fail_evoked.set_channel_types(
            {ch_name: 'ecog'
             for ch_name in fail_evoked.ch_names})
        compute_current_source_density(fail_evoked, sphere=sphere)

    with pytest.raises(TypeError, match='lambda2'):
        compute_current_source_density(evoked, lambda2='0', sphere=sphere)

    with pytest.raises(ValueError, match='lambda2 must be between 0 and 1'):
        compute_current_source_density(evoked, lambda2=2, sphere=sphere)

    with pytest.raises(TypeError, match='stiffness must be'):
        compute_current_source_density(evoked, stiffness='0', sphere=sphere)

    with pytest.raises(ValueError, match='stiffness must be non-negative'):
        compute_current_source_density(evoked, stiffness=-2, sphere=sphere)

    with pytest.raises(TypeError, match='n_legendre_terms must be'):
        compute_current_source_density(evoked,
                                       n_legendre_terms=0.1,
                                       sphere=sphere)

    with pytest.raises(ValueError,
                       match=('n_legendre_terms must be '
                              'greater than 0')):
        compute_current_source_density(evoked,
                                       n_legendre_terms=0,
                                       sphere=sphere)

    with pytest.raises(ValueError, match='sphere must be'):
        compute_current_source_density(evoked, sphere=-0.1)

    with pytest.raises(ValueError,
                       match=('sphere radius must be '
                              'greater than 0')):
        compute_current_source_density(evoked, sphere=(-0.1, 0., 0., -1.))

    with pytest.raises(TypeError):
        compute_current_source_density(evoked, copy=2, sphere=sphere)

    # gh-7859
    raw = RawArray(evoked.data, evoked.info)
    epochs = Epochs(raw, [[0, 0, 1]],
                    tmin=0,
                    tmax=evoked.times[-1] - evoked.times[0],
                    baseline=None,
                    preload=False,
                    proj=False)
    epochs.drop_bad()
    assert len(epochs) == 1
    assert_allclose(epochs.get_data()[0], evoked.data)
    with pytest.raises(RuntimeError, match='Computing CSD requires.*preload'):
        compute_current_source_density(epochs)
    epochs.load_data()
    raw = compute_current_source_density(raw)
    assert not np.allclose(raw.get_data(), evoked.data)
    evoked = compute_current_source_density(evoked)
    assert_allclose(raw.get_data(), evoked.data)
    epochs = compute_current_source_density(epochs)
    assert_allclose(epochs.get_data()[0], evoked.data)
Example #6
def test_ica_core(method):
    """Test ICA on raw and epochs."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()

    # XXX. The None cases helped reveal bugs but are time-consuming.
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    noise_cov = [None, test_cov]
    # removed None cases to speed up...
    n_components = [2, 1.0]  # for future dbg add cases
    max_pca_components = [3]
    picks_ = [picks]
    methods = [method]
    iter_ica_params = product(noise_cov, n_components, max_pca_components,
                              picks_, methods)

    # test init catchers
    pytest.raises(ValueError, ICA, n_components=3, max_pca_components=2)
    pytest.raises(ValueError, ICA, n_components=2.3, max_pca_components=2)

    # test essential core functionality
    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
        # Test ICA raw
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method, max_iter=1)
        pytest.raises(ValueError, ica.__contains__, 'mag')

        print(ica)  # to test repr

        # test fit checker
        pytest.raises(RuntimeError, ica.get_sources, raw)
        pytest.raises(RuntimeError, ica.get_sources, epochs)

        # Test error upon empty epochs fitting
        with pytest.raises(RuntimeError, match='none were found'):
            ica.fit(epochs[0:0])

        # test decomposition
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        repr(ica)  # to test repr
        assert ('mag' in ica)  # should now work without error

        # test re-fit
        unmixing1 = ica.unmixing_matrix_
        with pytest.warns(UserWarning, match='did not converge'):
            ica.fit(raw, picks=pcks, start=start, stop=stop)
        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)

        raw_sources = ica.get_sources(raw)
        # test for #3804
        assert_equal(raw_sources._filenames, [None])
        print(raw_sources)

        # test for gh-6271 (scaling of ICA traces)
        fig = raw_sources.plot()
        assert len(fig.axes[0].lines) in (4, 5, 6)
        for line in fig.axes[0].lines:
            y = line.get_ydata()
            if len(y) > 2:  # actual data, not markers
                assert np.ptp(y) < 15
        plt.close('all')

        sources = raw_sources[:, :][0]
        assert (sources.shape[0] == ica.n_components_)

        # test preload filter
        raw3 = raw.copy()
        raw3.preload = False
        pytest.raises(RuntimeError, ica.apply, raw3,
                      include=[1, 2])

        #######################################################################
        # test epochs decomposition
        ica = ICA(noise_cov=n_cov, n_components=n_comp,
                  max_pca_components=max_n, n_pca_components=max_n,
                  random_state=0, method=method)
        with pytest.warns(None):  # sometimes warns
            ica.fit(epochs, picks=picks)
        data = epochs.get_data()[:, 0, :]
        n_samples = np.prod(data.shape)
        assert_equal(ica.n_samples_, n_samples)
        print(ica)  # to test repr

        sources = ica.get_sources(epochs).get_data()
        assert (sources.shape[1] == ica.n_components_)

        pytest.raises(ValueError, ica.score_sources, epochs,
                      target=np.arange(1))

        # test preload filter
        epochs3 = epochs.copy()
        epochs3.preload = False
        pytest.raises(RuntimeError, ica.apply, epochs3,
                      include=[1, 2])

    # test for bug with whitener updating
    _pre_whitener = ica.pre_whitener_.copy()
    epochs._data[:, 0, 10:15] *= 1e12
    ica.apply(epochs.copy())
    assert_array_equal(_pre_whitener, ica.pre_whitener_)

    # test explained-variance threshold leading to an empty selection
    ica.n_components = 0.1
    pytest.raises(RuntimeError, ica.fit, epochs)

    offender = 1, 2, 3,
    pytest.raises(ValueError, ica.get_sources, offender)
    pytest.raises(TypeError, ica.fit, offender)
    pytest.raises(TypeError, ica.apply, offender)
Example #7
def test_render_report(renderer, tmpdir):
    """Test rendering *.fif files for mne report."""
    tempdir = str(tmpdir)
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    nirs_fname_new = op.join(tempdir, 'temp_raw-nirs.snirf')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new],
                 [nirs_fname, nirs_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever, so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='tmax is not in Evoked'):
        evoked.crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    fnames.extend(glob.glob(op.join(tempdir, '*.snirf')))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')

    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
Example #8
def PreProcess(raw,
               event_id,
               plot_psd=False,
               filter_data=True,
               filter_range=(1, 30),
               plot_events=False,
               epoch_time=(-.2, 1),
               baseline=(-.2, 0),
               rej_thresh_uV=200,
               rereference=False,
               emcp_raw=False,
               emcp_epochs=False,
               epoch_decim=1,
               plot_electrodes=False,
               plot_erp=False):

    sfreq = raw.info['sfreq']
    # new output sampling frequency after epoch or wavelet decimation
    nsfreq = sfreq / epoch_decim
    tmin = epoch_time[0]
    tmax = epoch_time[1]
    if filter_range[1] > nsfreq:
        # keep the low-pass well below the decimated Nyquist frequency to avoid aliasing
        filter_range = (filter_range[0], nsfreq / 2.5)

    #pull event names in order of trigger number
    event_names = ['A_error', 'B_error']
    i = 0
    for key, value in sorted(event_id.items(), key=lambda x: (x[1], x[0])):
        event_names[i] = key
        i += 1

    #Filtering
    if rereference:
        print('Rereferencing to average mastoid')
        raw = mastoidReref(raw)

    if filter_data:
        print('Filtering Data Between ' + str(filter_range[0]) + ' and ' +
              str(filter_range[1]) + ' Hz.')
        raw.filter(filter_range[0],
                   filter_range[1],
                   method='iir',
                   verbose='WARNING')

    if plot_psd:
        raw.plot_psd(fmin=filter_range[0], fmax=nsfreq / 2)

    #Eye Correction
    if emcp_raw:
        print('Raw Eye Movement Correction')
        raw = GrattonEmcpRaw(raw)

    #Epoching
    events = find_events(raw, shortest_event=1)
    color = {1: 'red', 2: 'black'}
    #artifact rejection
    rej_thresh = rej_thresh_uV * 1e-6

    #plot event timing
    if plot_events:
        viz.plot_events(events,
                        sfreq,
                        raw.first_samp,
                        color=color,
                        event_id=event_id)

    #Construct events - Main function from MNE
    epochs = Epochs(raw,
                    events=events,
                    event_id=event_id,
                    tmin=tmin,
                    tmax=tmax,
                    baseline=baseline,
                    preload=True,
                    reject={'eeg': rej_thresh},
                    verbose=False,
                    decim=epoch_decim)
    print('Remaining Trials: ' + str(len(epochs)))

    #Gratton eye movement correction procedure on epochs
    if emcp_epochs:
        print('Epochs Eye Movement Correction')
        epochs = GrattonEmcpEpochs(epochs)

    ## plot ERP at each electrode
    evoked_dict = {
        event_names[0]: epochs[event_names[0]].average(),
        event_names[1]: epochs[event_names[1]].average()
    }

    # butterfly plot
    if plot_electrodes:
        picks = pick_types(evoked_dict[event_names[0]].info,
                           meg=False,
                           eeg=True,
                           eog=False)
        fig_zero = evoked_dict[event_names[0]].plot(spatial_colors=True,
                                                    picks=picks)
        fig_zero = evoked_dict[event_names[1]].plot(spatial_colors=True,
                                                    picks=picks)

    # plot ERP in each condition on same plot
    if plot_erp:
        # find the electrode highest on the head (largest z coordinate)
        picks = np.argmax([
            evoked_dict[event_names[0]].info['chs'][i]['loc'][2]
            for i in range(len(evoked_dict[event_names[0]].info['chs']))
        ])
        colors = {event_names[0]: "Red", event_names[1]: "Blue"}
        viz.plot_compare_evokeds(evoked_dict,
                                 colors=colors,
                                 picks=picks,
                                 split_legend=True)

    return epochs
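# A hypothetical usage sketch; the file name and event codes below are
# illustrative and not part of the original code. It assumes a Raw recording
# with a stim channel and two error conditions matching the defaults above.
import mne

raw = mne.io.read_raw_fif('subject01_raw.fif', preload=True)  # hypothetical file
epochs = PreProcess(raw, event_id={'A_error': 1, 'B_error': 2},
                    filter_data=True, filter_range=(1, 30),
                    epoch_time=(-.2, 1), baseline=(-.2, 0),
                    rej_thresh_uV=200, epoch_decim=1)
print('Remaining epochs: %d' % len(epochs))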
Example #9
def test_add_reference():
    """Test adding a reference."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag fif's, the reference electrode location is placed in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
    # CAR after custom reference is an Error
    assert_raises(RuntimeError, epochs_ref.set_eeg_reference)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    with warnings.catch_warnings(record=True):  # multiple set zero
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    with warnings.catch_warnings(record=True):  # multiple set zero
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw_np = read_raw_fif(fif_fname, preload=False)
    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
    assert_raises(ValueError, add_reference_channels, raw, 1)
Example #10
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')

event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2

raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=False, eeg=True, exclude=[])

reject = dict(eeg=80e-6)
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                picks=picks,
                preload=True,
                reject=reject)

# make sure it works if MEG channels are present:
picks2 = np.concatenate([[0, 1, 2], picks])
n_meg = 3
epochs2 = Epochs(raw,
                 events[epochs.selection],
                 event_id,
                 tmin,
                 tmax,
                 picks=picks2,
                 preload=True)
Example #11
def test_cov_estimation_with_triggers():
    """Test estimation from raw with triggers."""
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)

    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw,
                    events_merged,
                    1234,
                    tmin=-0.2,
                    tmax=0,
                    baseline=(-0.2, -0.1),
                    proj=True,
                    reject=reject,
                    preload=True)

    cov = compute_covariance(epochs, keep_sample_mean=True)
    _assert_cov(cov, read_cov(cov_km_fname))

    # Test with tmin and tmax slightly different from the epoch limits
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert_true(err < 0.05, msg=err)

    # cov using a list of epochs and keep_sample_mean=True
    epochs = [
        Epochs(raw,
               events,
               ev_id,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True,
               reject=reject) for ev_id in event_ids
    ]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)

    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    _assert_cov(cov, read_cov(cov_fname), nfree=False)

    method_params = {'empirical': {'assume_centered': False}}
    assert_raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method_params=method_params)
    assert_raises(ValueError,
                  compute_covariance,
                  epochs,
                  keep_sample_mean=False,
                  method='factor_analysis')

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)

    # cov with list of epochs with different projectors
    epochs = [
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=True),
        Epochs(raw,
               events[:1],
               None,
               tmin=-0.2,
               tmax=0,
               baseline=(-0.2, -0.1),
               proj=False)
    ]
    # these should fail
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    assert_equal(len(w), 2)

    # test new dict support
    epochs = Epochs(raw,
                    events,
                    dict(a=1, b=2, c=3, d=4),
                    tmin=-0.01,
                    tmax=0,
                    proj=True,
                    reject=reject,
                    preload=True)
    with warnings.catch_warnings(record=True):  # samples
        compute_covariance(epochs)

        # projs checking
        compute_covariance(epochs, projs=[])
    assert_raises(TypeError, compute_covariance, epochs, projs='foo')
    assert_raises(TypeError, compute_covariance, epochs, projs=['foo'])
Example #12
def test_plot_ica_properties():
    """Test plotting of ICA properties."""
    res = 8
    raw = _get_raw(preload=True)
    raw.add_proj([], remove_existing=True)
    events = _get_events()
    picks = _get_picks(raw)[:6]
    pick_names = [raw.ch_names[k] for k in picks]
    raw.pick_channels(pick_names)
    reject = dict(grad=4000e-13, mag=4e-12)

    epochs = Epochs(raw,
                    events[:10],
                    event_id,
                    tmin,
                    tmax,
                    baseline=(None, 0),
                    preload=True)

    ica = ICA(noise_cov=read_cov(cov_fname),
              n_components=2,
              max_iter=1,
              max_pca_components=2,
              n_pca_components=2,
              random_state=0)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw)

    # test _create_properties_layout
    fig, ax = _create_properties_layout()
    assert_equal(len(ax), 5)

    topoargs = dict(topomap_args={'res': res, 'contours': 0, "sensors": False})
    ica.plot_properties(raw, picks=0, **topoargs)
    ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
    ica.plot_properties(epochs,
                        picks=1,
                        image_args={'sigma': 1.5},
                        topomap_args={
                            'res': 10,
                            'colorbar': True
                        },
                        psd_args={'fmax': 65.},
                        plot_std=False,
                        figsize=[4.5, 4.5],
                        reject=reject)
    plt.close('all')

    pytest.raises(TypeError, ica.plot_properties, epochs, dB=list('abc'))
    pytest.raises(TypeError, ica.plot_properties, ica)
    pytest.raises(TypeError, ica.plot_properties, [0.2])
    pytest.raises(TypeError, plot_ica_properties, epochs, epochs)
    pytest.raises(TypeError, ica.plot_properties, epochs, psd_args='not dict')
    pytest.raises(ValueError, ica.plot_properties, epochs, plot_std=[])

    fig, ax = plt.subplots(2, 3)
    ax = ax.ravel()[:-1]
    ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
    fig = ica.plot_properties(raw, picks=[0, 1], **topoargs)
    assert_equal(len(fig), 2)
    pytest.raises(TypeError,
                  plot_ica_properties,
                  epochs,
                  ica,
                  picks=[0, 1],
                  axes=ax)
    pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
    plt.close('all')

    # Test merging grads.
    pick_names = raw.ch_names[:15:2] + raw.ch_names[1:15:2]
    raw = _get_raw(preload=True).pick_channels(pick_names)
    raw.info.normalize_proj()
    ica = ICA(random_state=0, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    ica.plot_properties(raw)
    plt.close('all')

    # Test handling of zeros
    raw._data[:] = 0
    with pytest.warns(None):  # Usually UserWarning: Infinite value .* for epo
        ica.plot_properties(raw)
    ica = ICA(random_state=0, max_iter=1)
    epochs.pick_channels(pick_names)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    epochs._data[0] = 0
    with pytest.warns(None):  # Usually UserWarning: Infinite value .* for epo
        ica.plot_properties(epochs)
    plt.close('all')
Example #13
evoked_fnames = load_data(condition=condition,
                          data_format='evoked',
                          data_type='simulation',
                          verbose=True)

raw = Raw(raw_fnames[0])
events = find_events(raw, stim_channel="STI 014", shortest_event=1)

# Visualize raw file
raw.plot()

# Make an evoked file from the experimental data
picks = pick_types(raw.info, meg=True, eog=True, exclude='bads')

# Read epochs
event_id, tmin, tmax = 9, -0.2, 0.5
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                baseline=(None, 0),
                picks=picks,
                reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()  # average epochs and get an Evoked dataset.
evoked.plot()

# Compare to the simulated data
evoked_sim = read_evokeds(evoked_fnames[0], condition=0)
evoked_sim.plot()
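# A hedged follow-up sketch, not in the original snippet: overlay the measured
# and simulated averages for a direct comparison. It assumes both Evoked
# objects can be restricted to a common magnetometer set; plot_compare_evokeds
# is a standard MNE call and the labels are illustrative.
from mne.viz import plot_compare_evokeds

evk_meas = evoked.copy().pick_types(meg='mag')
evk_sim = evoked_sim.copy().pick_types(meg='mag')
plot_compare_evokeds(dict(measured=evk_meas, simulated=evk_sim))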
Example #14
def test_apply_reference():
    """Test base function for rereferencing."""
    raw = read_raw_fif(fif_fname, preload=True)

    # Rereference raw data by creating a copy of original data
    reref, ref_data = _apply_reference(raw.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # The CAR reference projection should have been removed by the function
    assert (not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that data is modified in place when copy=False
    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
    assert (raw is reref)

    # Test that disabling the reference does not change anything
    reref, ref_data = _apply_reference(raw.copy(), [])
    assert_array_equal(raw._data, reref._data)

    # Test re-referencing Epochs object
    raw = read_raw_fif(fif_fname, preload=False)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    reref, ref_data = _apply_reference(epochs.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test re-referencing Evoked object
    evoked = epochs.average()
    reref, ref_data = _apply_reference(evoked.copy(),
                                       ref_from=['EEG 001', 'EEG 002'])
    assert (reref.info['custom_ref_applied'])
    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Referencing needs data to be preloaded
    raw_np = read_raw_fif(fif_fname, preload=False)
    pytest.raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])

    # Test having inactive SSP projections that deal with channels involved
    # during re-referencing
    raw = read_raw_fif(fif_fname, preload=True)
    raw.add_proj(
        Projection(
            active=False,
            data=dict(col_names=['EEG 001', 'EEG 002'],
                      row_names=None,
                      data=np.array([[1, 1]]),
                      ncol=2,
                      nrow=1),
            desc='test',
            kind=1,
        ))
    # Projection concerns channels mentioned in projector
    pytest.raises(RuntimeError, _apply_reference, raw, ['EEG 001'])

    # Projection does not concern channels mentioned in projector, no error
    _apply_reference(raw, ['EEG 003'], ['EEG 004'])
Exemplo n.º 15
0
def test_ica_eeg():
    """Test ICA on EEG."""
    method = 'fastica'
    raw_fif = read_raw_fif(fif_fname, preload=True)
    with warnings.catch_warnings(record=True):  # events etc.
        raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname,
                                     montage=eeglab_montage,
                                     preload=True)
    for raw in [raw_fif, raw_eeglab]:
        events = make_fixed_length_events(raw,
                                          99999,
                                          start=0,
                                          stop=0.3,
                                          duration=0.1)
        picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
        picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
        picks_all = []
        picks_all.extend(picks_meg)
        picks_all.extend(picks_eeg)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2,
                          random_state=0,
                          max_iter=2,
                          method=method)
                with warnings.catch_warnings(record=True):  # convergence
                    ica.fit(inst, picks=picks)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)

    with warnings.catch_warnings(record=True):  # misc channel
        raw = read_raw_ctf(ctf_fname2, preload=True)
    events = make_fixed_length_events(raw,
                                      99999,
                                      start=0,
                                      stop=0.2,
                                      duration=0.1)
    picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2]
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
    picks_all = picks_meg + picks_eeg
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True)
        evoked = epochs.average()

        for picks in [picks_meg, picks_eeg, picks_all]:
            if len(picks) == 0:
                continue
            # test fit
            for inst in [raw, epochs]:
                ica = ICA(n_components=2,
                          random_state=0,
                          max_iter=2,
                          method=method)
                with warnings.catch_warnings(record=True):  # convergence
                    ica.fit(inst)

            # test apply and get_sources
            for inst in [raw, epochs, evoked]:
                ica.apply(inst)
                ica.get_sources(inst)
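
# ----------------------------------------------------------------------------
# A minimal, self-contained usage sketch (not part of the test above; all
# names and values below are illustrative): in practice ICA on EEG is usually
# fit on high-pass-filtered data, blink-related components are found against
# an EOG channel, and the data are reconstructed without them.
import numpy as np
import mne
from mne.preprocessing import ICA

sfreq = 250.
rng = np.random.RandomState(42)
n_eeg, n_times = 10, int(sfreq * 60)
blink = np.zeros(n_times)
blink[::int(sfreq * 5)] = 1.  # one "blink" every 5 s
blink = np.convolve(blink, np.hanning(int(sfreq * 0.4)), mode='same')
eeg = 1e-6 * rng.randn(n_eeg, n_times) + 5e-5 * blink  # blinks leak into EEG
eog = 2e-4 * blink[np.newaxis] + 1e-6 * rng.randn(1, n_times)
info = mne.create_info(['EEG %02d' % i for i in range(n_eeg)] + ['EOG 001'],
                       sfreq, ['eeg'] * n_eeg + ['eog'])
raw_demo = mne.io.RawArray(np.concatenate([eeg, eog]), info)

raw_filt = raw_demo.copy().filter(l_freq=1., h_freq=None)  # ICA dislikes drifts
ica = ICA(n_components=5, random_state=97)
ica.fit(raw_filt, picks=mne.pick_types(raw_demo.info, meg=False, eeg=True))
eog_inds, eog_scores = ica.find_bads_eog(raw_filt)  # correlate with 'EOG 001'
ica.exclude = eog_inds
raw_clean = ica.apply(raw_demo.copy())  # EEG reconstructed minus blink comps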
Exemplo n.º 16
0
def test_psd():
    """Tests the welch and multitaper PSD
    """
    raw = io.read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Populate raw with sinusoids
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
    n_fft = 128

    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt),
             (compute_raw_psd, kws_welch)]

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        for func, kws in funcs:
            kws = kws.copy()
            kws.update(kws_psd)
            psds, freqs = func(raw, proj=False, **kws)
            psds_proj, freqs_proj = func(raw, proj=True, **kws)

            assert_true(psds.shape == (len(kws['picks']), len(freqs)))
            assert_true(np.sum(freqs < 0) == 0)
            assert_true(np.sum(psds < 0) == 0)

            # Is power found where it should be
            ixs_max = np.argmax(psds, axis=1)
            for ixmax, ifreq in zip(ixs_max, freqs_sig):
                # Find nearest frequency to the "true" freq
                ixtrue = np.argmin(np.abs(ifreq - freqs))
                assert_true(np.abs(ixmax - ixtrue) < 2)

            # Make sure the projection doesn't change channels it shouldn't
            assert_array_almost_equal(psds, psds_proj)
            # Array input shouldn't work
            assert_raises(ValueError, func, raw[:3, :20][0])
        assert_true(len(w) == 3)

    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw,
                    events[:10],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks_psd,
                    proj=False,
                    preload=True,
                    baseline=None)
    evoked = epochs.average()

    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw,
                         events[:10],
                         event_id,
                         tmin_full,
                         tmax_full,
                         picks=picks_psd,
                         proj=False,
                         preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch), (psd_multitaper, kws_mt),
             (compute_epochs_psd, kws_welch)]

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        for func, kws in funcs:
            kws = kws.copy()
            kws.update(kws_psd)

            psds, freqs = func(epochs[:1], proj=False, **kws)
            psds_proj, freqs_proj = func(epochs[:1], proj=True, **kws)
            psds_f, freqs_f = func(epochs_full[:1], proj=False, **kws)

            # this one will fail if you add for example 0.1 to tmin
            assert_array_almost_equal(psds, psds_f, 27)
            # Make sure the projection doesn't change channels it shouldn't
            assert_array_almost_equal(psds, psds_proj, 27)

            # Is power found where it should be
            ixs_max = np.argmax(psds.mean(0), axis=1)
            for ixmax, ifreq in zip(ixs_max, freqs_sig):
                # Find nearest frequency to the "true" freq
                ixtrue = np.argmin(np.abs(ifreq - freqs))
                assert_true(np.abs(ixmax - ixtrue) < 2)
            assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
            assert_true(np.sum(freqs < 0) == 0)
            assert_true(np.sum(psds < 0) == 0)

            # Array input shouldn't work
            assert_raises(ValueError, func, epochs.get_data())

            if func is not compute_epochs_psd:
                # Testing evoked (doesn't work w/ compute_epochs_psd)
                psds_ev, freqs_ev = func(evoked, proj=False, **kws)
                psds_ev_proj, freqs_ev_proj = func(evoked, proj=True, **kws)

                # Is power found where it should be
                ixs_max = np.argmax(psds_ev, axis=1)
                for ixmax, ifreq in zip(ixs_max, freqs_sig):
                    # Find nearest frequency to the "true" freq
                    ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
                    assert_true(np.abs(ixmax - ixtrue) < 2)

                # Make sure the projection doesn't change channels it shouldn't
                assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
                assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
        assert_true(len(w) == 3)
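
# ----------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the test above) of the pattern
# the test verifies: inject sinusoids into synthetic data and confirm that the
# Welch PSD peaks at (approximately) those frequencies. All names below are
# illustrative, not taken from the test fixtures.
import numpy as np
import mne
from mne.time_frequency import psd_welch

sfreq = 256.
t = np.arange(int(sfreq * 30)) / sfreq  # 30 s of data
rng = np.random.RandomState(0)
data = 1e-6 * rng.randn(2, t.size)
data[0] += 5e-6 * np.sin(2. * np.pi * 10. * t)  # 10 Hz on channel 0
data[1] += 5e-6 * np.sin(2. * np.pi * 40. * t)  # 40 Hz on channel 1
info = mne.create_info(['EEG 001', 'EEG 002'], sfreq, 'eeg')
raw_demo = mne.io.RawArray(data, info)

psds, freqs = psd_welch(raw_demo, fmin=2., fmax=70., n_fft=1024)
print(freqs[np.argmax(psds, axis=1)])  # expected to be close to [10., 40.]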
Exemplo n.º 17
0
def test_ica_full_data_recovery(method):
    """Test recovery of full data when no source is rejected."""
    # Most basic recovery
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[:10]
    with warnings.catch_warnings(record=True):  # bad proj
        epochs = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True)
    evoked = epochs.average()
    n_channels = 5
    data = raw._data[:n_channels].copy()
    data_epochs = epochs.get_data()
    data_evoked = evoked.data
    raw.annotations = Annotations([0.5], [0.5], ['BAD'])
    methods = [method]
    for method in methods:
        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
        for n_components, n_pca_components, ok in stuff:
            ica = ICA(n_components=n_components,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components,
                      method=method,
                      max_iter=1)
            with warnings.catch_warnings(record=True):
                ica.fit(raw, picks=list(range(n_channels)))
            raw2 = ica.apply(raw.copy(), exclude=[])
            if ok:
                assert_allclose(data[:n_channels],
                                raw2._data[:n_channels],
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
                assert (np.max(diff) > 1e-14)

            ica = ICA(n_components=n_components,
                      method=method,
                      max_pca_components=n_pca_components,
                      n_pca_components=n_pca_components)
            with warnings.catch_warnings(record=True):
                ica.fit(epochs, picks=list(range(n_channels)))
            epochs2 = ica.apply(epochs.copy(), exclude=[])
            data2 = epochs2.get_data()[:, :n_channels]
            if ok:
                assert_allclose(data_epochs[:, :n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(data_epochs[:, :n_channels] - data2)
                assert (np.max(diff) > 1e-14)

            evoked2 = ica.apply(evoked.copy(), exclude=[])
            data2 = evoked2.data[:n_channels]
            if ok:
                assert_allclose(data_evoked[:n_channels],
                                data2,
                                rtol=1e-10,
                                atol=1e-15)
            else:
                diff = np.abs(evoked.data[:n_channels] - data2)
                assert (np.max(diff) > 1e-14)
    pytest.raises(ValueError, ICA, method='pizza-decomposision')
Exemplo n.º 18
0
def test_time_frequency():
    """Test the to-be-deprecated time-frequency transform (PSD and ITC)"""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info,
                       meg='grad',
                       eeg=False,
                       stim=False,
                       include=include,
                       exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw,
                            events,
                            event_id,
                            tmin,
                            tmax,
                            baseline=(None, 0))

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0],
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked,
                              freqs,
                              n_cycles,
                              use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=n_cycles,
                            use_fft=True,
                            return_itc=True)
    power_, itc_ = tfr_morlet(epochs,
                              freqs=freqs,
                              n_cycles=n_cycles,
                              use_fft=True,
                              return_itc=True,
                              decim=slice(0, 2))
    # Test picks argument and average parameter
    assert_raises(ValueError,
                  tfr_morlet,
                  epochs,
                  freqs=freqs,
                  n_cycles=n_cycles,
                  return_itc=True,
                  average=False)

    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)

    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_picks_avg.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub

    power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(power_.data.shape == (len(picks), len(freqs), 2))
    assert_true(power_.data.shape == itc_.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(
        gave.data.shape,
        (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs,
                            freqs=freqs,
                            n_cycles=2,
                            use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    tfr2 = cwt_morlet(data[0],
                      Fs,
                      freqs,
                      use_fft=True,
                      n_cycles=2,
                      decim=slice(0, 2))
    assert_true(tfr2.shape == (len(picks), len(freqs), 2))

    single_power = single_trial_power(data,
                                      Fs,
                                      freqs,
                                      use_fft=False,
                                      n_cycles=2)
    single_power2 = single_trial_power(data,
                                       Fs,
                                       freqs,
                                       use_fft=False,
                                       n_cycles=2,
                                       decim=slice(0, 2))
    single_power3 = single_trial_power(data,
                                       Fs,
                                       freqs,
                                       use_fft=False,
                                       n_cycles=2,
                                       decim=slice(1, 3))
    single_power4 = single_trial_power(data,
                                       Fs,
                                       freqs,
                                       use_fft=False,
                                       n_cycles=2,
                                       decim=slice(2, 4))

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
    assert_array_almost_equal(np.mean(single_power2, axis=0),
                              power.data[:, :, :2])
    assert_array_almost_equal(np.mean(single_power3, axis=0), power.data[:, :,
                                                                         1:3])
    assert_array_almost_equal(np.mean(single_power4, axis=0), power.data[:, :,
                                                                         2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs,
                                    freqs=freqs,
                                    n_cycles=2,
                                    use_fft=use_fft,
                                    return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = range(50, 55)
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = cwt_morlet(data[0, :, :],
                     sfreq=epochs.info['sfreq'],
                     freqs=freqs,
                     decim=decim)
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError,
                              cwt,
                              data[0, :, :],
                              Ws,
                              use_fft=use_fft,
                              mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)

    # Test decim parameter checks
    assert_raises(TypeError,
                  single_trial_power,
                  data,
                  Fs,
                  freqs,
                  use_fft=False,
                  n_cycles=2,
                  decim=None)
    assert_raises(TypeError,
                  tfr_morlet,
                  epochs,
                  freqs=freqs,
                  n_cycles=n_cycles,
                  use_fft=True,
                  return_itc=True,
                  decim='decim')
Exemplo n.º 19
0
def test_ica_additional(method):
    """Test additional ICA functionality."""
    _skip_check_picard(method)

    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    raw.del_proj()  # avoid warnings
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[1::2]
    epochs = Epochs(raw, events, None, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs.decimate(3, verbose='error')
    assert len(epochs) == 4

    # test if n_components=None works
    ica = ICA(n_components=None, max_pca_components=None,
              n_pca_components=None, random_state=0, method=method, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    # for testing eog functionality
    picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)])
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)
    del picks2

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4, method=method)
    assert (ica.info is None)
    with pytest.warns(RuntimeWarning, match='normalize_proj'):
        ica.fit(raw, picks[:5])
    assert (isinstance(ica.info, Info))
    assert (ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, method=method,
              n_pca_components=4, random_state=0)
    pytest.raises(RuntimeError, ica.save, '')

    ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # check passing a ch_name to find_bads_ecg
    with pytest.warns(RuntimeWarning, match='longer'):
        _, scores_1 = ica.find_bads_ecg(raw)
        _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
    assert scores_1[0] != scores_2[0]

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    with pytest.raises(RuntimeError, match='No component detected'):
        corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False,)
    corrmap([ica, ica2], (0, 0), threshold=0.5, plot=False, show=False)
    assert (ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert (0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s')

    corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    assert (ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # No match
    bad_ica = ica2.copy()
    bad_ica.mixing_matrix_[:] = 0.
    with pytest.warns(RuntimeWarning, match='divide'):
        with catch_logging() as log:
            corrmap([ica, bad_ica], (0, 0), threshold=0.5, plot=False,
                    show=False, verbose=True)
    log = log.getvalue()
    assert 'No maps selected' in log

    # make sure a single threshold in a list works
    corrmap([ica, ica3], template, threshold=[0.5], label='blinks', plot=True,
            ch_type="mag")

    ica_different_channels = ICA(n_components=2, random_state=0).fit(
        raw, picks=[2, 3, 4, 5])
    pytest.raises(ValueError, corrmap, [ica_different_channels, ica], (0, 0))

    # test warnings on bad filenames
    ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        ica.save(ica_badname)
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        read_ica(ica_badname)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4, method=method, max_iter=1)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=picks[:5], decim=3)
    assert raw_._data.shape[1] == n_samples

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4, method=method, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=None, decim=3)
    assert (ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert (np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    pytest.raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4, method=method, max_iter=1)
        with pytest.warns(None):  # ICA does not converge
            ica.fit(raw, picks=picks[:10], start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert (ica.mixing_matrix_.shape == (2, 2))
        assert (ica.unmixing_matrix_.shape == (2, 2))
        assert (ica.pca_components_.shape == (4, 10))
        assert (sources.shape[1] == ica.n_components_)

        for exclude in [[], [0], np.array([1, 2, 3])]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert (list(ica.exclude) == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.apply(raw)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert (ica.exclude == [ica_raw.ch_names.index(e) for e in
                                    ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, fir_design='firwin2')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert ((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
        assert ((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert (ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ pre_whitener_')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     'pre_whitener_']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert (ica.ch_names == ica_read.ch_names)
        assert (isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew)
    # check exception handling
    pytest.raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)

    # Make sure detect_artifacts marks the right components.
    # For int criterion, the doc says "E.g. range(2) would return the two
    # sources with the highest score". Assert that's what it does.
    # Only test for skew, since it's always the same code.
    ica.exclude = []
    ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=None,
                         eog_ch=None, skew_criterion=0,
                         var_criterion=None, kurt_criterion=None)
    assert np.abs(scores[ica.exclude]) == np.max(np.abs(scores))

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(epochs, method='ctps')

    assert_equal(len(scores), ica.n_components_)
    pytest.raises(ValueError, ica.find_bads_ecg, epochs.average(),
                  method='ctps')
    pytest.raises(ValueError, ica.find_bads_ecg, raw,
                  method='crazy-coupling')

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert (isinstance(scores, list))
    assert_equal(len(scores[0]), ica.n_components_)

    idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(evoked, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with pytest.warns(RuntimeWarning, match='longer'):
        ecg_events = ica_find_ecg_events(
            raw, sources[np.abs(ecg_scores).argmax()])
    assert (ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        eog_events = ica_find_eog_events(
            raw, sources[np.abs(eog_scores).argmax()])
    assert (eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert (ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert (ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    assert (ica.n_components_ == ica_epochs.get_data().shape[1])
    assert (ica_epochs._raw is None)
    assert (ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert (ncomps_ == expected)

    ica = ICA(method=method)
    with pytest.warns(None):  # sometimes does not converge
        ica.fit(raw, picks=picks[:5])
    with pytest.warns(RuntimeWarning, match='longer'):
        ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    pytest.raises(RuntimeError, ica.find_bads_eog, raw)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(RuntimeError, ica.find_bads_ecg, raw)
Exemplo n.º 20
0
def test_compute_tfr():
    """Test _compute_tfr function"""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info,
                       meg='grad',
                       eeg=False,
                       stim=False,
                       include=[],
                       exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    sfreq = epochs.info['sfreq']
    freqs = np.arange(10, 20, 3).astype(float)

    # Check all combination of options
    for method, use_fft, zero_mean, output in product(
        ('multitaper', 'morlet'), (False, True), (False, True),
        ('complex', 'power', 'phase', 'avg_power_itc', 'avg_power', 'itc')):
        # Check exception
        if (method == 'multitaper') and (output == 'phase'):
            assert_raises(NotImplementedError,
                          _compute_tfr,
                          data,
                          freqs,
                          sfreq,
                          method=method,
                          output=output)
            continue

        # Check runs
        out = _compute_tfr(data,
                           freqs,
                           sfreq,
                           method=method,
                           use_fft=use_fft,
                           zero_mean=zero_mean,
                           n_cycles=2.,
                           output=output)
        # Check shapes
        shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
        if ('avg' in output) or ('itc' in output):
            assert_array_equal(shape[1:], out.shape)
        else:
            assert_array_equal(shape, out.shape)

        # Check types
        if output in ('complex', 'avg_power_itc'):
            assert_equal(np.complex, out.dtype)
        else:
            assert_equal(np.float, out.dtype)
        assert_true(np.all(np.isfinite(out)))

    # Check that functions are equivalent to
    # i) single_trial_power: X, shape (n_signals, n_chans, n_times)
    old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.)
    new_power = _compute_tfr(data,
                             freqs,
                             sfreq,
                             n_cycles=2.,
                             method='morlet',
                             output='power')
    assert_array_almost_equal(old_power, new_power)
    old_power = single_trial_power(data,
                                   sfreq,
                                   freqs,
                                   n_cycles=2.,
                                   times=epochs.times,
                                   baseline=(-.100, 0),
                                   baseline_mode='ratio')
    new_power = rescale(new_power, epochs.times, (-.100, 0), 'ratio')

    # ii) cwt_morlet: X, shape (n_signals, n_times)
    old_complex = cwt_morlet(data[0], sfreq, freqs, n_cycles=2.)
    new_complex = _compute_tfr(data[[0]],
                               freqs,
                               sfreq,
                               n_cycles=2.,
                               method='morlet',
                               output='complex')
    assert_array_almost_equal(old_complex, new_complex[0])

    # Check errors params
    for _data in (None, 'foo', data[0]):
        assert_raises(ValueError, _compute_tfr, _data, freqs, sfreq)
    for _freqs in (None, 'foo', [[0]]):
        assert_raises(ValueError, _compute_tfr, data, _freqs, sfreq)
    for _sfreq in (None, 'foo'):
        assert_raises(ValueError, _compute_tfr, data, freqs, _sfreq)
    for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
        for value in (None, 'foo'):
            kwargs = {key: value}  # FIXME pep8
            assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                          **kwargs)

    # No time_bandwidth param in morlet
    assert_raises(ValueError,
                  _compute_tfr,
                  data,
                  freqs,
                  sfreq,
                  method='morlet',
                  time_bandwidth=1)
    # No phase in multitaper XXX Check ?
    assert_raises(NotImplementedError,
                  _compute_tfr,
                  data,
                  freqs,
                  sfreq,
                  method='multitaper',
                  output='phase')

    # Inter-trial coherence tests
    out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
    assert_true(np.sum(out >= 1) == 0)
    assert_true(np.sum(out <= 0) == 0)

    # Check decim shapes
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
        _decim = slice(None, None, decim) if isinstance(decim, int) else decim
        n_time = len(np.arange(data.shape[2])[_decim])
        shape = np.r_[data.shape[:2], len(freqs), n_time]
        for method in ('multitaper', 'morlet'):
            # Single trials
            out = _compute_tfr(data,
                               freqs,
                               sfreq,
                               method=method,
                               decim=decim,
                               n_cycles=2.)
            assert_array_equal(shape, out.shape)
            # Averages
            out = _compute_tfr(data,
                               freqs,
                               sfreq,
                               method=method,
                               decim=decim,
                               output='avg_power',
                               n_cycles=2.)
            assert_array_equal(shape[1:], out.shape)
Exemplo n.º 21
0
picks = pick_types(raw.info, eeg=True)
events = mne.find_events(raw)

###############################################################################
# Create Epochs

# define target events:
# 1. find response times: the latency between "square" and "rt" events
# 2. keep only "square" events that are followed by a button press ("rt")
#    within 700 ms
tmax = .7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
                                       tmin=0., tmax=tmax, new_id=2)

epochs = Epochs(raw, events=new_events, tmax=tmax + .1,
                event_id={"square": 2}, picks=picks)

###############################################################################
# Plot

# Parameters for plotting
order = rts.argsort()  # sorting from fast to slow trials

rois = dict()
for pick, channel in enumerate(epochs.ch_names):
    last_char = channel[-1]  # for 10/20, last letter codes the hemisphere
    roi = ("Midline" if last_char in "z12" else
           ("Left" if int(last_char) % 2 else "Right"))
    rois[roi] = rois.get(roi, list()) + [pick]

# The actual plots
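# A hypothetical sketch of the plotting step (the original example's plotting
# code is not shown here): draw one ERP image per ROI, with trials sorted from
# fastest to slowest reaction time via the ``order`` indices computed above.
# This assumes no epochs were dropped, so ``order`` matches the epoch count.
from mne.viz import plot_epochs_image

for roi_name, roi_picks in rois.items():
    # one image per channel in the ROI; rows (epochs) are re-ordered by RT
    plot_epochs_image(epochs, picks=roi_picks, order=order, show=True)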
Exemplo n.º 22
0
def test_simulate_raw_bem(raw_data):
    """Test simulation of raw data with BEM."""
    raw, src_ss, stc, trans, sphere = raw_data
    src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir)
    for s in src:
        s['nuse'] = 3
        s['vertno'] = src[1]['vertno'][:3]
        s['inuse'].fill(0)
        s['inuse'][s['vertno']] = 1
    # use different / more complete STC here
    vertices = [s['vertno'] for s in src]
    stc = SourceEstimate(np.eye(sum(len(v) for v in vertices)), vertices, 0,
                         1. / raw.info['sfreq'])
    stcs = [stc] * 15
    raw_sim_sph = simulate_raw(raw.info, stcs, trans, src, sphere)
    raw_sim_bem = simulate_raw(raw.info, stcs, trans, src, bem_fname)
    # some components (especially radial) might not match that well,
    # so just make sure that most components have high correlation
    assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
    picks = pick_types(raw.info, meg=True, eeg=True)
    n_ch = len(picks)
    corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
    assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
    med_corr = np.median(np.diag(corr[:n_ch, -n_ch:]))
    assert med_corr > 0.65
    # do some round-trip localization
    for s in src:
        transform_surface_to(s, 'head', trans)
    locs = np.concatenate([s['rr'][s['vertno']] for s in src])
    tmax = (len(locs) - 1) / raw.info['sfreq']
    cov = make_ad_hoc_cov(raw.info)
    # The tolerance for the BEM is surprisingly high (28) but I get the same
    # result when using MNE-C and Xfit, even when using a proper 5120 BEM :(
    for use_raw, bem, tol in ((raw_sim_sph, sphere, 2), (raw_sim_bem,
                                                         bem_fname, 31)):
        events = find_events(use_raw, 'STI 014')
        assert len(locs) == 6
        evoked = Epochs(use_raw, events, 1, 0, tmax, baseline=None).average()
        assert len(evoked.times) == len(locs)
        fits = fit_dipole(evoked, cov, bem, trans, min_dist=1.)[0].pos
        diffs = np.sqrt(np.sum((locs - fits)**2, axis=-1)) * 1000
        med_diff = np.median(diffs)
        assert med_diff < tol, '%s: %s' % (bem, med_diff)
    # also test event timings with SourceSimulator
    first_samp = raw.first_samp
    events = find_events(raw, initial_event=True, verbose=False)
    evt_times = events[:, 0]
    assert len(events) == 3
    labels_sim = [[], [], []]  # random l+r hemisphere points
    labels_sim[0] = Label([src_ss[0]['vertno'][1]], hemi='lh')
    labels_sim[1] = Label([src_ss[0]['vertno'][4]], hemi='lh')
    labels_sim[2] = Label([src_ss[1]['vertno'][2]], hemi='rh')
    wf_sim = np.array([2, 1, 0])
    for this_fs in (0, first_samp):
        ss = SourceSimulator(src_ss,
                             1. / raw.info['sfreq'],
                             first_samp=this_fs)
        for i in range(3):
            ss.add_data(labels_sim[i], wf_sim, events[np.newaxis, i])
        assert ss.n_times == evt_times[-1] + len(wf_sim) - this_fs
    raw_sim = simulate_raw(raw.info,
                           ss,
                           src=src_ss,
                           bem=bem_fname,
                           first_samp=first_samp)
    data = raw_sim.get_data()
    amp0 = data[:, evt_times - first_samp].max()
    amp1 = data[:, evt_times + 1 - first_samp].max()
    amp2 = data[:, evt_times + 2 - first_samp].max()
    assert_allclose(amp0 / amp1, wf_sim[0] / wf_sim[1], rtol=1e-5)
    assert amp2 == 0
    assert raw_sim.n_times == ss.n_times
Exemplo n.º 23
0
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
stc = simulate_sparse_stc(src,
                          n_dipoles=n_dipoles,
                          times=times,
                          data_fun=data_fun,
                          random_state=rng)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()

##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw.info, [stc] * 10, forward=fwd, verbose=True)
cov = make_ad_hoc_cov(raw_sim.info)
add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], random_state=rng)
add_ecg(raw_sim, random_state=rng)
add_eog(raw_sim, random_state=rng)
raw_sim.plot()

##############################################################################
# Plot evoked data
events = find_events(raw_sim)  # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
                         verbose='error')  # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
Exemplo n.º 24
0
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    raw.del_proj()
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert_true(op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_raises(ValueError, Report, image_format='foo')
    assert_raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
Exemplo n.º 25
0
def test_io_set_raw(fnames, tmpdir):
    """Test importing EEGLAB .set files."""
    tmpdir = str(tmpdir)
    raw_fname, raw_fname_onefile = fnames
    with pytest.warns(RuntimeWarning) as w:
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname,
                         montage=montage)
        _test_raw_reader(read_raw_eeglab,
                         input_fname=raw_fname_onefile,
                         montage=montage)
    for want in ('Events like', 'consist entirely', 'could not be mapped',
                 'string preload is not supported'):
        assert (any(want in str(ww.message) for ww in w))
    with pytest.warns(RuntimeWarning) as w:
        # test finding events in continuous data
        event_id = {'rt': 1, 'square': 2}
        raw0 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=True)
        raw1 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=False)
        raw2 = read_raw_eeglab(input_fname=raw_fname_onefile,
                               montage=montage,
                               event_id=event_id)
        raw3 = read_raw_eeglab(input_fname=raw_fname,
                               montage=montage,
                               event_id=event_id)
        raw4 = read_raw_eeglab(input_fname=raw_fname, montage=montage)
        assert raw0.filenames[0].endswith('.fdt')  # .set with additional .fdt
        assert raw2.filenames[0].endswith('.set')  # standalone .set
        Epochs(raw0, find_events(raw0), event_id)
        epochs = Epochs(raw1, find_events(raw1), event_id)
        assert_equal(len(find_events(raw4)), 0)  # no events without event_id
        assert_equal(epochs["square"].average().nave, 80)  # 80 with
        # all four readers should yield identical data and times
        for other in (raw1, raw2, raw3):
            assert_array_equal(raw0[:][0], other[:][0])
            assert_array_equal(raw0[:][-1], other[:][-1])
        assert_equal(len(w), 4)
        # 1 for preload=False / str with fname_onefile, 3 for dropped events
        raw0.filter(1,
                    None,
                    l_trans_bandwidth='auto',
                    filter_length='auto',
                    phase='zero')  # test that preloading works

    # test that using uint16_codec does not break stuff
    raw0 = read_raw_eeglab(input_fname=raw_fname,
                           montage=montage,
                           event_id=event_id,
                           preload=False,
                           uint16_codec='ascii')

    # test old EEGLAB version event import (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    for event in eeg.event:  # old version allows integer events
        event.type = 1
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)
    eeg.event = eeg.event[0]  # single event
    eeg.event.latency = float(eeg.event.latency) - .1  # test rounding
    assert_equal(read_events_eeglab(eeg)[-1, -1], 1)

    # test reading file with one event (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']
    one_event_fname = op.join(tmpdir, 'test_one_event.set')
    io.savemat(one_event_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_one_event.fdt',
            'epoch': eeg.epoch,
            'event': eeg.event[0],
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False,
               oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    one_event_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    test_raw = read_raw_eeglab(input_fname=one_event_fname,
                               montage=montage,
                               event_id=event_id,
                               preload=True)

    # test that sample indices are read python-wise (zero-based)
    assert find_events(test_raw)[0, 0] == round(eeg.event[0].latency) - 1

    # test negative event latencies
    negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
    evnts = deepcopy(eeg.event[0])
    evnts.latency = 0
    io.savemat(negative_latency_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_one_event.fdt',
            'epoch': eeg.epoch,
            'event': evnts,
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False,
               oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    negative_latency_fname.replace('.set', '.fdt'))
    event_id = {eeg.event[0].type: 1}
    with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
        read_raw_eeglab(input_fname=negative_latency_fname,
                        preload=True,
                        event_id=event_id,
                        montage=montage)

    # test overlapping events
    overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
    io.savemat(overlap_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': eeg.nbchan,
            'data': 'test_overlap_event.fdt',
            'epoch': eeg.epoch,
            'event': [eeg.event[0], eeg.event[0]],
            'chanlocs': eeg.chanlocs,
            'pnts': eeg.pnts
        }
    },
               appendmat=False,
               oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    overlap_fname.replace('.set', '.fdt'))
    event_id = {'rt': 1, 'square': 2}
    with pytest.warns(RuntimeWarning, match='will be dropped'):
        raw = read_raw_eeglab(input_fname=overlap_fname,
                              montage=montage,
                              event_id=event_id,
                              preload=True)
    events_stimchan = find_events(raw)
    events_read_events_eeglab = read_events_eeglab(overlap_fname, event_id)
    assert (len(events_stimchan) == 1)
    assert (len(events_read_events_eeglab) == 2)

    # test reading file with one channel
    one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
    io.savemat(one_chan_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 1,
            'data': np.random.random((1, 3)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': {
                'labels': 'E1',
                'Y': -6.6069,
                'X': 6.3023,
                'Z': -2.9423
            },
            'times': eeg.times[:3],
            'pnts': 3
        }
    },
               appendmat=False,
               oned_as='row')
    with pytest.warns(None) as w:
        read_raw_eeglab(input_fname=one_chan_fname, preload=True)
    # there should be no 'no events found' warning
    assert len(w) == 0

    # test reading file with 3 channels - one without position information
    # first, create chanlocs structured array
    ch_names = ['F3', 'unknown', 'FPz']
    x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
    dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
    chanlocs = np.zeros((3, ), dtype=dt)
    for ind, vals in enumerate(zip(ch_names, x, y, z)):
        for fld in range(4):
            chanlocs[ind][dt[fld][0]] = vals[fld]

    if LooseVersion(np.__version__) == '1.14.0':
        # There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
        # this write to fail!
        raise SkipTest('Need to fix bug in NumPy 1.14.0!')

    # save set file
    one_chanpos_fname = op.join(tmpdir, 'test_chanpos.set')
    io.savemat(one_chanpos_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 3,
            'data': np.random.random((3, 3)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': chanlocs,
            'times': eeg.times[:3],
            'pnts': 3
        }
    },
               appendmat=False,
               oned_as='row')
    # load it
    with pytest.warns(RuntimeWarning, match='did not have a position'):
        raw = read_raw_eeglab(input_fname=one_chanpos_fname, preload=True)
    # position should be present for first two channels
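    # EEGLAB chanlocs use X towards the nose and Y towards the left ear, while
    # MNE head coordinates are (right, anterior, up); hence the [-Y, X, Z]
    # mapping checked below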
    for i in range(2):
        assert_array_equal(
            raw.info['chs'][i]['loc'][:3],
            np.array([-chanlocs[i]['Y'], chanlocs[i]['X'], chanlocs[i]['Z']]))
    # the last channel has no known location, so its position should be NaN
    assert_array_equal(raw.info['chs'][-1]['loc'][:3], [np.nan] * 3)

    # test reading channel names from set and positions from montage
    with pytest.warns(RuntimeWarning, match='did not have a position'):
        raw = read_raw_eeglab(input_fname=one_chanpos_fname,
                              preload=True,
                              montage=montage)

    # when montage was passed - channel positions should be taken from there
    correct_pos = [[-0.56705965, 0.67706631, 0.46906776], [np.nan] * 3,
                   [0., 0.99977915, -0.02101571]]
    for ch_ind in range(3):
        assert_array_almost_equal(raw.info['chs'][ch_ind]['loc'][:3],
                                  np.array(correct_pos[ch_ind]))

    # test reading channel names but not positions when there is no X (only Z)
    # field in the EEG.chanlocs structure
    nopos_chanlocs = chanlocs[['labels', 'Z']]
    nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
    io.savemat(nopos_fname, {
        'EEG': {
            'trials': eeg.trials,
            'srate': eeg.srate,
            'nbchan': 3,
            'data': np.random.random((3, 2)),
            'epoch': eeg.epoch,
            'event': eeg.epoch,
            'chanlocs': nopos_chanlocs,
            'times': eeg.times[:2],
            'pnts': 2
        }
    },
               appendmat=False,
               oned_as='row')
    # load the file
    raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
    # test that channel names have been loaded but not channel positions
    for i in range(3):
        assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
        assert_array_equal(raw.info['chs'][i]['loc'][:3],
                           np.array([np.nan, np.nan, np.nan]))
# find events and generate epochs
event_fname = './data/record-[2014.03.10-19.17.37]-eve.fif'
events = read_events(event_fname)
event_id = {'13 Hz': 2, '17 Hz': 4, '21 Hz': 3, 'resting-state': 1}

## Modified the Epoch parameters and added "Picks"
# Define the epoch time range in seconds (0 to 1 s relative to each event)
# Only works up to 151 s in record-[2012.07.19-11.24.02]
tmin, tmax = 0., 1.

# epochs = Epochs(raw, events, event_id, tmin=0, tmax=360.9, baseline=None)
epochs = Epochs(raw,
                events,
                event_id,
                tmin,
                tmax,
                proj=False,
                baseline=None,
                preload=True,
                verbose=False)

# Extract data from Epochs object in (Trial, Channel, Sample) format
epochs_data = epochs.get_data()

# Make a pipeline with riemannian geometry models

clf = make_pipeline(Covariances(), TangentSpace(metric='riemann'),
                    LogisticRegression())

# Get labels
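# A minimal evaluation sketch (the label extraction and CV settings below are
# illustrative assumptions, not part of the original snippet):
from sklearn.model_selection import cross_val_score

labels = epochs.events[:, -1]
scores = cross_val_score(clf, epochs_data, labels, cv=5, scoring='accuracy')
print('Mean CV accuracy: %0.3f' % scores.mean())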
Exemplo n.º 27
0
def test_add_reference():
    """Test adding reference channels to Raw, Epochs, and Evoked."""
    raw = Raw(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])
    # add reference channel to Raw
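    # add_reference_channels appends a zero-filled channel named 'Ref' and
    # leaves the existing data untouched (verified below)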
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    raw = Raw(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    # Test adding an existing channel as reference channel
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = Raw(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = Raw(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = Raw(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = Raw(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw_np = Raw(fif_fname, preload=False)
    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
    assert_raises(ValueError, add_reference_channels, raw, 1)
Exemplo n.º 28
0
def test_array_raw():
    """Test creating raw from array."""
    # creating
    raw = read_raw_fif(fif_fname).crop(2, 5)
    data, times = raw[:, :]
    sfreq = raw.info['sfreq']
    ch_names = [(ch[4:] if 'STI' not in ch else ch)
                for ch in raw.info['ch_names']]  # change them, why not
    types = list()
    for ci in range(101):
        types.extend(('grad', 'grad', 'mag'))
    types.extend(['ecog', 'seeg', 'hbo'])  # actually 3 MEG channels, relabeled
    types.extend(['stim'] * 9)
    types.extend(['eeg'] * 60)
    picks = np.concatenate([
        pick_types(raw.info)[::20],
        pick_types(raw.info, meg=False, stim=True),
        pick_types(raw.info, meg=False, eeg=True)[::20]
    ])
    del raw
    data = data[picks]
    ch_names = np.array(ch_names)[picks].tolist()
    types = np.array(types)[picks].tolist()
    types.pop(-1)
    # wrong length
    pytest.raises(ValueError, create_info, ch_names, sfreq, types)
    # bad entry
    types.append('foo')
    pytest.raises(KeyError, create_info, ch_names, sfreq, types)
    types[-1] = 'eog'
    # default type
    info = create_info(ch_names, sfreq)
    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
    # use real types
    info = create_info(ch_names, sfreq, types)
    raw2 = _test_raw_reader(RawArray,
                            test_preloading=False,
                            data=data,
                            info=info,
                            first_samp=2 * data.shape[1])
    data2, times2 = raw2[:, :]
    assert_allclose(data, data2)
    assert_allclose(times, times2)
    assert ('RawArray' in repr(raw2))
    pytest.raises(TypeError, RawArray, info, data)

    # filtering
    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
    assert_equal(len(picks), 4)
    raw_lp = raw2.copy()
    kwargs = dict(fir_design='firwin', picks=picks)
    raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs)
    raw_hp = raw2.copy()
    raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs)
    raw_bp = raw2.copy()
    raw_bp.filter(8.0,
                  12.0,
                  l_trans_bandwidth=4.,
                  h_trans_bandwidth=4.,
                  **kwargs)
    raw_bs = raw2.copy()
    raw_bs.filter(16.0,
                  4.0,
                  l_trans_bandwidth=4.,
                  h_trans_bandwidth=4.,
                  **kwargs)
    data, _ = raw2[picks, :]
    lp_data, _ = raw_lp[picks, :]
    hp_data, _ = raw_hp[picks, :]
    bp_data, _ = raw_bp[picks, :]
    bs_data, _ = raw_bs[picks, :]
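    # complementary filters should partition the signal: low-, band- and
    # high-pass outputs sum back to the original, as do band-pass + band-stop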
    sig_dec = 15
    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)

    # plotting
    raw2.plot()
    raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False)
    plt.close('all')

    # epoching
    events = find_events(raw2, stim_channel='STI 014')
    events[:, 2] = 1
    assert len(events) > 2
    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
    evoked = epochs.average()
    assert_equal(evoked.nave, len(events) - 1)

    # complex data
    rng = np.random.RandomState(0)
    data = rng.randn(1, 100) + 1j * rng.randn(1, 100)
    raw = RawArray(data, create_info(1, 1000., 'eeg'))
    assert_allclose(raw._data, data)

    # Using digital montage to give MNI electrode coordinates
    n_elec = 10
    ts_size = 10000
    Fs = 512.
    ch_names = [str(i) for i in range(n_elec)]
    ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist()

    data = np.random.rand(n_elec, ts_size)
    montage = make_dig_montage(ch_pos=dict(zip(ch_names, ch_pos_loc)))
    info = create_info(ch_names, Fs, 'ecog', montage=montage)

    raw = RawArray(data, info)
    raw.plot_psd(average=False)  # looking for a nonexistent layout
    raw.plot_psd_topo()
Exemplo n.º 29
0
def test_epochs_proj_mixin():
    """Test SSP proj methods from the ProjMixin class."""
    for proj in [True, False]:
        epochs = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        proj=proj)
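        # SSP projectors are applied (and marked active) when proj=True and
        # only stored when proj=False; the check below verifies that 'active'
        # mirrors the proj flag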

        assert_true(all(p['active'] == proj for p in epochs.info['projs']))

        # test adding / deleting proj
        if proj:
            epochs.get_data()
            assert_true(all(p['active'] == proj for p in epochs.info['projs']))
            assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
                          {'remove_existing': True})
            assert_raises(ValueError, epochs.add_proj, 'spam')
            assert_raises(ValueError, epochs.del_proj, 0)
        else:
            projs = deepcopy(epochs.info['projs'])
            n_proj = len(epochs.info['projs'])
            epochs.del_proj(0)
            assert_true(len(epochs.info['projs']) == n_proj - 1)
            epochs.add_proj(projs, remove_existing=False)
            assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
            epochs.add_proj(projs, remove_existing=True)
            assert_true(len(epochs.info['projs']) == n_proj)

    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    proj=False,
                    add_eeg_ref=True)
    epochs2 = Epochs(raw,
                     events[:4],
                     event_id,
                     tmin,
                     tmax,
                     picks=picks,
                     baseline=(None, 0),
                     proj=True,
                     add_eeg_ref=True)
    assert_allclose(epochs.copy().apply_proj().get_data()[0],
                    epochs2.get_data()[0])

    data = epochs.get_data().copy()
    data2 = np.array([e for e in epochs])
    assert_array_equal(data, data2)
    epochs.apply_proj()
    assert_array_equal(epochs._projector, epochs2._projector)
    assert_allclose(epochs._data, epochs2.get_data())
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=None,
                    proj=False,
                    add_eeg_ref=True)
    data = epochs.get_data().copy()
    epochs.apply_proj()
    assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
Exemplo n.º 30
0
    raw.filter(l_freq=.05, h_freq=25.0, fir_design='firwin')
    channel_trigger = np.where(np.array(raw.ch_names) == 'USPT001')[0][0]
    # trigger baseline is 255
    # Replace 255 values with 0 for easier reading of events
    trigger_baseline = np.where(raw._data[channel_trigger, :] == 255)[0]
    raw._data[channel_trigger, trigger_baseline] = 0.
    # find triggers
    events_meg = mne.find_events(raw)
    # Add 48 ms to the trigger events (delay measured with the photodiode)
    events_meg = np.array(events_meg, float)
    events_meg[:, 0] += round(.048 * raw.info['sfreq'])
    events_meg = np.array(events_meg, int)
    # Select only events corresponding to stimulus onset
    events_meg = events_meg[np.where(events_meg[:, 2] <= 125)]
    epochs = Epochs(raw, events_meg,
                    tmin=-0.35, tmax=0, preload=True,
                    baseline=(-0.35, 0.0), decim=10)
    # Copy first run dev_head_t to following runs
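    # (concatenate_epochs below needs matching measurement info, so later runs
    # reuse the first run's device-to-head transform)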
    if run_number == 0:
        dev_head_t = epochs.info['dev_head_t']
    else:
        epochs.info['dev_head_t'] = dev_head_t
    # no button 2 recording during acquisition of the 1st session for sub01
    if (subject == 'sub01_YFAALKWR_JA') & (epochs.info['nchan'] == 308):
        epochs.drop_channels(['UADC007-2104'])
    epochs_list.append(epochs)
epochs = concatenate_epochs(epochs_list)
# compute noise covariance and save it
cov = mne.compute_covariance(epochs)
cov_fname = op.join(subject_path, '%s-cov.fif' % subject)
cov.save(cov_fname)