Code example #1
File: test_core_gui.py Project: mh105/mne-python
def test_slice_browser_display(_slice_browser):
    """Test that the slice browser GUI displays properly."""
    # test no seghead, fsaverage doesn't have seghead
    with pytest.warns(RuntimeWarning, match='`seghead` not found'):
        with catch_logging() as log:
            _slice_browser(subject='fsaverage', subjects_dir=subjects_dir,
                           verbose=True)
    log = log.getvalue()
    assert 'using marching cubes' in log

    # test functions
    with pytest.warns(RuntimeWarning, match='`pial` surface not found'):
        gui = _slice_browser(subject=subject, subjects_dir=subjects_dir)

    # test RAS
    gui._RAS_textbox.setPlainText('10 10 10\n')
    assert_allclose(gui._ras, [10, 10, 10])

    # test vox
    gui._VOX_textbox.setPlainText('150, 150, 150\n')
    assert_allclose(gui._ras, [23, 22, 23])

    # test click
    with use_log_level('debug'):
        _fake_click(gui._figs[2], gui._figs[2].axes[0],
                    [137, 140], xform='data', kind='release')
    assert_allclose(gui._ras, [10, 12, 23])
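
The pattern above, catch_logging wrapped around use_log_level (or a verbose=True argument), is how all of these tests assert on MNE's log output. A minimal sketch of that pattern in isolation; noisy_function is a hypothetical stand-in for any verbose MNE call:

from mne.utils import catch_logging, use_log_level

def get_debug_output(noisy_function):
    # capture everything MNE logs while the level is raised to 'debug'
    with catch_logging() as log:
        with use_log_level('debug'):
            noisy_function()
    return log.getvalue()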
Code example #2
File: test_datasets.py Project: vlitvak/mne-python
def test_manifest_check_download(tmpdir, n_have, monkeypatch):
    """Test our manifest downloader."""
    monkeypatch.setattr(datasets.utils, '_fetch_file', _fake_zip_fetch)
    destination = op.join(str(tmpdir), 'empty')
    manifest_path = op.join(str(tmpdir), 'manifest.txt')
    with open(manifest_path, 'w') as fid:
        for fname in _zip_fnames:
            fid.write('%s\n' % fname)
    assert n_have in range(len(_zip_fnames) + 1)
    assert not op.isdir(destination)
    if n_have > 0:
        os.makedirs(op.join(destination, 'foo'))
        assert op.isdir(op.join(destination, 'foo'))
    for fname in _zip_fnames:
        assert not op.isfile(op.join(destination, fname))
    for fname in _zip_fnames[:n_have]:
        with open(op.join(destination, fname), 'w'):
            pass
    with catch_logging() as log:
        with use_log_level(True):
            url = hash_ = ''  # we mock the _fetch_file so these are not used
            _manifest_check_download(manifest_path, destination, url, hash_)
    log = log.getvalue()
    n_missing = 3 - n_have
    assert ('%d file%s missing from' % (n_missing, _pl(n_missing))) in log
    for want in ('Extracting missing', 'Successfully '):
        if n_missing > 0:
            assert want in log
        else:
            assert want not in log
    assert op.isdir(destination)
    for fname in _zip_fnames:
        assert op.isfile(op.join(destination, fname))
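
The assertion on '%d file%s missing from' relies on _pl, MNE's tiny pluralization helper. A minimal stand-in matching how the test uses it (an assumption for illustration, not the library's exact implementation, which also accepts sized containers):

def _pl(n):
    # return 's' when n != 1 so that '%d file%s' reads naturally
    return '' if n == 1 else 's'

assert '%d file%s missing' % (1, _pl(1)) == '1 file missing'
assert '%d file%s missing' % (3, _pl(3)) == '3 files missing'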
Code example #3
File: test_progressbar.py Project: aces/EEG2BIDS
def test_progressbar_parallel_more(capsys):
    """Test ProgressBar with parallel computing, advanced version."""
    assert capsys.readouterr().out == ''
    # This must be "1" because "capsys" won't get stdout properly otherwise
    parallel, p_fun, _ = parallel_func(_identity_block_wide,
                                       n_jobs=1,
                                       verbose=False)
    arr = np.arange(10)
    with use_log_level(True):
        with ProgressBar(len(arr) * 2) as pb:
            out = parallel(
                p_fun(x, pb.subset(pb_idx))
                for pb_idx, x in array_split_idx(arr, 2, n_per_split=2))
            idxs = np.concatenate([o[1] for o in out])
            assert_array_equal(idxs, np.arange(len(arr) * 2))
            out = np.concatenate([o[0] for o in out])
            assert op.isfile(pb._mmap_fname)
            sum_ = np.memmap(pb._mmap_fname,
                             dtype='bool',
                             mode='r',
                             shape=len(arr) * 2).sum()
            assert sum_ == len(arr) * 2
    assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
    cap = capsys.readouterr()
    out = cap.err
    assert '100%' in out
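
Note that the test reads cap.err rather than cap.out: tqdm-style progress bars write to stderr. A minimal sketch of driving ProgressBar directly, without the parallel machinery or the memmap bookkeeping:

import numpy as np
from mne.utils import ProgressBar

arr = np.arange(10)
with ProgressBar(arr, mesg='demo') as pb:
    for _ in pb:  # iterating the bar advances it
        pass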
Code example #4
def test_progressbar():
    """Test progressbar class."""
    a = np.arange(10)
    pbar = ProgressBar(a)
    assert a is pbar.iterable
    assert pbar.max_value == 10

    pbar = ProgressBar(10)
    assert pbar.max_value == 10
    assert pbar.iterable is None

    # Make sure that non-iterable input raises an error
    def iter_func(a):
        for ii in a:
            pass
    pytest.raises(Exception, iter_func, ProgressBar(20))

    # Make sure different progress bars can be used
    with catch_logging() as log, modified_env(MNE_TQDM='tqdm'), \
            use_log_level('debug'), ProgressBar(np.arange(3)) as pbar:
        for p in pbar:
            pass
    log = log.getvalue()
    assert 'Using ProgressBar with tqdm\n' in log
    with modified_env(MNE_TQDM='broken'), pytest.raises(ValueError):
        ProgressBar(np.arange(3))
    with modified_env(MNE_TQDM='tqdm.broken'), pytest.raises(AttributeError):
        ProgressBar(np.arange(3))
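
MNE chooses its progress-bar backend from the MNE_TQDM environment variable (a module path such as 'tqdm' or 'tqdm.auto'), which is why modified_env is enough to switch or deliberately break it above. A short sketch forcing the plain tqdm backend:

import numpy as np
from mne.utils import ProgressBar, modified_env

with modified_env(MNE_TQDM='tqdm'):
    for _ in ProgressBar(np.arange(3)):
        pass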
Code example #5
def test_progressbar_parallel_basic(capsys):
    """Test ProgressBar with parallel computing, basic version."""
    assert capsys.readouterr().out == ''
    parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
                                       verbose=True)
    with use_log_level(True):
        out = parallel(p_fun(x) for x in range(10))
    assert out == list(range(10))
    cap = capsys.readouterr()
    out = cap.err
    assert '100%' in out
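
parallel_func is MNE's joblib wrapper: it returns a Parallel-like caller, a delayed version of the given function, and the effective n_jobs. A minimal sketch with a trivial function:

from mne.parallel import parallel_func

def _square(x):
    return x * x

parallel, p_fun, _ = parallel_func(_square, n_jobs=1)
out = parallel(p_fun(x) for x in range(4))
assert out == [0, 1, 4, 9]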
Code example #6
def test_frame_info(capsys, monkeypatch):
    """Test _frame_info."""
    stack = _frame_info(100)
    assert 2 < len(stack) < 100
    this, pytest_line = stack[:2]
    assert re.match('^test_logging:[1-9][0-9]$', this) is not None, this
    assert 'pytest' in pytest_line
    capsys.readouterr()
    with use_log_level('debug', add_frames=4):
        _fun()
    out, _ = capsys.readouterr()
    out = out.replace('\n', ' ')
    assert re.match(
        '.*pytest'
        '.*test_logging:[2-9][0-9] '
        '.*test_logging:[1-9][0-9] :.*Test', out) is not None, this
    monkeypatch.setattr('inspect.currentframe', lambda: None)
    assert _frame_info(1) == ['unknown']
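
The add_frames argument makes use_log_level prepend the last N stack frames (as module:line) to every message, which is what the regexes above match. A small sketch:

from mne.utils import logger, use_log_level

with use_log_level('debug', add_frames=2):
    logger.debug('message prefixed with two caller frames')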
Code example #7
File: test_time_gen.py Project: jmontoyam/mne-python
def test_generalization_across_time():
    """Test time generalization decoding
    """
    from sklearn.svm import SVC
    from sklearn.base import is_classifier
    # KernelRidge is used for testing 1) regression analyses 2) n-dimensional
    # predictions.
    from sklearn.kernel_ridge import KernelRidge
    from sklearn.preprocessing import LabelEncoder
    from sklearn.metrics import roc_auc_score, mean_squared_error

    epochs = make_epochs()
    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
    if check_version('sklearn', '0.18'):
        from sklearn.model_selection import (KFold, StratifiedKFold,
                                             ShuffleSplit, LeaveOneLabelOut)
        cv_shuffle = ShuffleSplit()
        cv = LeaveOneLabelOut()
        # XXX we cannot pass any parameters other than X and y to cv.split,
        # so we have to build it beforehand
        cv_lolo = [(train, test) for train, test in cv.split(
                   X=y_4classes, y=y_4classes, labels=y_4classes)]

        # With sklearn >= 0.17, `clf` can be identified as a regressor, and
        # the scoring metrics can therefore be automatically assigned.
        scorer_regress = None
    else:
        from sklearn.cross_validation import (KFold, StratifiedKFold,
                                              ShuffleSplit, LeaveOneLabelOut)
        cv_shuffle = ShuffleSplit(len(epochs))
        cv_lolo = LeaveOneLabelOut(y_4classes)

        # With sklearn < 0.17, `clf` cannot be identified as a regressor, and
        # therefore the scoring metrics cannot be automatically assigned.
        scorer_regress = mean_squared_error
    # Test default running
    gat = GeneralizationAcrossTime(picks='foo')
    assert_equal("<GAT | no fit, no prediction, no score>", "%s" % gat)
    assert_raises(ValueError, gat.fit, epochs)
    with warnings.catch_warnings(record=True):
        # check classic fit + check manual picks
        gat.picks = [0]
        gat.fit(epochs)
        # check optional y as array
        gat.picks = None
        gat.fit(epochs, y=epochs.events[:, 2])
        # check optional y as list
        gat.fit(epochs, y=epochs.events[:, 2].tolist())
    assert_equal(len(gat.picks_), len(gat.ch_names), 1)
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), no "
                 "prediction, no score>", '%s' % gat)
    assert_equal(gat.ch_names, epochs.ch_names)
    # test different predict function:
    gat = GeneralizationAcrossTime(predict_method='decision_function')
    gat.fit(epochs)
    # With classifier, the default cv is StratifiedKFold
    assert_true(gat.cv_.__class__ == StratifiedKFold)
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 1))
    gat.predict_method = 'predict_proba'
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 2))
    gat.predict_method = 'foo'
    assert_raises(NotImplementedError, gat.predict, epochs)
    gat.predict_method = 'predict'
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 1))
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
                 "predicted 14 epochs, no score>",
                 "%s" % gat)
    gat.score(epochs)
    assert_true(gat.scorer_.__name__ == 'accuracy_score')
    # check clf / predict_method combinations for which the scoring metrics
    # cannot be inferred.
    gat.scorer = None
    gat.predict_method = 'decision_function'
    assert_raises(ValueError, gat.score, epochs)
    # Check specifying y manually
    gat.predict_method = 'predict'
    gat.score(epochs, y=epochs.events[:, 2])
    gat.score(epochs, y=epochs.events[:, 2].tolist())
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
                 "predicted 14 epochs,\n scored "
                 "(accuracy_score)>", "%s" % gat)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs, y=epochs.events[:, 2])

    old_mode = gat.predict_mode
    gat.predict_mode = 'super-foo-mode'
    assert_raises(ValueError, gat.predict, epochs)
    gat.predict_mode = old_mode

    gat.score(epochs, y=epochs.events[:, 2])
    assert_true("accuracy_score" in '%s' % gat.scorer_)
    epochs2 = epochs.copy()

    # check _DecodingTime class
    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
                 "0.050 (s), length: 0.050 (s), n_time_windows: 15>",
                 "%s" % gat.train_times_)
    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
                 "0.050 (s), length: 0.050 (s), n_time_windows: 15 x 15>",
                 "%s" % gat.test_times_)

    # the y-check
    gat.predict_mode = 'mean-prediction'
    epochs2.events[:, 2] += 10
    gat_ = copy.deepcopy(gat)
    with use_log_level('error'):
        assert_raises(ValueError, gat_.score, epochs2)
    gat.predict_mode = 'cross-validation'

    # Test basics
    # --- number of trials
    assert_true(gat.y_train_.shape[0] ==
                gat.y_true_.shape[0] ==
                len(gat.y_pred_[0][0]) == 14)
    # ---  number of folds
    assert_true(np.shape(gat.estimators_)[1] == gat.cv)
    # ---  length training size
    assert_true(len(gat.train_times_['slices']) == 15 ==
                np.shape(gat.estimators_)[0])
    # ---  length testing sizes
    assert_true(len(gat.test_times_['slices']) == 15 ==
                np.shape(gat.scores_)[0])
    assert_true(len(gat.test_times_['slices'][0]) == 15 ==
                np.shape(gat.scores_)[1])

    # Test score_mode
    gat.score_mode = 'foo'
    assert_raises(ValueError, gat.score, epochs)
    gat.score_mode = 'fold-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15, 5])
    gat.score_mode = 'mean-sample-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15])
    gat.score_mode = 'mean-fold-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15])
    gat.predict_mode = 'mean-prediction'
    with warnings.catch_warnings(record=True) as w:
        gat.score(epochs)
        assert_true(any("score_mode changed from " in str(ww.message)
                        for ww in w))

    # Test longer time window
    gat = GeneralizationAcrossTime(train_times={'length': .100})
    with warnings.catch_warnings(record=True):
        gat2 = gat.fit(epochs)
    assert_true(gat is gat2)  # return self
    assert_true(hasattr(gat2, 'cv_'))
    assert_true(gat2.cv_ != gat.cv)
    with warnings.catch_warnings(record=True):  # not vectorizing
        scores = gat.score(epochs)
    assert_true(isinstance(scores, np.ndarray))  # type check
    assert_equal(len(scores[0]), len(scores))  # shape check
    assert_equal(len(gat.test_times_['slices'][0][0]), 2)
    # Decim training steps
    gat = GeneralizationAcrossTime(train_times={'step': .100})
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)
    gat.score(epochs)
    assert_true(len(gat.scores_) == len(gat.estimators_) == 8)  # training time
    assert_equal(len(gat.scores_[0]), 15)  # testing time

    # Test start stop training & test cv without n_fold params
    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
    train_times = dict(start=0.090, stop=0.250)
    gat = GeneralizationAcrossTime(cv=cv_lolo, train_times=train_times)
    # predict without fit
    assert_raises(RuntimeError, gat.predict, epochs)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs, y=y_4classes)
    gat.score(epochs)
    assert_equal(len(gat.scores_), 4)
    assert_equal(gat.train_times_['times'][0], epochs.times[6])
    assert_equal(gat.train_times_['times'][-1], epochs.times[9])

    # Test score without passing epochs & Test diagonal decoding
    gat = GeneralizationAcrossTime(test_times='diagonal')
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.fit(epochs)
    assert_raises(RuntimeError, gat.score)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.predict(epochs)
    scores = gat.score()
    assert_true(scores is gat.scores_)
    assert_equal(np.shape(gat.scores_), (15, 1))
    assert_array_equal([tim for ttime in gat.test_times_['times']
                        for tim in ttime], gat.train_times_['times'])
    # Test generalization across conditions
    gat = GeneralizationAcrossTime(predict_mode='mean-prediction', cv=2)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs[0:6])
    with warnings.catch_warnings(record=True):
        # There are some empty test folds because of n_trials
        gat.predict(epochs[7:])
        gat.score(epochs[7:])

    # Test training time parameters
    gat_ = copy.deepcopy(gat)
    # --- start stop outside time range
    gat_.train_times = dict(start=-999.)
    with use_log_level('error'):
        assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(start=999.)
    assert_raises(ValueError, gat_.fit, epochs)
    # --- impossible slices
    gat_.train_times = dict(step=.000001)
    assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(length=.000001)
    assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(length=999.)
    assert_raises(ValueError, gat_.fit, epochs)

    # Test testing time parameters
    # --- outside time range
    gat.test_times = dict(start=-999.)
    with warnings.catch_warnings(record=True):  # no epochs in fold
        assert_raises(ValueError, gat.predict, epochs)
    gat.test_times = dict(start=999.)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    # --- impossible slices
    gat.test_times = dict(step=.000001)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    gat_ = copy.deepcopy(gat)
    gat_.train_times_['length'] = .000001
    gat_.test_times = dict(length=.000001)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat_.predict, epochs)
    # --- test time region of interest
    gat.test_times = dict(step=.150)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 5, 14, 1))
    # --- silly value
    gat.test_times = 'foo'
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    assert_raises(RuntimeError, gat.score)
    # --- unmatched length between training and testing time
    gat.test_times = dict(length=.150)
    assert_raises(ValueError, gat.predict, epochs)
    # --- irregular length training and testing times
    # 2 estimators, the first one is trained on two successive time samples
    # whereas the second one is trained on a single time sample.
    train_times = dict(slices=[[0, 1], [1]])
    # The first estimator is tested once, the second estimator is tested on
    # two successive time samples.
    test_times = dict(slices=[[[0, 1]], [[0], [1]]])
    gat = GeneralizationAcrossTime(train_times=train_times,
                                   test_times=test_times)
    gat.fit(epochs)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.score(epochs)
    assert_array_equal(np.shape(gat.y_pred_[0]), [1, len(epochs), 1])
    assert_array_equal(np.shape(gat.y_pred_[1]), [2, len(epochs), 1])
    # check that we cannot automatically infer testing times for ad hoc
    # training times
    gat.test_times = None
    assert_raises(ValueError, gat.predict, epochs)

    svc = SVC(C=1, kernel='linear', probability=True)
    gat = GeneralizationAcrossTime(clf=svc, predict_mode='mean-prediction')
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)

    # sklearn needs it: cf.
    # https://github.com/scikit-learn/scikit-learn/issues/2723
    # and http://bit.ly/1u7t8UT
    with use_log_level('error'):
        assert_raises(ValueError, gat.score, epochs2)
        gat.score(epochs)
    assert_true(0.0 <= np.min(scores) <= 1.0)
    assert_true(0.0 <= np.max(scores) <= 1.0)

    # Test that we get an error if we train on one dataset, test on another,
    # and don't specify an appropriate cv:
    gat = GeneralizationAcrossTime(cv=cv_shuffle)
    gat.fit(epochs)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)

    gat.predict(epochs)
    assert_raises(ValueError, gat.predict, epochs[:10])

    # Make CV with some empty train and test folds:
    # --- empty test fold(s) should warn when gat.predict()
    gat._cv_splits[0] = [gat._cv_splits[0][0], np.empty(0)]
    with warnings.catch_warnings(record=True) as w:
        gat.predict(epochs)
        assert_true(len(w) > 0)
        assert_true(any('do not have any test epochs' in str(ww.message)
                        for ww in w))
    # --- empty train fold(s) should raise when gat.fit()
    gat = GeneralizationAcrossTime(cv=[([0], [1]), ([], [0])])
    assert_raises(ValueError, gat.fit, epochs[:2])

    # Check that this still works with a classifier whose y_pred has
    # shape = (n_trials, 1) instead of (n_trials,)
    if check_version('sklearn', '0.17'):  # no is_regressor before v0.17
        gat = GeneralizationAcrossTime(clf=KernelRidge(), cv=2)
        epochs.crop(None, epochs.times[2])
        gat.fit(epochs)
        # With regression the default cv is KFold and not StratifiedKFold
        assert_true(gat.cv_.__class__ == KFold)
        gat.score(epochs)
        # with regression the default scoring metrics is mean squared error
        assert_true(gat.scorer_.__name__ == 'mean_squared_error')

    # Test combinations of complex scenarios
    # 2 or more distinct classes
    n_classes = [2, 4]  # 4 tested
    # nicely ordered labels or not
    le = LabelEncoder()
    y = le.fit_transform(epochs.events[:, 2])
    y[len(y) // 2:] += 2
    ys = (y, y + 1000)
    # Univariate and multivariate prediction
    svc = SVC(C=1, kernel='linear', probability=True)
    reg = KernelRidge()

    def scorer_proba(y_true, y_pred):
        return roc_auc_score(y_true, y_pred[:, 0])

    # We're testing 3 scenarios: default, classifier + predict_proba, regressor
    scorers = [None, scorer_proba, scorer_regress]
    predict_methods = [None, 'predict_proba', None]
    clfs = [svc, svc, reg]
    # Test all combinations
    for clf, predict_method, scorer in zip(clfs, predict_methods, scorers):
        for y in ys:
            for n_class in n_classes:
                for predict_mode in ['cross-validation', 'mean-prediction']:
                    # Cannot use AUC for n_class > 2
                    if (predict_method == 'predict_proba' and n_class != 2):
                        continue

                    y_ = y % n_class

                    with warnings.catch_warnings(record=True):
                        gat = GeneralizationAcrossTime(
                            cv=2, clf=clf, scorer=scorer,
                            predict_mode=predict_mode)
                        gat.fit(epochs, y=y_)
                        gat.score(epochs, y=y_)

                    # Check that scorer is correctly defined manually and
                    # automatically.
                    scorer_name = gat.scorer_.__name__
                    if scorer is None:
                        if is_classifier(clf):
                            assert_equal(scorer_name, 'accuracy_score')
                        else:
                            assert_equal(scorer_name, 'mean_squared_error')
                    else:
                        assert_equal(scorer_name, scorer.__name__)
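
GeneralizationAcrossTime is a legacy API that has since been removed from MNE; the modern replacement for the same analysis is mne.decoding.GeneralizingEstimator. A hedged sketch of the current pattern, not a drop-in rewrite of the test (X and y are placeholders with the shapes noted below):

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from mne.decoding import GeneralizingEstimator, cross_val_multiscore

clf = make_pipeline(StandardScaler(), LogisticRegression())
gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1)
# X: (n_epochs, n_channels, n_times), y: (n_epochs,)
# scores = cross_val_multiscore(gen, X, y, cv=5).mean(axis=0)  # (n_times, n_times)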
Code example #8
def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc):
    """Test time viewer primitives."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    with pytest.raises(ValueError, match="between 0 and 1"):
        _create_testing_brain(hemi='lh', show_traces=-1.0)
    with pytest.raises(ValueError, match="got unknown keys"):
        _create_testing_brain(hemi='lh',
                              surf='white',
                              src='volume',
                              volume_options={'foo': 'bar'})
    brain = _create_testing_brain(
        hemi='both',
        show_traces=False,
        brain_kwargs=dict(silhouette=dict(decimate=0.95)))
    # test sub routines when show_traces=False
    brain._on_pick(None, None)
    brain._configure_vertex_time_course()
    brain._configure_label_time_course()
    brain.setup_time_viewer()  # for coverage
    brain.callbacks["time"](value=0)
    assert "renderer" not in brain.callbacks
    brain.callbacks["orientation"](value='lat', update_widget=True)
    brain.callbacks["orientation"](value='medial', update_widget=True)
    brain.callbacks["time"](
        value=0.0,
        time_as_index=False,
    )
    # Need to process events for old Qt
    brain.callbacks["smoothing"](value=1)
    _assert_brain_range(brain, [0.1, 0.3])
    from mne.utils import use_log_level
    print('\nCallback fmin\n')
    with use_log_level('debug'):
        brain.callbacks["fmin"](value=12.0)
    assert brain._data["fmin"] == 12.0
    brain.callbacks["fmax"](value=4.0)
    _assert_brain_range(brain, [4.0, 4.0])
    brain.callbacks["fmid"](value=6.0)
    _assert_brain_range(brain, [4.0, 6.0])
    brain.callbacks["fmid"](value=4.0)
    brain.callbacks["fplus"]()
    brain.callbacks["fminus"]()
    brain.callbacks["fmin"](value=12.0)
    brain.callbacks["fmid"](value=4.0)
    _assert_brain_range(brain, [4.0, 12.0])
    brain._shift_time(op=lambda x, y: x + y)
    brain._shift_time(op=lambda x, y: x - y)
    brain._rotate_azimuth(15)
    brain._rotate_elevation(15)
    brain.toggle_interface()
    brain.toggle_interface(value=False)
    brain.callbacks["playback_speed"](value=0.1)
    brain.toggle_playback()
    brain.toggle_playback(value=False)
    brain.apply_auto_scaling()
    brain.restore_user_scaling()
    brain.reset()
    plt.close('all')
    brain.help()
    assert len(plt.get_fignums()) == 1
    plt.close('all')
    assert len(plt.get_fignums()) == 0

    # screenshot
    # Need to turn the interface back on otherwise the window is too wide
    # (it keeps the window size and expands the 3D area when the interface
    # is toggled off)
    brain.toggle_interface(value=True)
    brain.show_view(view=dict(azimuth=180., elevation=90.))
    img = brain.screenshot(mode='rgb')
    want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
    assert_allclose(img.shape, want_shape)
    brain.close()
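
Building a Brain the way the helper above does requires FreeSurfer subject files, so the public calls exercised by the test are sketched here commented out (assuming the sample dataset is available):

# import mne
# subjects_dir = mne.datasets.sample.data_path() / 'subjects'
# brain = mne.viz.Brain('fsaverage', hemi='lh', surf='pial',
#                       subjects_dir=subjects_dir)
# brain.show_view(view=dict(azimuth=180., elevation=90.))
# img = brain.screenshot(mode='rgb')
# brain.close()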
Code example #9
def test_logging_options(tmpdir):
    """Test logging (to file)."""
    with use_log_level(None):  # just ensure it's set back
        with pytest.raises(ValueError, match="Invalid value for the 'verbose"):
            set_log_level('foo')
        tempdir = str(tmpdir)
        test_name = op.join(tempdir, 'test.log')
        with open(fname_log, 'r') as old_log_file:
            # [:-1] used to strip an extra "No baseline correction applied"
            old_lines = clean_lines(old_log_file.readlines())
            old_lines.pop(-1)
        with open(fname_log_2, 'r') as old_log_file_2:
            old_lines_2 = clean_lines(old_log_file_2.readlines())
            old_lines_2.pop(14)
            old_lines_2.pop(-1)

        if op.isfile(test_name):
            os.remove(test_name)
        # test it one way (printing default off)
        set_log_file(test_name)
        set_log_level('WARNING')
        # should NOT print
        evoked = read_evokeds(fname_evoked, condition=1)
        with open(test_name) as fid:
            assert (fid.readlines() == [])
        # should NOT print
        evoked = read_evokeds(fname_evoked, condition=1, verbose=False)
        with open(test_name) as fid:
            assert (fid.readlines() == [])
        # should NOT print
        evoked = read_evokeds(fname_evoked, condition=1, verbose='WARNING')
        with open(test_name) as fid:
            assert (fid.readlines() == [])
        # SHOULD print
        evoked = read_evokeds(fname_evoked, condition=1, verbose=True)
        with open(test_name, 'r') as new_log_file:
            new_lines = clean_lines(new_log_file.readlines())
        assert new_lines == old_lines
        set_log_file(None)  # Need to do this to close the old file
        os.remove(test_name)

        # now go the other way (printing default on)
        set_log_file(test_name)
        set_log_level('INFO')
        # should NOT print
        evoked = read_evokeds(fname_evoked, condition=1, verbose='WARNING')
        with open(test_name) as fid:
            assert (fid.readlines() == [])
        # should NOT print
        evoked = read_evokeds(fname_evoked, condition=1, verbose=False)
        with open(test_name) as fid:
            assert (fid.readlines() == [])
        # SHOULD print
        evoked = read_evokeds(fname_evoked, condition=1)
        with open(test_name, 'r') as new_log_file:
            new_lines = clean_lines(new_log_file.readlines())
        assert new_lines == old_lines
        # check to make sure appending works (and as default, raises a warning)
        set_log_file(test_name, overwrite=False)
        with pytest.warns(RuntimeWarning, match='appended to the file'):
            set_log_file(test_name)
        evoked = read_evokeds(fname_evoked, condition=1)
        with open(test_name, 'r') as new_log_file:
            new_lines = clean_lines(new_log_file.readlines())
        assert new_lines == old_lines_2

        # make sure overwriting works
        set_log_file(test_name, overwrite=True)
        # this line needs to be called to actually do some logging
        evoked = read_evokeds(fname_evoked, condition=1)
        del evoked
        with open(test_name, 'r') as new_log_file:
            new_lines = clean_lines(new_log_file.readlines())
        assert new_lines == old_lines
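
The knobs this test exercises are public API: set_log_level sets the threshold, set_log_file redirects output (appending by default, with a warning), and use_log_level scopes a temporary change. A minimal sketch; 'run.log' is a hypothetical path:

import mne

mne.set_log_level('WARNING')                 # silence INFO messages
mne.set_log_file('run.log', overwrite=True)  # write the log to a file
# ... verbose MNE calls would log to run.log here ...
mne.set_log_file(None)                       # close the file, back to console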
Code example #10
def test_ieeg_elec_locate_gui_display(_locate_ieeg, _fake_CT_coords):
    """Test that the intracranial location GUI displays properly."""
    raw = mne.io.read_raw_fif(raw_path, preload=True)
    raw.pick_types(eeg=True)
    ch_dict = {
        'EEG 001': 'LAMY 1',
        'EEG 002': 'LAMY 2',
        'EEG 003': 'LSTN 1',
        'EEG 004': 'LSTN 2'
    }
    raw.pick_channels(list(ch_dict.keys()))
    raw.rename_channels(ch_dict)
    raw.set_eeg_reference('average')
    raw.set_channel_types({name: 'seeg' for name in raw.ch_names})
    raw.set_montage(None)
    aligned_ct, coords = _fake_CT_coords
    trans = mne.read_trans(fname_trans)

    # test no seghead, fsaverage doesn't have seghead
    with pytest.warns(RuntimeWarning, match='`seghead` not found'):
        with catch_logging() as log:
            _locate_ieeg(raw.info,
                         trans,
                         aligned_ct,
                         subject='fsaverage',
                         subjects_dir=subjects_dir,
                         verbose=True)
    log = log.getvalue()
    assert 'Using marching cubes' in log

    # test functions
    with pytest.warns(RuntimeWarning, match='`pial` surface not found'):
        gui = _locate_ieeg(raw.info,
                           trans,
                           aligned_ct,
                           subject=subject,
                           subjects_dir=subjects_dir,
                           verbose=True)

    with pytest.raises(ValueError, match='read-only'):
        gui._ras[:] = coords[0]  # start in the right position
    gui._set_ras(coords[0])
    gui._mark_ch()
    assert not gui._lines and not gui._lines_2D  # no lines for one contact
    for ci, coord in enumerate(coords[1:], 1):
        coord_vox = apply_trans(gui._ras_vox_t, coord)
        with use_log_level('debug'):
            _fake_click(gui._figs[2],
                        gui._figs[2].axes[0],
                        coord_vox[:-1],
                        xform='data',
                        kind='release')
        assert_allclose(coord[:2],
                        gui._ras[:2],
                        atol=0.1,
                        err_msg=f'coords[{ci}][:2]')
        assert_allclose(coord[2],
                        gui._ras[2],
                        atol=2,
                        err_msg=f'coords[{ci}][2]')
        gui._mark_ch()

    # ensure a 3D line was made for each group
    assert len(gui._lines) == 2

    # test snap to center
    gui._ch_index = 0
    gui._set_ras(coords[0])  # move to first position
    gui._mark_ch()
    assert_allclose(coords[0], gui._chs['LAMY 1'], atol=0.2)
    gui._snap_button.click()
    assert gui._snap_button.text() == 'Off'
    # now make sure no snap happens
    gui._ch_index = 0
    gui._set_ras(coords[1] + 1)
    gui._mark_ch()
    assert_allclose(coords[1] + 1, gui._chs['LAMY 1'], atol=0.01)
    # check that it turns back on
    gui._snap_button.click()
    assert gui._snap_button.text() == 'On'

    # test remove
    gui._ch_index = 1
    gui._update_ch_selection()
    gui._remove_ch()
    assert np.isnan(gui._chs['LAMY 2']).all()

    # check that raw object saved
    assert not np.isnan(raw.info['chs'][0]['loc'][:3]).any()  # LAMY 1
    assert np.isnan(raw.info['chs'][1]['loc'][:3]).all()  # LAMY 2 (removed)

    # move sliders
    gui._alpha_slider.setValue(75)
    assert gui._ch_alpha == 0.75
    gui._radius_slider.setValue(5)
    assert gui._radius == 5
    ct_sum_before = np.nansum(gui._images['ct'][0].get_array().data)
    gui._ct_min_slider.setValue(500)
    assert np.nansum(gui._images['ct'][0].get_array().data) < ct_sum_before

    # test buttons
    gui._toggle_show_brain()
    assert 'mri' in gui._images
    assert 'local_max' not in gui._images
    gui._toggle_show_max()
    assert 'local_max' in gui._images
    assert 'mip' not in gui._images
    gui._toggle_show_mip()
    assert 'mip' in gui._images
    assert 'mip_chs' in gui._images
    assert len(gui._lines_2D) == 1  # LAMY only has one contact
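
apply_trans, used above to convert RAS coordinates into CT voxel indices, simply pushes points through a 4x4 affine. A self-contained sketch with a pure translation:

import numpy as np
from mne.transforms import apply_trans

affine = np.eye(4)
affine[:3, 3] = [1., 2., 3.]  # translate by (1, 2, 3)
print(apply_trans(affine, [0., 0., 0.]))  # -> [1. 2. 3.]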
Code example #11
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
                out_numbers, must_match, decim, run_indices):
    """Generate epochs from raw data based on events.

    Can only complete after preprocessing is complete.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    in_names : list of str
        Names of input events.
    in_numbers : list of list of int
        Event numbers (in scored event files) associated with each name.
    analyses : list of str
        Lists of analyses of interest.
    out_names : list of list of str
        Event types to make out of old ones.
    out_numbers : list of list of int
        Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
        three event types, where the first two and last two event types from
        the original list get collapsed over).
    must_match : list of int
        Indices from the original in_names that must match in event counts
        before collapsing. Should eventually be expanded to allow for
        ratio-based collapsing.
    decim : int | list of int
        Amount to decimate.
    run_indices : array-like | None
        Run indices to include.
    """
    in_names = np.asanyarray(in_names)
    old_dict = dict()
    for n, e in zip(in_names, in_numbers):
        old_dict[n] = e

    # let's do some sanity checks
    if len(in_names) != len(in_numbers):
        raise RuntimeError('in_names (%d) must have same length as '
                           'in_numbers (%d)'
                           % (len(in_names), len(in_numbers)))
    if np.any(np.array(in_numbers) <= 0):
        raise ValueError('in_numbers must all be > 0')
    if len(out_names) != len(out_numbers):
        raise RuntimeError('out_names must have same length as out_numbers')
    for name, num in zip(out_names, out_numbers):
        num = np.array(num)
        if len(name) != len(np.unique(num[num > 0])):
            raise RuntimeError('each entry in out_names must have length '
                               'equal to the number of unique elements in the '
                               'corresponding entry in out_numbers:\n%s\n%s'
                               % (name, np.unique(num[num > 0])))
        if len(num) != len(in_names):
            raise RuntimeError('each entry in out_numbers must have the same '
                               'length as in_names')
        if (np.array(num) == 0).any():
            raise ValueError('no element of out_numbers can be zero')

    ch_namess = list()
    drop_logs = list()
    sfreqs = set()
    for si, subj in enumerate(subjects):
        if p.disp_files:
            print('  Loading raw files for subject %s.' % subj)
        epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
        if not op.isdir(epochs_dir):
            os.mkdir(epochs_dir)
        evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
        if not op.isdir(evoked_dir):
            os.mkdir(evoked_dir)
        # read in raw files
        raw_names = get_raw_fnames(p, subj, 'pca', False, False,
                                   run_indices[si])
        raw, ratios = _concat_resamp_raws(p, subj, raw_names)
        assert ratios.shape == (len(raw_names),)
        # optionally calculate autoreject thresholds
        this_decim = _handle_decim(decim[si], raw.info['sfreq'])
        new_sfreq = raw.info['sfreq'] / this_decim
        if p.disp_files:
            print('    Epoching data (decim=%s -> sfreq=%0.1f Hz).'
                  % (this_decim, new_sfreq))
        if new_sfreq not in sfreqs:
            if len(sfreqs) > 0:
                warnings.warn('resulting new sampling frequency %s not equal '
                              'to previous values %s' % (new_sfreq, sfreqs))
            sfreqs.add(new_sfreq)
        epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(p, subj,
                                                                 analyses)
        mat_file, fif_file = epochs_fnames
        # handle regex-based reject_epochs_by_annot defs
        reject_epochs_by_annot = _check_reject_annot_regex(p, raw)
        if p.autoreject_thresholds:
            assert len(p.autoreject_types) > 0
            assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
                       for a in p.autoreject_types)
            from autoreject import get_rejection_threshold
            picker = p.pick_events_autoreject
            if type(picker) is str:
                assert picker == 'restrict', \
                    'Only "restrict" is valid str for p.pick_events_autoreject'
            events = _read_events(
                p, subj, run_indices[si], raw, ratios, picker=picker)
            print('    Computing autoreject thresholds', end='')
            rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
            temp_epochs = Epochs(
                raw, events, event_id=None, tmin=rtmin, tmax=rtmax,
                baseline=_get_baseline(p), proj=True, reject=None,
                flat=None, preload=True, decim=this_decim,
                reject_by_annotation=reject_epochs_by_annot)
            kwargs = dict()
            if 'verbose' in get_args(get_rejection_threshold):
                kwargs['verbose'] = False
            new_dict = get_rejection_threshold(temp_epochs, **kwargs)
            use_reject = dict()
            msgs = list()
            for k in p.autoreject_types:
                msgs.append('%s=%d %s'
                            % (k, DEFAULTS['scalings'][k] * new_dict[k],
                               DEFAULTS['units'][k]))
                use_reject[k] = new_dict[k]
            print(': ' + ', '.join(msgs))
            hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
            assert hdf5_file.endswith('.h5')
            write_hdf5(hdf5_file, use_reject, overwrite=True)
        else:
            use_reject = _handle_dict(p.reject, subj)

        # read in events and create epochs
        events = _read_events(p, subj, run_indices[si], raw, ratios,
                              picker='restrict')
        if len(events) == 0:
            raise ValueError('No valid events found')
        flat = _handle_dict(p.flat, subj)
        use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
        epochs = Epochs(raw, events, event_id=old_dict, tmin=p.tmin,
                        tmax=p.tmax, baseline=_get_baseline(p),
                        reject=use_reject, flat=use_flat, proj=p.epochs_proj,
                        preload=True, decim=this_decim,
                        on_missing=p.on_missing,
                        reject_tmin=p.reject_tmin, reject_tmax=p.reject_tmax,
                        reject_by_annotation=reject_epochs_by_annot)
        if epochs.events.shape[0] < 1:
            _raise_bad_epochs(raw, epochs, events, plot=p.plot_drop_logs)
        del raw
        drop_logs.append(epochs.drop_log)
        ch_namess.append(epochs.ch_names)
        # only trials that were not dropped are kept
        sfreq = epochs.info['sfreq']
        # now deal with conditions to save evoked
        if p.disp_files:
            print('    Matching trial counts and saving data to disk.')
        for var, name in ((out_names, 'out_names'),
                          (out_numbers, 'out_numbers'),
                          (must_match, 'must_match'),
                          (evoked_fnames, 'evoked_fnames')):
            if len(var) != len(analyses):
                raise ValueError('len(%s) (%s) != len(analyses) (%s)'
                                 % (name, len(var), len(analyses)))
        for analysis, names, numbers, match, fn in zip(analyses, out_names,
                                                       out_numbers, must_match,
                                                       evoked_fnames):
            # do matching
            numbers = np.asanyarray(numbers)
            nn = numbers[numbers >= 0]
            new_numbers = []
            for num in numbers:
                if num > 0 and num not in new_numbers:
                    # Eventually we could relax this requirement, but not
                    # having it in place is likely to cause people pain...
                    if any(num < n for n in new_numbers):
                        raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
                    new_numbers.append(num)
            new_numbers = np.array(new_numbers)
            in_names_match = list(in_names[match])
            # use some variables to allow safe name re-use
            offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
            safety_str = '__mnefun_copy__'
            assert len(new_numbers) == len(names)  # checked above
            if p.match_fun is None:
                e = None
            else:  # use custom matching
                args = [epochs.copy(), analysis, nn, in_names_match, names]
                if len(get_args(p.match_fun)) > 5:
                    args = args + [numbers]
                e = p.match_fun(*args)
            if e is None:
                # first, equalize trial counts (this will make a copy)
                e = epochs[list(in_names[numbers > 0])]
                # some could be missing
                in_names_match = [
                    name for name in in_names_match if name in e.event_id]
                if len(in_names_match) > 1:
                    print(f'      Equalizing: {in_names_match}')
                    e.equalize_event_counts(in_names_match)

                # second, collapse relevant types
                for num, name in zip(new_numbers, names):
                    collapse = [x for x in in_names[num == numbers]
                                if x in e.event_id]
                    combine_event_ids(e, collapse,
                                      {name + safety_str: num + offset},
                                      copy=False)
                for num, name in zip(new_numbers, names):
                    e.events[e.events[:, 2] == num + offset, 2] -= offset
                    e.event_id[name] = num
                    del e.event_id[name + safety_str]

            # now make evoked for each out type
            evokeds = list()
            n_standard = 0
            kinds = ['standard']
            if p.every_other:
                kinds += ['even', 'odd']
            for kind in kinds:
                for name in names:
                    this_e = e[name]
                    if kind == 'even':
                        this_e = this_e[::2]
                    elif kind == 'odd':
                        this_e = this_e[1::2]
                    else:
                        assert kind == 'standard'
                    with use_log_level('error'):
                        with warnings.catch_warnings(record=True):
                            warnings.simplefilter('ignore')
                            ave = this_e.average(picks='all')
                            ave.comment = name
                            stde = this_e.standard_error(picks='all')
                            stde.comment = name
                    if kind != 'standard':
                        ave.comment += ' %s' % (kind,)
                        stde.comment += ' %s' % (kind,)
                    evokeds.append(ave)
                    evokeds.append(stde)
                    if kind == 'standard':
                        n_standard += 2
            write_evokeds(fn, evokeds)
            naves = [str(n) for n in sorted(set([
                evoked.nave for evoked in evokeds[:n_standard]]))]
            bad = [evoked.comment for evoked in evokeds[:n_standard:2]
                   if evoked.nave == 0]
            if bad:
                print(f'      Got 0 epochs for: {bad}')
            naves = ', '.join(naves)
            if p.disp_files:
                print('      Analysis "%s": %s epochs / condition'
                      % (analysis, naves))

        if p.disp_files:
            print('    Saving epochs to disk.')
        if 'mat' in p.epochs_type:
            spio.savemat(mat_file, dict(epochs=epochs.get_data(),
                                        events=epochs.events, sfreq=sfreq,
                                        drop_log=epochs.drop_log),
                         do_compression=True, oned_as='column')
        if 'fif' in p.epochs_type:
            epochs.save(fif_file, **_get_epo_kwargs())

    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
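
The collapse logic in save_epochs leans on two MNE primitives, Epochs.equalize_event_counts and combine_event_ids. A minimal self-contained sketch of that idea on synthetic data (not the pipeline's real inputs):

import numpy as np
import mne
from mne.epochs import combine_event_ids

info = mne.create_info(['EEG 001'], sfreq=100., ch_types='eeg')
data = np.zeros((4, 1, 10))
events = np.array([[0, 0, 1], [20, 0, 1], [40, 0, 2], [60, 0, 2]])
epochs = mne.EpochsArray(data, info, events, event_id=dict(a=1, b=2))
epochs.equalize_event_counts(['a', 'b'])  # match trial counts across conditions
combine_event_ids(epochs, ['a', 'b'], {'ab': 3}, copy=False)
assert list(epochs.event_id) == ['ab']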