def __init__(self, subject=None, date=None, mode='train', **kwargs):
    """Load or initialize the per-subject decoding model.

    Parameters
    ----------
    subject : str | None
        Subject name. ``None`` falls back to ``cfg.subj_info.subjname``.
    date : str | datetime | None
        Session timestamp. ``None`` selects the most recent session found
        under the subject directory; a ``datetime`` is formatted as
        ``%Y-%m-%d-%H-%M-%S`` to match the on-disk directory names.
    mode : str
        ``'train'`` builds a fresh classification pipeline; ``'test'``
        loads previously saved coefficients and a fitted model.
    **kwargs
        Training-mode hyper-parameters: ``C`` (default 1) for the logistic
        regression and ``n_components`` (default 3) for the xDAWN step.

    Raises
    ------
    ValueError
        If ``mode`` is not ``'train'`` or ``'test'``.
    """
    if subject is None:
        subject = cfg.subj_info.subjname
    # Session data lives at <package dir>/../data/<subject>; build the
    # path portably instead of concatenating hard-coded '/' separators.
    self._subj_path = os.path.join(
        os.path.dirname(__file__), '..', 'data', subject)

    if date is None:
        # No explicit session given: pick the latest one on disk.
        self._date = utils.find_nearest_time(self._subj_path)
    elif isinstance(date, datetime):
        # Convert datetime to the session-directory timestamp format.
        self._date = date.strftime("%Y-%m-%d-%H-%M-%S")
    else:
        self._date = date

    self.mode = mode.lower()
    # Validate with an exception rather than `assert`: assertions are
    # stripped under `python -O`, silently skipping this check.
    if self.mode not in ('train', 'test'):
        raise ValueError(
            "mode must be 'train' or 'test', got %r" % mode)

    if self.mode == 'test':
        # Loading trained coefficient
        self.data_dict = np.load(
            os.path.join(self._subj_path, self._date, 'coef.npz'))
        # Loading trained model
        self.__cls = joblib.load(
            os.path.join(self._subj_path, self._date, 'model.pkl'))
        self._ch_ind = self.data_dict['ind_ch_scores']
    else:
        self.data_dict = {}
        C = kwargs.pop('C', 1)
        n_components = kwargs.pop('n_components', 3)
        # Fresh pipeline: xDAWN spatial filtering -> per-channel scaling
        # -> flattening -> balanced one-vs-rest logistic regression.
        self.__cls = make_pipeline(
            _XdawnTransformer(n_components=n_components),
            ChannelScaler(),
            Vectorizer(),
            LogisticRegression(C=C, class_weight='balanced',
                               solver='liblinear', multi_class='ovr'))
def test_XdawnTransformer():
    """Test _XdawnTransformer."""
    # Build epochs, then extract the raw data array and event labels.
    raw, events, picks = _get_data()
    raw.del_proj()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)
    X = epochs._data
    y = epochs.events[:, -1]

    # Plain fit plus malformed-argument checks.
    transformer = _XdawnTransformer()
    transformer.fit(X, y)
    with pytest.raises(ValueError):
        transformer.fit(X, y[1:])
    with pytest.raises(ValueError):
        transformer.fit('foo')

    # signal_cov supplied as a Covariance object.
    signal_cov = compute_raw_covariance(raw, picks=picks)
    transformer = _XdawnTransformer(signal_cov=signal_cov)
    transformer.fit(X, y)

    # signal_cov supplied as a correctly shaped ndarray.
    signal_cov = np.eye(len(picks))
    transformer = _XdawnTransformer(signal_cov=signal_cov)
    transformer.fit(X, y)

    # A badly shaped ndarray must be rejected at fit time.
    signal_cov = np.eye(len(picks) - 1)
    transformer = _XdawnTransformer(signal_cov=signal_cov)
    with pytest.raises(ValueError):
        transformer.fit(X, y)

    # Any other type must be rejected as well.
    signal_cov = 42
    transformer = _XdawnTransformer(signal_cov=signal_cov)
    with pytest.raises(ValueError):
        transformer.fit(X, y)

    # Fitting with y omitted is allowed.
    transformer = _XdawnTransformer()
    transformer.fit(X)

    # Filters must agree with those of the Epochs-based Xdawn.
    xd = Xdawn(correct_overlap=False)
    xd.fit(epochs)
    transformer = _XdawnTransformer()
    transformer.fit(X, y)
    assert_array_almost_equal(xd.filters_['cond2'][:2, :],
                              transformer.filters_.reshape(2, 2, 8)[0])

    # Transform tolerates different epoch/time counts, not channel counts.
    transformer.transform(X[1:, ...])  # different number of epochs
    transformer.transform(X[:, :, 1:])  # different number of time
    with pytest.raises(ValueError):
        transformer.transform(X[:, 1:, :])
    Xt = transformer.transform(X)
    with pytest.raises(ValueError):
        transformer.transform(42)

    # Inverse transform round-trips the original shape.
    Xinv = transformer.inverse_transform(Xt)
    assert Xinv.shape == X.shape
    transformer.inverse_transform(Xt[1:, ...])
    transformer.inverse_transform(Xt[:, :, 1:])
    # should raise an error if not correct number of components
    with pytest.raises(ValueError):
        transformer.inverse_transform(Xt[:, 1:, :])
    with pytest.raises(ValueError):
        transformer.inverse_transform(42)
def test_XdawnTransformer():
    """Test _XdawnTransformer."""
    # Build epochs, then pull out the data array and event labels.
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False,
                    add_eeg_ref=False)
    data = epochs._data
    labels = epochs.events[:, -1]

    # Plain fit plus malformed-argument checks.
    estimator = _XdawnTransformer()
    estimator.fit(data, labels)
    assert_raises(ValueError, estimator.fit, data, labels[1:])
    assert_raises(ValueError, estimator.fit, 'foo')

    # signal_cov supplied as a Covariance object.
    cov = compute_raw_covariance(raw, picks=picks)
    estimator = _XdawnTransformer(signal_cov=cov)
    estimator.fit(data, labels)

    # signal_cov supplied as a correctly shaped ndarray.
    cov = np.eye(len(picks))
    estimator = _XdawnTransformer(signal_cov=cov)
    estimator.fit(data, labels)

    # A badly shaped ndarray must be rejected at fit time.
    cov = np.eye(len(picks) - 1)
    estimator = _XdawnTransformer(signal_cov=cov)
    assert_raises(ValueError, estimator.fit, data, labels)

    # Any other type must be rejected as well.
    cov = 42
    estimator = _XdawnTransformer(signal_cov=cov)
    assert_raises(ValueError, estimator.fit, data, labels)

    # Fitting with y omitted is allowed.
    estimator = _XdawnTransformer()
    estimator.fit(data)

    # Filters must agree with those of the Epochs-based Xdawn.
    xd = Xdawn(correct_overlap=False)
    xd.fit(epochs)
    estimator = _XdawnTransformer()
    estimator.fit(data, labels)
    assert_array_almost_equal(xd.filters_['cond2'][:, :2],
                              estimator.filters_.reshape(2, 2, 8)[0].T)

    # Transform tolerates different epoch/time counts, not channel counts.
    estimator.transform(data[1:, ...])  # different number of epochs
    estimator.transform(data[:, :, 1:])  # different number of time
    assert_raises(ValueError, estimator.transform, data[:, 1:, :])
    transformed = estimator.transform(data)
    assert_raises(ValueError, estimator.transform, 42)

    # Inverse transform round-trips the original shape.
    inverted = estimator.inverse_transform(transformed)
    assert_equal(inverted.shape, data.shape)
    estimator.inverse_transform(transformed[1:, ...])
    estimator.inverse_transform(transformed[:, :, 1:])
    # should raise an error if not correct number of components
    assert_raises(ValueError, estimator.inverse_transform,
                  transformed[:, 1:, :])
    assert_raises(ValueError, estimator.inverse_transform, 42)
def test_xdawn_decoding_performance():
    """Test decoding performance and extracted pattern on synthetic data."""
    from sklearn.model_selection import KFold
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.metrics import accuracy_score

    n_xdawn_comps = 3
    expected_accuracy = 0.98

    epochs, mixing_mat = _simulate_erplike_mixed_data(n_epochs=100)
    y = epochs.events[:, 2]

    # results of Xdawn and _XdawnTransformer should match
    def _build_pipe(spatial_step):
        # Shared tail: vectorize, scale to [0, 1], logistic regression.
        return make_pipeline(spatial_step, Vectorizer(), MinMaxScaler(),
                             LogisticRegression(solver='liblinear'))

    xdawn_pipe = _build_pipe(Xdawn(n_components=n_xdawn_comps))
    xdawn_trans_pipe = _build_pipe(
        _XdawnTransformer(n_components=n_xdawn_comps))

    cv = KFold(n_splits=3, shuffle=False)
    for pipe, X in ((xdawn_pipe, epochs),
                    (xdawn_trans_pipe, epochs.get_data())):
        # Out-of-fold predictions over the whole data set.
        predictions = np.empty_like(y, dtype=float)
        for train, test in cv.split(X, y):
            pipe.fit(X[train], y[train])
            predictions[test] = pipe.predict(X[test])

        cv_accuracy_xdawn = accuracy_score(y, predictions)
        assert_allclose(cv_accuracy_xdawn, expected_accuracy, atol=0.01)

        # for both event types, the first component should "match" the mixing
        fitted_xdawn = pipe.steps[0][1]
        if isinstance(fitted_xdawn, Xdawn):
            relev_patterns = np.concatenate(
                [comps[[0]] for comps in fitted_xdawn.patterns_.values()])
        else:
            relev_patterns = fitted_xdawn.patterns_[::n_xdawn_comps]

        for pattern in relev_patterns:
            r, _ = stats.pearsonr(pattern, mixing_mat[0, :])
            assert np.abs(r) > 0.99