Example #1
def test_TangentSpace_inversetransform():
    """Test inverse transform of Tangent Space"""
    covset = generate_cov(10,3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
    t = ts.transform(covset)
    cov = ts.inverse_transform(t)
    assert_array_almost_equal(covset, cov)
Example #2
def test_TangentSpace_inversetransform():
    """Test inverse transform of Tangent Space."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
    t = ts.transform(covset)
    cov = ts.inverse_transform(t)
    assert_array_almost_equal(covset, cov)
Example #3
def test_TangentSpace_init(fit, tsupdate, metric, get_covmats):
    n_trials, n_channels = 4, 3
    n_ts = (n_channels * (n_channels + 1)) // 2
    covmats = get_covmats(n_trials, n_channels)
    ts = TangentSpace(metric=metric, tsupdate=tsupdate)
    if fit:
        ts.fit(covmats)
    Xtr = ts.transform(covmats)
    assert Xtr.shape == (n_trials, n_ts)
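
This example relies on a get_covmats pytest fixture (with fit, tsupdate and metric presumably supplied by pytest parametrization) that is not shown here. A minimal sketch of such a fixture, assuming it only needs to return a stack of random SPD matrices, could look like the following; the construction is an assumption, not pyriemann's actual test helper:

import numpy as np
import pytest


@pytest.fixture
def get_covmats():
    def _gen(n_trials, n_channels, seed=42):
        rng = np.random.RandomState(seed)
        # A @ A.T with more columns than rows is symmetric positive definite
        # almost surely, which is all TangentSpace needs here.
        A = rng.randn(n_trials, n_channels, 10 * n_channels)
        return A @ A.transpose(0, 2, 1) / (10 * n_channels)
    return _gen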
Example #4
    def fit_representation(self):
        print(np.array(self.data).shape)
        # Fit one spatial-filter + tangent-space model per subject.
        for k in range(len(self.data)):
            subject_data = np.array(self.data[k])
            print(subject_data.shape)
            subject_labels = self.labels[k]
            # Xdawn spatial filtering followed by Ledoit-Wolf covariance estimation.
            model_xDawn_enCours = pyriemann.estimation.XdawnCovariances(
                4, xdawn_estimator='lwf')

            subject_data = model_xDawn_enCours.fit_transform(
                subject_data, subject_labels)
            self.model_xDawn.append(model_xDawn_enCours)
            # Project the subject's covariances into the Riemannian tangent space.
            model_tangentSpace_enCours = TangentSpace(metric='riemann')
            model_tangentSpace_enCours.fit(subject_data, subject_labels)
            self.model_tangentSpace.append(model_tangentSpace_enCours)
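
The method above only fits and stores one XdawnCovariances and one TangentSpace per subject. A hypothetical companion method for projecting new epochs of subject k with the stored models (the method name and argument are assumptions; only the transform calls are real pyriemann API) might look like:

    def transform_representation(self, k, new_epochs):
        # new_epochs: array of shape (n_trials, n_channels, n_times) for subject k.
        covs = self.model_xDawn[k].transform(new_epochs)
        return self.model_tangentSpace[k].transform(covs)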
Example #5
def test_TangentSpace_transform():
    """Test transform of Tangent Space."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
    ts.transform(covset)

    X = np.zeros(shape=(10, 9))
    assert_raises(ValueError, ts.transform, X)

    X = np.zeros(shape=(10, 9, 8))
    assert_raises(ValueError, ts.transform, X)

    X = np.zeros(shape=(10))
    assert_raises(ValueError, ts.transform, X)

    X = np.zeros(shape=(12, 8, 8))
    assert_raises(ValueError, ts.transform, X)
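
For context, the shapes rejected above follow from the tangent-space dimensionality computed explicitly in Example #3: ts was fitted on 3x3 covariances, and a quick check with plain NumPy shows why none of these arrays is compatible with that reference.

n_channels = 3
n_ts = n_channels * (n_channels + 1) // 2
print(n_ts)  # 6 free entries in a 3x3 symmetric matrix
# Arrays of shape (10, 9), (10,) or (12, 8, 8) match neither a stack of 3x3
# matrices nor that dimensionality, so transform() raises ValueError.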
Example #6
def test_TangentSpace_transform_with_ts_update():
    """Test transform of Tangent Space with TSupdate"""
    covset = generate_cov(10,3)
    ts = TangentSpace(metric='riemann',tsupdate=True)
    ts.fit(covset)
    ts.transform(covset)
Example #7
def test_TangentSpace_transform():
    """Test transform of Tangent Space"""
    covset = generate_cov(10,3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
    ts.transform(covset)
Example #8
def test_TangentSpace_fit():
    """Test Fit of Tangent Space"""
    covset = generate_cov(10,3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
Example #9
def test_TangentSpace_transform_with_ts_update():
    """Test transform of Tangent Space with TSupdate."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='riemann', tsupdate=True)
    ts.fit(covset)
    ts.transform(covset)
Example #10
def test_TangentSpace_transform():
    """Test transform of Tangent Space."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
    ts.transform(covset)
Example #11
def test_TangentSpace_fit():
    """Test Fit of Tangent Space."""
    covset = generate_cov(10, 3)
    ts = TangentSpace(metric='riemann')
    ts.fit(covset)
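
The examples above exercise fit and transform in isolation. For orientation, here is a minimal end-to-end classification sketch, assuming pyriemann and scikit-learn are installed; the data is random and purely illustrative:

import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.tangentspace import TangentSpace
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

rng = np.random.RandomState(42)
X = rng.randn(40, 8, 256)            # 40 epochs, 8 channels, 256 samples
y = rng.randint(0, 2, 40)            # random binary labels

clf = make_pipeline(
    Covariances(estimator='lwf'),    # epochs -> SPD covariance matrices
    TangentSpace(metric='riemann'),  # SPD matrices -> Euclidean tangent vectors
    LogisticRegression(),
)
clf.fit(X, y)
print(clf.predict(X[:5]))            # five predicted labels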
Example #12
# f_name and label are defined earlier in the original script.
data = loadmat(f_name)
data_IS = data[list(data.keys())[-1]]
# Stack every trial of every condition into one (n_trials, n_channels, n_times) tensor.
data_tensor = [data_IS[0][0]]
for j in range(len(data_IS)):
    if j == 0:
        k = 1
    else:
        k = 0
    for i in range(k, len(data_IS[j])):
        temp = [data_IS[j][i]]
        data_tensor = np.concatenate((data_tensor, temp), axis=0)
# Ledoit-Wolf covariance estimation followed by tangent-space projection.
cov = Covariances(estimator='lwf')
ts = TangentSpace()
cov.fit(data_tensor, label)
cov_train = cov.transform(data_tensor)
ts.fit(cov_train, label)
ts_train = ts.transform(cov_train)
ts_shape = np.shape(ts_train)
# PCA + bagged MLP classifier on the tangent-space features, with a hyper-parameter grid.
pca = PCA()
ann = MLPClassifier(max_iter=5000)
clf = BaggingClassifier(base_estimator=ann, bootstrap=True)
pipe = Pipeline(steps=[('pca', pca), ('clf', clf)])
param_grid = {
    'pca__n_components': [20, 30, 40, 50, 60, 70, 80, 90, 100],
    'clf__base_estimator__hidden_layer_sizes':
    [(10), (20), (30), (40), (50), (60), (70), (80), (90), (100), (110),
     (120), (130), (140), (150), (160), (170), (180)],
    'clf__n_estimators': [
        10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150,
        160, 170, 180
    ]