Example #1
    def _Q_S_estim_riemann(self, data):
        # Check if X is a single trial (test data) or not
        if data.ndim == 2:
            data = data[np.newaxis, ...]

        # Get data shape
        n_trials, n_channels, n_samples = data.shape

        X = np.concatenate((data, data), axis=1)

        # Concatenate all the trials
        UX = np.empty((n_channels, n_samples * n_trials))

        for trial_n in range(n_trials):
            UX[:, trial_n * n_samples:(trial_n + 1) *
               n_samples] = data[trial_n, :, :]

        # Mean centering
        UX -= np.mean(UX, 1)[:, None]

        # Compute empirical variance of all data (to be bounded)
        cov = Covariances(estimator=self.estimator).fit_transform(
            UX[np.newaxis, ...])
        Q = np.squeeze(cov)

        cov = Covariances(estimator=self.estimator).fit_transform(X)
        S = cov[:, :n_channels, n_channels:] + cov[:, n_channels:, :n_channels]

        S = mean_covariance(S, metric=self.method)

        return S, Q
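A minimal sketch (not from the source) of how the returned S and Q are typically used downstream: TRCA-style spatial filters come from the generalized eigenvalue problem S w = lambda Q w, keeping the eigenvectors with the largest eigenvalues.

from scipy.linalg import eigh

def trca_filters(S, Q, n_components=1):
    # eigh solves the symmetric generalized problem S w = lambda Q w and
    # returns eigenvalues in ascending order, so take the last columns
    eigvals, eigvecs = eigh(S, Q)
    return eigvecs[:, ::-1][:, :n_components]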
Example #2
    def _Q_S_estim(self, data):
        # Check if X is a single trial (test data) or not
        if data.ndim == 2:
            data = data[np.newaxis, ...]

        # Get data shape
        n_trials, n_channels, n_samples = data.shape

        # Initialize S matrix
        S = np.zeros((n_channels, n_channels))

        # Estimate covariance between every trial and the rest of the trials (excluding itself)
        for trial_i in range(n_trials - 1):
            # Copy so the in-place centering below does not mutate `data`,
            # which is reused later when building UX
            x1 = np.squeeze(data[trial_i, :, :]).copy()

            # Mean centering for the selected trial
            x1 -= np.mean(x1, 0)

            # Select a second trial that is different
            for trial_j in range(trial_i + 1, n_trials):
                # Copy to avoid mutating `data` in place
                x2 = np.squeeze(data[trial_j, :, :]).copy()

                # Mean centering for the selected trial
                x2 -= np.mean(x2, 0)

                # Put the two trials together
                X = np.concatenate((x1, x2))

                if n_channels == 1:
                    X = X.reshape((n_channels, len(X)))

                # Regularized covariance estimate
                cov = Covariances(estimator=self.estimator).fit_transform(
                    X[np.newaxis, ...])
                cov = np.squeeze(cov)

                # Compute empirical covariance between the two selected trials and sum it
                if n_channels > 1:
                    S = S + cov[:n_channels,
                                n_channels:] + cov[n_channels:, :n_channels]

                else:
                    S = S + cov + cov

        # Concatenate all the trials
        UX = np.empty((n_channels, n_samples * n_trials))

        for trial_n in range(n_trials):
            UX[:, trial_n * n_samples:(trial_n + 1) *
               n_samples] = data[trial_n, :, :]

        # Mean centering
        UX -= np.mean(UX, 1)[:, None]
        cov = Covariances(estimator=self.estimator).fit_transform(
            UX[np.newaxis, ...])
        Q = np.squeeze(cov)

        return S, Q
Example #3
def _train_raw(df):
    """Train a classifier on raw EEG data"""
    X, y = transform.signal_ndarray(df)
    # print(X, y)

    # Fixes non-convergence for binary classification
    # (the dual formulation requires the liblinear solver)
    dual = len(set(y)) == 2

    clfs: Dict[str, Pipeline] = {
        # These four are from https://neurotechx.github.io/eeg-notebooks/auto_examples/visual_ssvep/02r__ssvep_decoding.html
        "CSP + Cov + TS":
        make_pipeline(
            Covariances(),
            CSP(4, log=False),
            TangentSpace(),
            LogisticRegression(dual=dual, solver="liblinear"),
        ),
        "Cov + TS":
        make_pipeline(Covariances(), TangentSpace(),
                      LogisticRegression(dual=dual, solver="liblinear")),
        # Performs meh
        # "CSP + RegLDA": make_pipeline(
        #     Covariances(), CSP(4), LDA(shrinkage="auto", solver="eigen")
        # ),
        # Performs badly
        # "Cov + MDM": make_pipeline(Covariances(), MDM()),
    }

    for name, clf in clfs.items():
        logger.info(f"===== Training with {name} =====")
        _train(X, y, clf)
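The `_train` helper is not shown here; a plausible stand-in, assuming it simply cross-validates the pipeline (hypothetical, not the source's implementation; `logger` is taken from the surrounding module as in the snippet):

from sklearn.model_selection import cross_val_score

def _train(X, y, clf, n_splits=5):
    # Hypothetical stand-in: score the pipeline with k-fold cross-validation
    scores = cross_val_score(clf, X, y, cv=n_splits)
    logger.info(f"accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")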
Example #4
def check_other_classifiers(train_X, train_y, test_X, test_y):
    from pyriemann.classification import MDM, TSclassifier
    from sklearn.linear_model import LogisticRegression
    from pyriemann.estimation import Covariances
    from sklearn.pipeline import Pipeline
    from mne.decoding import CSP
    import seaborn as sns
    import pandas as pd

    train_y = [np.where(i == 1)[0][0] for i in train_y]
    test_y = [np.where(i == 1)[0][0] for i in test_y]

    cov_data_train = Covariances().transform(train_X)
    cov_data_test = Covariances().transform(test_X)
    cv = KFold(n_splits=10, shuffle=True, random_state=42)
    clf = TSclassifier()
    scores = cross_val_score(clf, cov_data_train, train_y, cv=cv, n_jobs=1)
    print("Tangent space Classification accuracy: ", np.mean(scores))

    clf = TSclassifier()
    clf.fit(cov_data_train, train_y)
    print(clf.score(cov_data_test, test_y))

    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    scores = cross_val_score(mdm, cov_data_train, train_y, cv=cv, n_jobs=1)
    print("MDM Classification accuracy: ", np.mean(scores))
    mdm = MDM()
    mdm.fit(cov_data_train, train_y)

    fig, axes = plt.subplots(1, 2)
    ch_names = [ch for ch in range(8)]

    df = pd.DataFrame(data=mdm.covmeans_[0], index=ch_names, columns=ch_names)
    g = sns.heatmap(df,
                    ax=axes[0],
                    square=True,
                    cbar=False,
                    xticklabels=2,
                    yticklabels=2)
    g.set_title('Mean covariance - feet')

    df = pd.DataFrame(data=mdm.covmeans_[1], index=ch_names, columns=ch_names)
    g = sns.heatmap(df,
                    ax=axes[1],
                    square=True,
                    cbar=False,
                    xticklabels=2,
                    yticklabels=2)
    plt.xticks(rotation='vertical')
    plt.yticks(rotation='horizontal')
    g.set_title('Mean covariance - hands')

    # dirty fix
    plt.sca(axes[0])
    plt.xticks(rotation='vertical')
    plt.yticks(rotation='horizontal')
    plt.savefig("meancovmat.png")
    plt.show()
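The list comprehensions near the top of this example decode one-hot rows to integer labels; np.argmax does the same in one vectorized call (equivalent sketch):

import numpy as np

train_y = np.argmax(train_y, axis=1)
test_y = np.argmax(test_y, axis=1)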
Example #5
def test_shrinkage():
    """Test Shrinkage"""
    x = np.random.randn(2, 3, 100)
    cov = Covariances()
    covs = cov.fit_transform(x)
    sh = Shrinkage()
    sh.fit(covs)
    sh.transform(covs)
    assert_equal(sh.get_params(), dict(shrinkage=0.1))
Example #6
def test_shrinkage():
    """Test Shrinkage"""
    x = np.random.randn(2, 3, 100)
    cov = Covariances()
    covs = cov.fit_transform(x)
    sh = Shrinkage()
    sh.fit(covs)
    sh.transform(covs)
    assert sh.get_params() == dict(shrinkage=0.1)
Example #7
def make_fig2(save=False):

    data_params = {}
    data_params[
        'path'] = '/research/vibs/Pedro/datasets/motorimagery/BCI-competitions/BCI-IV/2a/'
    data_params['session'] = 1
    data_params['task'] = 1
    data_params['tparams'] = [1.25, 3.75]
    data_params['fparams'] = [8.0, 35.0]

    data_params['subject'] = 5
    X, yworst = get_data(data_params)
    X = X[(yworst == 1) | (yworst == 2)]
    yworst = yworst[(yworst == 1) | (yworst == 2)]
    covs = Covariances().fit_transform(X)
    uworst, lworst = get_diffusionEmbedding(points=covs,
                                            distance=distance_riemann)

    data_params['subject'] = 1
    X, ybest = get_data(data_params)
    X = X[(ybest == 1) | (ybest == 2)]
    ybest = ybest[(ybest == 1) | (ybest == 2)]
    covs = Covariances().fit_transform(X)
    ubest, lbest = get_diffusionEmbedding(points=covs,
                                          distance=distance_riemann)

    fig = plt.figure(figsize=(10.5, 5))
    plt.subplots_adjust(wspace=0.020, hspace=0.025)

    plt.subplot(1, 2, 1)
    # labels are 1/2, so (y - 2) indexes as -1 ('r') and 0 ('b')
    colorst = [['b', 'r'][int(t)] for t in (yworst - 2)]
    plt.scatter(uworst[:, 1], uworst[:, 2], color=colorst, s=44)
    plt.xlabel(r'$\phi_1$', fontsize=26)
    plt.ylabel(r'$\phi_2$', fontsize=26)
    plt.xticks([])
    plt.yticks([])
    ttl = plt.title('Worst subject', fontsize=20)
    ttl.set_position([.5, 1.025])

    ax = plt.subplot(1, 2, 2)
    colorst = [['b', 'r'][int(t)] for t in (ybest - 2)]
    plt.scatter(ubest[:, 1], ubest[:, 2], color=colorst, s=44)
    plt.xlabel(r'$\phi_1$', fontsize=26)
    plt.ylabel(r'$\phi_2$', fontsize=26)
    ax.yaxis.set_label_position("right")
    plt.xticks([])
    plt.yticks([])
    ttl = plt.title('Best subject', fontsize=20)
    ttl.set_position([.5, 1.025])

    if save:
        name = 'figure2'
        savefigure(name)

    return [uworst, lworst], [ubest, lbest]
Example #8
def compute_cov(state):
    """Computes the crosspectrum matrices per subjects."""
    for sub in SUBJECT_LIST:
        pattern = prefix + "_s{}_{}.mat"
        file_path = path(SAVE_PATH / pattern.format(sub, state))

        if not file_path.isfile():
            # data must be of shape n_trials x n_elec x n_samples
            data = load_samples(DATA_PATH, sub, state)
            if FULL_TRIAL:
                data = np.concatenate(data, axis=1)
                data = data.reshape(1, data.shape[0], data.shape[1])
            cov = Covariances()
            mat = cov.fit_transform(data)
            savemat(file_path, {"data": mat})
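Reading a saved file back is symmetric (a sketch, assuming scipy.io is available and the same naming pattern):

from scipy.io import loadmat

mat = loadmat(str(file_path))["data"]  # shape: n_trials x n_elec x n_elec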
Example #9
def xdawn_embedding(data, use_xdawn):
    """Perform embedding of EEG data in 2D Euclidean space
    with Laplacian Eigenmaps.

    Parameters
    ----------
    data : dict
        A dictionary containing training and testing data
    use_xdawn : bool
        Whether to apply Xdawn spatial filtering before the embedding.

    Returns
    -------
    array
        Embedded points.

    """

    if use_xdawn:
        nfilter = 3
        xdwn = XdawnCovariances(estimator='scm', nfilter=nfilter)
        covs = xdwn.fit(data['train_x'],
                        data['train_y']).transform(data['test_x'])

        lapl = Embedding(metric='riemann', n_components=3)
        embd = lapl.fit_transform(covs)
    else:
        tangent_space = Pipeline([
            ('cov_transform', Covariances(estimator='lwf')),
            ('tangent_space', TangentSpace(metric='riemann'))
        ])
        t_space = tangent_space.fit(data['train_x'],
                                    data['train_y']).transform(data['test_x'])
        reducer = umap.UMAP(n_neighbors=30, min_dist=1, spread=2)
        embd = reducer.fit_transform(t_space)

    return embd
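A quick way to visualize the returned 2-D embedding (a sketch; `data` is the same dict as above and `test_labels` is a hypothetical integer label array):

import matplotlib.pyplot as plt

embd = xdawn_embedding(data, use_xdawn=False)
plt.scatter(embd[:, 0], embd[:, 1], c=test_labels)  # test_labels: hypothetical
plt.show()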
Example #10
def svm_tangent_space_cross_validate(data):
    """A cross validated tangent space classifier with svm.

    Parameters
    ----------
    data : dict
        A dictionary containing training and testing data

    Returns
    -------
    cross validated scores
        A list of cross validated scores.

    """

    # Combine the dataset
    x = np.concatenate((data['train_x'], data['test_x']), axis=0)
    y = np.concatenate((data['train_y'], data['test_y']), axis=0)

    # Construct sklearn pipeline
    clf = Pipeline([('cov_transform', Covariances(estimator='lwf')),
                    ('tangent_space', TangentSpace(metric='riemann')),
                    ('svm_classify', SVC(kernel='rbf', gamma='auto'))])
    # cross validation
    scores = cross_val_score(clf, x, y, cv=KFold(5, shuffle=True))
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    print('\n')

    return scores
Example #11
def get_trajectory(subject):

    print('subject ' + str(subject))
    print('')

    data_params = {}
    data_params[
        'path'] = '/research/vibs/Pedro/datasets/motorimagery/Physionet/eegmmidb/'
    data_params['session'] = 1
    data_params['task'] = 4
    data_params['fparams'] = [8.0, 35.0]
    data_params['subject'] = subject
    data_params['tparams'] = [-13.8, +13.]

    X, y = get_data(data_params)

    L = 160
    nt, nc, ns = X.shape
    covm = []
    for w in tqdm(gen_windows(L, ns, step=20)):
        xw = X[:, :, w]
        covs = Covariances().fit_transform(xw)
        covm.append(mean_riemann(covs))

    print('getting the diffusion embedding')
    covm = np.stack(covm)
    u, l = get_diffusionEmbedding(covm, distance_riemann, alpha=1.0, tdiff=0)

    filepath = './results/Physionet/'
    filepath = filepath + 'trajectory_subject' + str(subject) + '.pkl'
    embedding = [u, l]
    joblib.dump(embedding, filepath)

    print('')
Example #12
def test_tsclassifier_clf_error(get_covmats, get_labels):
    """Test TS if not Classifier"""
    n_matrices, n_channels, n_classes = 6, 3, 2
    covmats = get_covmats(n_matrices, n_channels)
    labels = get_labels(n_matrices, n_classes)
    with pytest.raises(TypeError):
        TSclassifier(clf=Covariances()).fit(covmats, labels)
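The `get_covmats` and `get_labels` factory fixtures come from the test suite's conftest; a hypothetical reimplementation consistent with how they are called above:

import numpy as np
import pytest

@pytest.fixture
def get_covmats():
    def _gen(n_matrices, n_channels):
        rng = np.random.RandomState(42)
        x = rng.randn(n_matrices, n_channels, 10 * n_channels)
        return x @ x.transpose(0, 2, 1) / x.shape[-1]  # SPD by construction
    return _gen

@pytest.fixture
def get_labels():
    def _gen(n_matrices, n_classes):
        return np.arange(n_classes).repeat(n_matrices // n_classes)
    return _gen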
Example #13
def estimate_covariance_matrices(raw_dataset):
    """
    Creates the simple covariance matrices for the raw data
    """
    covariance_matrices = []
    for subject in raw_dataset:
        covariance_matrices.append(Covariances("oas").transform(subject))
    return np.asarray(covariance_matrices)
Example #14
def plot_covs(XX, category, normalize=False, train_size=0.7, estimator='scm'):
    if normalize:
        XX = (XX - XX.mean(2)[:, :, None]) / XX.std(2)[:, :, None]

    n_trials = XX.shape[0]
    # covs = np.array([np.cov(XX[i]) for i in range(n_trials)])
    covs = Covariances(estimator=estimator).fit_transform(XX)
    vmin = None
    vmax = None
    if normalize:
        vmin = 0.0
        vmax = 1.0

    title = "Mean Covariance - Category %s" % category
    covs_mean = covs.mean(0)
    plot_cov(covs_mean, title, vmin, vmax)
    return covs
Example #15
def test_covariances():
    """Test fit Covariances"""
    x = np.random.randn(2, 3, 100)
    cov = Covariances()
    cov.fit(x)
    cov.fit_transform(x)
    assert_equal(cov.get_params(), dict(estimator='scm'))
Example #16
def RHvsLH_cross(out_dir, pipelines):
    name = 'RHvsLH_cross'
    datasets = utils.dataset_search('imagery',
                                    events=['right_hand', 'left_hand'],
                                    has_all_events=True,
                                    min_subjects=2,
                                    multi_session=False)

    print(datasets)
    pipelines = OrderedDict()
    pipelines['TS'] = make_pipeline(Covariances('oas'), TSclassifier())
    pipelines['CSP+LDA'] = make_pipeline(Covariances('oas'), CSP(6), LDA())
    pipelines['CSP+SVM'] = make_pipeline(Covariances('oas'), CSP(6), SVC())

    context = LeftRightImagery(pipelines, CrossSubjectEvaluation(n_jobs=10),
                               datasets)

    results = context.process()

    return results
Example #17
def test_shrinkage(shrinkage, rndstate):
    """Test Shrinkage"""
    n_matrices, n_channels, n_times = 2, 3, 100
    x = rndstate.randn(n_matrices, n_channels, n_times)
    covmats = Covariances().fit_transform(x)
    sh = Shrinkage(shrinkage=shrinkage)
    covmats = sh.fit(covmats).transform(covmats)
    assert sh.get_params() == dict(shrinkage=shrinkage)
    assert covmats.shape == (n_matrices, n_channels, n_channels)
    assert is_spd(covmats)
Example #18
def create_mdm(raw, event_id):
    tmin, tmax = -1., 4.
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    baseline=None, preload=True, verbose=False)
    labels = epochs.events[:, -1]
    epochs_data_train = epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    mdm.fit(cov_data_train, labels)
    return mdm
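Typical use of the returned model on new windows (a sketch; `new_epochs_data` is a hypothetical n_trials x n_channels x n_samples array):

mdm = create_mdm(raw, event_id=dict(hands=2, feet=3))
new_covs = Covariances().transform(new_epochs_data)  # hypothetical new data
predictions = mdm.predict(new_covs)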
Example #19
def find_reference(raw,
                   n_cluster,
                   pick_types=None,
                   copy=True,
                   flat_threshold=1e-15,
                   n_split=100,
                   plot=True):
    """ Computes covariance on splits of the raw data, and apply KMeans
    clustering to find the number of disjoint references.
    n_cluster is found with PCA if float
    """
    import matplotlib.pyplot as plt
    from pyriemann.estimation import Covariances
    from sklearn.cluster import KMeans
    from sklearn.metrics.pairwise import pairwise_distances

    if copy:
        raw = raw.copy()
    # Remove flat lines
    flat = np.where(np.std(raw._data, axis=1) < flat_threshold)[0]
    for ch in flat:
        raw.info['bads'] += [raw.ch_names[ch]]

    # Pick data channels only
    if pick_types is None:
        pick_types = dict(seeg=True, exclude='bads')
    raw.pick_types(**pick_types)

    # Compute covariance on data splits
    n_time = len(raw.times)
    t_max = raw.times[n_time - n_time % n_split - 1]
    raw.crop(0, t_max, copy=False)  # ensure regularly sized splits
    X = np.array(np.array_split(raw._data, n_split, axis=1))
    covs = Covariances().fit_transform(X)

    # Compute cluster for each data split
    cluster = KMeans(n_cluster)
    all_kmeans = list()
    for cov in covs:
        dist = pairwise_distances(cov)
        all_kmeans.append(cluster.fit_predict(dist))

    # Combine clusters
    dist = pairwise_distances(np.array(all_kmeans).T)
    idx = cluster.fit_predict(dist)

    if plot:
        idx_ = np.argsort(idx)
        cov = np.median(covs, axis=0)
        plt.matshow(np.log10(cov)[idx_, :][:, idx_])

    clusters = [np.array(raw.ch_names)[idx == ii] for ii in np.unique(idx)]
    return clusters
Example #20
def tangent_space_classifier(features, labels, classifier):
    """A tangent space classifier with svm for 3 classes.

    Parameters
    ----------
    features : array
        An array of features
    labels : array
        True labels
    classifier : string
        Options: Support Vector Machines ('svc') or Random Forest ('rf')
    Returns
    -------
    sklearn classifier
        Learnt classifier.

    """
    # Construct sklearn pipeline

    if classifier == 'svc':
        clf = Pipeline([('covariance_transform', Covariances(estimator='scm')),
                        ('tangent_space', TangentSpace(metric='riemann')),
                        ('classifier',
                         SVC(kernel='rbf',
                             gamma='auto',
                             decision_function_shape='ovr'))])
    elif classifier == 'rf':
        clf = Pipeline([('covariance_transform', Covariances(estimator='scm')),
                        ('tangent_space', TangentSpace(metric='riemann')),
                        ('classifier',
                         RandomForestClassifier(n_estimators=100,
                                                oob_score=True))])
    else:
        print("Please select the appropriate classifier ")
        return

    # Train the classifier
    clf.fit(features, labels)

    return clf
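Using the returned pipeline (a sketch; `new_features` is a hypothetical n_trials x n_channels x n_samples array):

clf = tangent_space_classifier(features, labels, 'rf')
predictions = clf.predict(new_features)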
Example #21
def test_covariances():
    """Test fit Covariances"""
    x = np.random.randn(2, 3, 100)
    cov = Covariances()
    cov.fit(x)
    cov.fit_transform(x)
    assert_equal(cov.get_params(), dict(estimator='scm'))
Example #22
def subject_independent_cov_data(config):
    """Get subject independent covariance data (pooled data).

    Parameters
    ----------
    config : yaml
        The configuration file

    Returns
    -------
    features, labels, leave_tags
        Two arrays of features and labels, plus a tag array that determines
        whether a data point is used in training.

    """

    path = str(Path(__file__).parents[2] / config['clean_emg_data'])
    data = dd.io.load(path)

    # Parameters
    subjects = config['subjects']

    # Empty array (list)
    x = []
    y = []
    leave_tags = np.empty((0, 1))

    for subject in subjects:
        cov_temp = Covariances().transform(data['subject_' +
                                                subject]['features'])
        # fit_transform is needed: TangentSpace must estimate its reference
        # point before projecting
        x_temp = TangentSpace(metric='riemann').fit_transform(cov_temp)
        y_temp = data['subject_' + subject]['labels']
        x.append(x_temp)
        y.append(y_temp)
        leave_tags = np.concatenate((leave_tags, y_temp[:, 0:1] * 0 + 1),
                                    axis=0)

    # Convert to array
    x = np.concatenate(x, axis=0)
    y = np.concatenate(y, axis=0)

    # Balance the dataset
    rus = RandomUnderSampler()
    rus.fit_resample(y, y)

    # Store them in dictionary
    features = x[rus.sample_indices_, :]
    labels = y[rus.sample_indices_, :]
    leave_tags = leave_tags[rus.sample_indices_, :]

    return features, labels, leave_tags
Example #23
def get_results(clf, source_dataset, source_subject, target_dataset,
                target_subject, ncovs_target_train_list):

    scores_target = {}

    print('target subject:', target_subject, ', source subject:',
          source_subject)

    # get the data from source dataset
    dataset_source, dataset_target, idx = utilities.get_source_target_dataset(
        source_dataset, source_subject, target_dataset, target_subject)

    # estimate the covariances
    source = {}
    source['org'] = {}
    source['org']['covs'] = Covariances(estimator='lwf').fit_transform(
        dataset_source['epochs'])
    source['org']['labels'] = dataset_source['labels']
    target = {}
    target['org'] = {}
    target['org']['covs'] = Covariances(estimator='lwf').fit_transform(
        dataset_target['epochs'])
    target['org']['labels'] = dataset_target['labels']

    # match the dimensions (reorder and expand)
    source['org-aug'], target[
        'org-aug'] = utilities.match_source_target_dimensions_motorimagery(
            source['org'], target['org'], idx)

    # get the scores
    scores_target[source_subject] = {}
    for ncovs_target_train in ncovs_target_train_list:
        scores_target[source_subject][
            ncovs_target_train] = scores_transfer_learning_cross_validation(
                clf, source, target, ncovs_target_train, nrzt=5)

    return scores_target
Example #24
def test_covariances(estimator, rndstate):
    """Test Covariances"""
    n_matrices, n_channels, n_times = 2, 3, 100
    x = rndstate.randn(n_matrices, n_channels, n_times)
    cov = Covariances(estimator=estimator)
    cov.fit(x)
    covmats = cov.fit_transform(x)
    assert cov.get_params() == dict(estimator=estimator)
    assert covmats.shape == (n_matrices, n_channels, n_channels)
    assert is_spd(covmats)
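For a quick look at how different estimators behave outside the test harness, a standalone sketch:

import numpy as np
from pyriemann.estimation import Covariances

x = np.random.randn(2, 3, 100)
for est in ('scm', 'lwf', 'oas'):
    covs = Covariances(estimator=est).fit_transform(x)
    print(est, covs.shape)  # (2, 3, 3) in each case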
Example #25
def forest_tangent_space_cross_validate(data, cv=False):
    """A cross validated tangent space classifier with svm.

    Parameters
    ----------
    data : dict
        A dictionary containing training and testing data

    Returns
    -------
    None
        Prints cross validated scores or plots a confusion matrix.

    """

    # Construct sklearn pipeline
    clf = Pipeline([('cov_transform', Covariances('lwf')),
                    ('tangent_space', TangentSpace(metric='riemann')),
                    ('random_forest_classify',
                     RandomForestClassifier(n_estimators=20,
                                            max_depth=10,
                                            random_state=43))])
    if cv:
        # Combine the dataset
        x = np.concatenate((data['train_x'], data['test_x']), axis=0)
        y = np.concatenate((data['train_y'], data['test_y']), axis=0)

        # cross validation
        scores = cross_val_score(clf, x, y, cv=KFold(5, shuffle=True))
        print("Accuracy: %0.4f (+/- %0.4f)" %
              (scores.mean(), scores.std() * 2))
        print('\n')
    else:
        clf = RandomForestClassifier(n_estimators=20,
                                     max_depth=10,
                                     random_state=43)
        plt.style.use('clean')
        y_train = np.argmax(data['train_y'], axis=1) + 1
        y_test = np.argmax(data['test_y'], axis=1) + 1
        classifier = clf.fit(data['train_x'], y_train)
        plot_confusion_matrix(classifier,
                              data['test_x'],
                              y_test,
                              normalize='true',
                              cmap=plt.cm.Blues)
    return None
Example #26
def get_score(subject, event):
    if event == "left_vs_right":
        runs = [4, 8, 12]
        event_id = dict(rest=1, left=2, right=3)
    else:
        runs = [6, 10, 14]
        event_id = dict(rest=1, hands=2, feet=3)
    raw = get_raw(subject, runs=runs)
    data = raw.get_data()

    mdm = create_mdm(raw, event_id)

    w_length = 160  # length of the training epoch window
    w_step = 80

    current_step = 0
    count = 0
    score = 0

    print("start")
    for i in range(len(data[0])):
        current_step += 1
        if i > w_length and current_step > w_step:
            window = np.array([data[0:16, i:i+w_length]])
            X_test = Covariances().transform(window)
            # print(X_test.shape)
            label = mdm.predict(X_test)

            current_step = 0
            count += 1

            window_labels = np.array(data[16][i:i+w_length], dtype=np.int64)
            label_count = np.bincount(window_labels)
            argmax_label = np.argmax(label_count)

            print(i, label, argmax_label)

            if label == argmax_label:
                score += 1
        # sleep(1./160)
    print(subject, score/count)
    print("end")
    return score/count
Example #27
def get_embedding():

    path = '/localdata/coelhorp/epilepsy/seizure_detection/'

    subjects = ['Dog_3', 'Dog_4', 'Patient_2', 'Patient_6', 'Patient_7']
    for subject in subjects:

        print('processing subject: ' + subject)
        filepaths = sorted(glob.glob(path + subject + '/*_ictal_*'))

        X = []
        lat = []
        for filepath in filepaths:
            struct = sp.io.loadmat(filepath)
            X.append(struct['data'])
            lat.append(struct['latency'][0])

        lat = np.array(lat)
        X = np.stack(X)

        fs = struct['freq']
        fini = 1.0
        fend = 40.0
        b, a = signal.butter(5, [fini / (fs / 2), fend / (fs / 2)],
                             btype='bandpass')
        # Band-pass filter each trial in place
        for i in range(len(X)):
            X[i] = signal.filtfilt(b, a, X[i])

        covs = Covariances(estimator='oas').fit_transform(X)
        print('getting the diffusion embedding')
        u, l = get_diffusionEmbedding(points=covs, distance=distance_riemann)

        directory = './results/Epilepsy/'
        if not os.path.exists(directory):
            os.makedirs(directory)

        filepath = directory + 'embedding_subject-' + str(subject) + '.pkl'
        embedding = [u, l]
        joblib.dump([embedding, lat], filepath)

        print('')
Example #28
def aggregate_cov(X,
                  n_subjects,
                  metadata,
                  filterbank=True,
                  s_class='rest',
                  estimator='lwf'):
    """Compute covmat for concatenated signals"""
    subjX = np.array(metadata['subject'])
    cov_list = []
    for s in range(n_subjects):
        s_loc = subjX == (s + 1)
        # `y` (trial labels) must be available in the enclosing scope
        X_sig = np.array(X[np.logical_and(s_loc, y == s_class)])
        if filterbank:
            sig_ext = X_sig[:, :, :, :-1].transpose((0, 3, 1, 2))
            n_trials, n_freqs, n_channels, n_times = sig_ext.shape
            X_sig = sig_ext.reshape((n_trials, n_channels * n_freqs, n_times))
        X_sig = np.concatenate([X_sig[i, :, :] for i in range(X_sig.shape[0])],
                               axis=1)
        cov = Covariances(estimator=estimator).transform(
            X_sig.reshape((1, *(X_sig.shape))))
        cov_list.append(cov)
    return np.concatenate(cov_list, axis=0)
Example #29
def get_twoclasses(subject):

    print('subject ' + str(subject))
    print('')

    data_params = {}
    data_params[
        'path'] = '/research/vibs/Pedro/datasets/motorimagery/Physionet/eegmmidb/'
    data_params['session'] = 1
    data_params['task'] = 4
    data_params['tparams'] = [1.0, 2.0]
    data_params['fparams'] = [8.0, 35.0]
    data_params['subject'] = subject

    X, y = get_data(data_params)
    covs = Covariances().fit_transform(X)
    u, l = get_diffusionEmbedding(points=covs, distance=distance_riemann)

    filepath = './results/Physionet/'
    filepath = filepath + 'twoclasses_subject' + str(subject) + '.pkl'
    embedding = [u, l]
    joblib.dump([embedding, y], filepath)
Example #30
def get_twoclasses(subject):

    print('subject ' + str(subject))
    print('')

    data_params = {}
    data_params['path'] = '/research/vibs/Pedro/datasets/motorimagery/BCI-competitions/BCI-IV/2a/'
    data_params['session'] = 1
    data_params['task'] = 1
    data_params['tparams'] = [1.25, 3.75]
    data_params['fparams'] = [8.0, 35.0]
    data_params['subject'] = subject

    X, y = get_data(data_params)
    X = X[(y == 1) | (y == 2)]
    y = y[(y == 1) | (y == 2)]
    covs = Covariances().fit_transform(X)
    u, l = get_diffusionEmbedding(points=covs, distance=distance_riemann)

    filepath = './results/BCI-IV/'
    filepath = filepath + 'twoclasses_subject' + str(subject) + '.pkl'
    embedding = [u, l]
    joblib.dump([embedding, y], filepath)
Example #31
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    tmin, tmax = -1., 4.

    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    baseline=None, preload=True, verbose=False)
    labels = epochs.events[:, -1]

    # cv = KFold(len(labels), 10, shuffle=True, random_state=42)
    epochs_data_train = epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    ###############################################################################
    # Classification with Minimum distance to mean
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    pl = Pipeline([("mdm", mdm)])
    params = {"mdm__metric": [dict(mean='riemann', distance='riemann')]}
    clf = GridSearchCV(pl, params, n_jobs=-1, cv=5, return_train_score=True)
    clf.fit(cov_data_train, labels)
    df = pd.DataFrame(clf.cv_results_)
    return df
Example #32
def svm_tangent_space_classifier(features, labels):
    """A tangent space classifier with svm for 3 classes.

    Parameters
    ----------
    features : array
        An array of features
    labels : array
        True labels

    Returns
    -------
    sklearn classifier
        Learnt classifier.

    """
    # Construct sklearn pipeline
    clf = Pipeline([('cov_transform', Covariances('oas')),
                    ('tangent_space', TangentSpace(metric='riemann')),
                    ('svm_classify', SVC(kernel='rbf', gamma='auto'))])
    # Train the classifier
    clf.fit(features, labels)

    return clf
Example #33
raw.filter(7., 35., method='iir', picks=picks)


epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, add_eeg_ref=False, verbose=False)
labels = epochs.events[:, -1] - 2

# get epochs
epochs_data = epochs.get_data()

###############################################################################
# Pairwise distance based permutation test
###############################################################################

covest = Covariances()

Fs = 160
window = 2*Fs
Nwindow = 20
Ns = epochs_data.shape[2]
step = int((Ns-window)/Nwindow)
time_bins = range(0, Ns-window, step)

pv = []
Fv = []
# For each frequency bin, estimate the stats
t_init = time()
for t in time_bins:
    covmats = covest.fit_transform(epochs_data[:, ::1, t:(t+window)])
    p_test = PermutationDistance(1000, metric='riemann', mode='pairwise')
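    # (Snippet truncated here. A plausible continuation, following the
    # pattern of pyriemann's permutation-test examples; not the source's
    # exact code.)
    p, F = p_test.test(covmats, labels)
    pv.append(p)
    Fv.append(F[0])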
Example #34
def test_covariances():
    """Test fit Covariances"""
    x = np.random.randn(2, 3, 100)
    cov = Covariances()
    cov.fit(x)
    cov.fit_transform(x)