def test_TSclassifier_classifier(get_covmats, get_labels):
    """Test TS Classifier"""
    n_trials, n_channels, n_classes = 6, 3, 2
    covmats = get_covmats(n_trials, n_channels)
    labels = get_labels(n_trials, n_classes)
    clf = TSclassifier(clf=DummyClassifier())
    clf.fit(covmats, labels).predict(covmats)
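For reference, a minimal sketch (not taken from the snippets above, assuming only the public pyriemann and scikit-learn APIs) of the pipeline TSclassifier wraps internally: a tangent-space projection of the SPD matrices followed by a LogisticRegression.

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from pyriemann.tangentspace import TangentSpace

# Roughly equivalent to TSclassifier() with default parameters:
# map covariance matrices to tangent-space vectors, then classify them.
equivalent_clf = make_pipeline(TangentSpace(metric="riemann"),
                               LogisticRegression())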
class wrapper_TSclassifier(machine_learning_method):
    """wrapper for pyriemann TSclassifier"""
    def __init__(self, method_name, method_args):
        super(wrapper_TSclassifier, self).__init__(method_name, method_args)
        self.init_method()

    def init_method(self):
        self.classifier = TSclassifier(metric=self.method_args['metric'],
                                       tsupdate=self.method_args['tsupdate'])

    def fit(self, X, y):
        return self.classifier.fit(X, y)

    def predict(self, X):
        return self.classifier.predict(X)
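A hypothetical instantiation of the wrapper above (the machine_learning_method base class is not shown; this assumes it simply stores method_name and method_args). The two keys passed here are the ones init_method reads.

# Hypothetical usage; 'metric' and 'tsupdate' are the keys consumed by init_method.
method = wrapper_TSclassifier("TSclassifier",
                              {"metric": "riemann", "tsupdate": False})
# method.fit(cov_matrices, labels)
# predictions = method.predict(cov_matrices)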
def test_tsclassifier_clf_error(get_covmats, get_labels):
    """Test TS if not Classifier"""
    n_matrices, n_channels, n_classes = 6, 3, 2
    covmats = get_covmats(n_matrices, n_channels)
    labels = get_labels(n_matrices, n_classes)
    with pytest.raises(TypeError):
        TSclassifier(clf=Covariances()).fit(covmats, labels)
Example #4
def test_TSclassifier():
    """Test TS Classifier"""
    covset = generate_cov(40, 3)
    labels = np.array([0, 1]).repeat(20)

    with pytest.raises(TypeError):
        TSclassifier(clf='666')

    clf = TSclassifier()
    clf.fit(covset, labels)
    assert_array_equal(clf.classes_, np.array([0, 1]))
    clf.predict(covset)
    clf.predict_proba(covset)
def test_TSclassifier():
    """Test TS Classifier"""
    covset = generate_cov(40, 3)
    labels = np.array([0, 1]).repeat(20)

    assert_raises(TypeError, TSclassifier, clf='666')
    clf = TSclassifier()
    clf.fit(covset, labels)
    clf.predict(covset)
    clf.predict_proba(covset)
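As an optional extension of the test above (a sketch, not part of the original), the predict_proba output can be shape-checked: one column per class.

# Assumes clf, covset and labels from the test above.
proba = clf.predict_proba(covset)
assert proba.shape == (len(labels), len(clf.classes_))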
Example #6
def main(state, freq):
    """Where the magic happens"""
    print(state, freq)
    if FULL_TRIAL:
        labels = np.concatenate((np.ones(18), np.zeros(18)))
        groups = range(36)
    elif SUBSAMPLE:
        info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
        n_trials = info_data.min().min()
        n_subs = len(info_data) - 1
        groups = [i for i in range(n_subs) for _ in range(n_trials)]
        n_total = n_trials * n_subs
        labels = [0 if i < n_total / 2 else 1 for i in range(n_total)]
    else:
        labels = loadmat(LABEL_PATH / state + "_labels.mat")["y"].ravel()
        labels, groups = create_groups(labels)

    file_path = (SAVE_PATH / "results" / PREFIX + NAME +
                 "_{}_{}_{}_{:.2f}.mat".format(state, freq, WINDOW, OVERLAP))

    if not file_path.isfile():
        file_name = NAME + "_{}_{}_{}_{:.2f}.mat".format(
            state, freq, WINDOW, OVERLAP)
        data_file_path = SAVE_PATH / file_name

        if data_file_path.isfile():
            final_save = {}

            random_seed = 0
            data = loadmat(data_file_path)
            if FULL_TRIAL:
                data = data["data"]
            elif SUBSAMPLE:
                data = prepare_data(data,
                                    n_trials=n_trials,
                                    random_state=random_seed)
            else:
                data = prepare_data(data)

            sl2go = StratifiedLeave2GroupsOut()
            lda = LDA()
            clf = TSclassifier(clf=lda)
            best_combin, best_score = backward_selection(
                clf, data, labels, sl2go, groups)

            final_save = {
                "best_combin_index": best_combin,
                "best_combin": CHANNEL_NAMES[best_combin],
                "score": best_score,
            }
            savemat(file_path, final_save)

            print(
                f"Best combin: {CHANNEL_NAMES[best_combin]}, score: {best_score}"
            )

        else:
            print(data_file_path.name + " Not found")
Example #7
def check_other_classifiers(train_X, train_y, test_X, test_y):
    from pyriemann.classification import MDM, TSclassifier
    from pyriemann.estimation import Covariances
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import KFold, cross_val_score
    from sklearn.pipeline import Pipeline
    from mne.decoding import CSP
    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd
    import seaborn as sns

    train_y = [np.where(i == 1)[0][0] for i in train_y]
    test_y = [np.where(i == 1)[0][0] for i in test_y]

    cov_data_train = Covariances().transform(train_X)
    cov_data_test = Covariances().transform(test_X)
    cv = KFold(n_splits=10, shuffle=True, random_state=42)
    clf = TSclassifier()
    scores = cross_val_score(clf, cov_data_train, train_y, cv=cv, n_jobs=1)
    print("Tangent space Classification accuracy: ", np.mean(scores))

    clf = TSclassifier()
    clf.fit(cov_data_train, train_y)
    print(clf.score(cov_data_test, test_y))

    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    scores = cross_val_score(mdm, cov_data_train, train_y, cv=cv, n_jobs=1)
    print("MDM Classification accuracy: ", np.mean(scores))
    mdm = MDM()
    mdm.fit(cov_data_train, train_y)

    fig, axes = plt.subplots(1, 2)
    ch_names = [ch for ch in range(8)]

    df = pd.DataFrame(data=mdm.covmeans_[0], index=ch_names, columns=ch_names)
    g = sns.heatmap(df,
                    ax=axes[0],
                    square=True,
                    cbar=False,
                    xticklabels=2,
                    yticklabels=2)
    g.set_title('Mean covariance - feet')

    df = pd.DataFrame(data=mdm.covmeans_[1], index=ch_names, columns=ch_names)
    g = sns.heatmap(df,
                    ax=axes[1],
                    square=True,
                    cbar=False,
                    xticklabels=2,
                    yticklabels=2)
    plt.xticks(rotation='vertical')
    plt.yticks(rotation='horizontal')
    g.set_title('Mean covariance - hands')

    # dirty fix
    plt.sca(axes[0])
    plt.xticks(rotation='vertical')
    plt.yticks(rotation='horizontal')
    plt.savefig("meancovmat.png")
    plt.show()
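Side note on the label handling in check_other_classifiers: the per-row np.where call converts one-hot rows to class indices; a vectorized equivalent (toy data, for illustration only) is np.argmax along axis 1.

import numpy as np

# Equivalent one-hot -> class-index conversion.
one_hot = np.eye(2)[[0, 1, 1, 0]]        # toy one-hot labels
class_idx = np.argmax(one_hot, axis=1)   # -> array([0, 1, 1, 0])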
def RHvsLH_cross(out_dir, pipelines):
    name = 'RHvsLH_cross'
    datasets = utils.dataset_search('imagery',
                                    events=['right_hand', 'left_hand'],
                                    has_all_events=True,
                                    min_subjects=2,
                                    multi_session=False)

    print(datasets)
    pipelines = OrderedDict()
    pipelines['TS'] = make_pipeline(Covariances('oas'), TSclassifier())
    pipelines['CSP+LDA'] = make_pipeline(Covariances('oas'), CSP(6), LDA())
    pipelines['CSP+SVM'] = make_pipeline(Covariances('oas'), CSP(6), SVC())

    context = LeftRightImagery(pipelines, CrossSubjectEvaluation(n_jobs=10),
                               datasets)

    results = context.process()
def test_TSclassifier_classifier_error():
    """Test TS if not Classifier"""
    with pytest.raises(TypeError):
        TSclassifier(clf=Covariances())
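Conversely, any scikit-learn classifier can be passed as clf; a minimal sketch (the SVC choice is illustrative, not from the tests). predict_proba is only available when the wrapped classifier provides it, e.g. SVC(probability=True) or the default LogisticRegression.

from sklearn.svm import SVC
from pyriemann.classification import TSclassifier

# Any sklearn classifier is accepted; passing a transformer such as Covariances() raises TypeError.
clf = TSclassifier(clf=SVC(kernel="linear", probability=True))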
    def fit(self, X, y):
        # validate
        X, y = check_X_y(X, y, allow_nd=True)
        X = check_array(X, allow_nd=True)

        # set internal vars
        self.classes_ = unique_labels(y)
        self.X_ = X
        self.y_ = y

        ##################################################
        # split X into train and test sets, so that
        # grid search can be performed on train set only
        seed = 7
        np.random.seed(seed)
        #X_TRAIN, X_TEST, y_TRAIN, y_TEST = train_test_split(X, y, test_size=0.25, random_state=seed)

        for epoch_trim in self.epoch_bounds:
            for bandpass in self.bandpass_filters:

                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, test_size=0.25, random_state=seed)

                # X_train = np.copy(X_TRAIN)
                # X_test = np.copy(X_TEST)
                # y_train = np.copy(y_TRAIN)
                # y_test = np.copy(y_TEST)

                # separate out inputs that are tuples
                bandpass_start, bandpass_end = bandpass
                epoch_trim_start, epoch_trim_end = epoch_trim

                # bandpass filter coefficients
                b, a = butter(
                    5,
                    np.array([bandpass_start, bandpass_end]) /
                    (self.sfreq * 0.5), 'bandpass')

                # filter and crop TRAINING SET
                X_train = self.preprocess_X(X_train, b, a, epoch_trim_start,
                                            epoch_trim_end)
                # validate
                X_train, y_train = check_X_y(X_train, y_train, allow_nd=True)
                X_train = check_array(X_train, allow_nd=True)

                # filter and crop TEST SET
                X_test = self.preprocess_X(X_test, b, a, epoch_trim_start,
                                           epoch_trim_end)
                # validate
                X_test, y_test = check_X_y(X_test, y_test, allow_nd=True)
                X_test = check_array(X_test, allow_nd=True)

                ###########################################################################
                # self-tune CSP to find optimal number of filters to use at these settings
                #[best_num_filters, best_num_filters_score] = self.self_tune(X_train, y_train)
                best_num_filters = 5

                # as an option, we could tune optimal CSP filter num against complete train set
                #X_tune = self.preprocess_X(X, b, a, epoch_trim_start, epoch_trim_end)
                #[best_num_filters, best_num_filters_score] = self.self_tune(X_tune, y)

                # now use this insight to really fit with optimal CSP spatial filters
                """
				reg : float | str | None (default None)
			        if not None, allow regularization for covariance estimation
			        if float, shrinkage covariance is used (0 <= shrinkage <= 1).
			        if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
			        or Oracle Approximating Shrinkage ('oas').
				"""
                transformer = CSP(n_components=best_num_filters,
                                  reg='ledoit_wolf')
                transformer.fit(X_train, y_train)

                # use these CSP spatial filters to transform train and test
                spatial_filters_train = transformer.transform(X_train)
                spatial_filters_test = transformer.transform(X_test)

                # put this back in as failsafe if NaN or inf starts cropping up
                # spatial_filters_train = np.nan_to_num(spatial_filters_train)
                # check_X_y(spatial_filters_train, y_train)
                # spatial_filters_test = np.nan_to_num(spatial_filters_test)
                # check_X_y(spatial_filters_test, y_test)

                # train LDA
                classifier = LinearDiscriminantAnalysis()
                classifier.fit(spatial_filters_train, y_train)
                score = classifier.score(spatial_filters_test, y_test)

                #print "current score",score
                print "bandpass:"******"epoch window:", epoch_trim_start, epoch_trim_end
                #print best_num_filters,"filters chosen"

                # put in ranked order Top 10 list
                idx = bisect(self.ranked_scores, score)
                self.ranked_scores.insert(idx, score)
                self.ranked_scores_opts.insert(
                    idx,
                    dict(bandpass=bandpass,
                         epoch_trim=epoch_trim,
                         filters=best_num_filters))
                self.ranked_classifiers.insert(idx, classifier)
                self.ranked_transformers.insert(idx, transformer)

                if len(self.ranked_scores) > self.num_votes:
                    self.ranked_scores.pop(0)
                if len(self.ranked_scores_opts) > self.num_votes:
                    self.ranked_scores_opts.pop(0)
                if len(self.ranked_classifiers) > self.num_votes:
                    self.ranked_classifiers.pop(0)
                if len(self.ranked_transformers) > self.num_votes:
                    self.ranked_transformers.pop(0)
                """
				Covariance computation
				"""
                # compute covariance matrices
                cov_data_train = covariances(X=X_train)
                cov_data_test = covariances(X=X_test)

                clf_mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
                clf_mdm.fit(cov_data_train, y_train)
                score_mdm = clf_mdm.score(cov_data_test, y_test)
                # print "MDM prediction score:",score_mdm
                # put in ranked order Top 10 list
                idx = bisect(self.ranked_scores_mdm, score_mdm)
                self.ranked_scores_mdm.insert(idx, score_mdm)
                self.ranked_scores_opts_mdm.insert(
                    idx,
                    dict(bandpass=bandpass,
                         epoch_trim=epoch_trim,
                         filters=best_num_filters))
                self.ranked_classifiers_mdm.insert(idx, clf_mdm)

                if len(self.ranked_scores_mdm) > self.num_votes:
                    self.ranked_scores_mdm.pop(0)
                if len(self.ranked_scores_opts_mdm) > self.num_votes:
                    self.ranked_scores_opts_mdm.pop(0)
                if len(self.ranked_classifiers_mdm) > self.num_votes:
                    self.ranked_classifiers_mdm.pop(0)

                clf_ts = TSclassifier()
                clf_ts.fit(cov_data_train, y_train)
                score_ts = clf_ts.score(cov_data_test, y_test)
                # put in ranked order Top 10 list
                idx = bisect(self.ranked_scores_ts, score_ts)
                self.ranked_scores_ts.insert(idx, score_ts)
                self.ranked_scores_opts_ts.insert(
                    idx,
                    dict(bandpass=bandpass,
                         epoch_trim=epoch_trim,
                         filters=best_num_filters))
                self.ranked_classifiers_ts.insert(idx, clf_ts)

                if len(self.ranked_scores_ts) > self.num_votes:
                    self.ranked_scores_ts.pop(0)
                if len(self.ranked_scores_opts_ts) > self.num_votes:
                    self.ranked_scores_opts_ts.pop(0)
                if len(self.ranked_classifiers_ts) > self.num_votes:
                    self.ranked_classifiers_ts.pop(0)

                print "CSP+LDA score:", score, "Tangent space w/LR score:", score_ts

                print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
                print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
                print "    T O P  ", self.num_votes, "  C L A S S I F I E R S"
                print
                #j=1
                for i in xrange(len(self.ranked_scores)):
                    print i, ",", round(self.ranked_scores[i], 4), ",",
                    print self.ranked_scores_opts[i]
                print "-------------------------------------"
                for i in xrange(len(self.ranked_scores_ts)):
                    print i, ",", round(self.ranked_scores_ts[i], 4), ",",
                    print self.ranked_scores_opts_ts[i]
                print "-------------------------------------"
                for i in xrange(len(self.ranked_scores_mdm)):
                    print i, ",", round(self.ranked_scores_mdm[i], 4), ",",
                    print self.ranked_scores_opts_mdm[i]

        # finish up, set the flag to indicate "fitted" state
        self.fit_ = True

        # Return the classifier
        return self
Example #12
def get_score(subject=7):
    ###############################################################################
    # Set parameters and read data

    # avoid classification of evoked responses by using epochs that start 1s after
    # cue onset.
    tmin, tmax = 1., 2.
    event_id = dict(hands=2, feet=3)

    runs = [6, 10, 14]  # motor imagery: hands vs feet

    raw_files = [
        read_raw_edf(f, preload=True) for f in eegbci.load_data(subject, runs)
    ]
    raw = concatenate_raws(raw_files)

    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')
    # subsample elecs
    picks = picks[::2]

    # Apply band-pass filter
    raw.filter(7., 35., method='iir', picks=picks)

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    # Read epochs (train will be done only between 1 and 2s)
    # Testing will be done with a running classifier
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    verbose=False)
    labels = epochs.events[:, -1] - 2

    # cross validation
    cv = KFold(n_splits=10, shuffle=True, random_state=42)
    # get epochs
    epochs_data_train = 1e6 * epochs.get_data()

    # compute covariance matrices
    cov_data_train = Covariances().transform(epochs_data_train)

    ###############################################################################
    # Classification with Minimum distance to mean
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))

    # Use scikit-learn cross_val_score to evaluate the MDM classifier
    scores = cross_val_score(mdm, cov_data_train, labels, cv=cv, n_jobs=1)
    mdm_score = np.mean(scores)
    print("MDM Classification accuracy: %f" % mdm_score)

    ###############################################################################
    # Classification with Tangent Space Logistic Regression
    clf = TSclassifier()
    # Use scikit-learn Pipeline with cross_val_score function
    scores = cross_val_score(clf, cov_data_train, labels, cv=cv, n_jobs=1)

    # Printing the results
    class_balance = np.mean(labels == labels[0])
    class_balance = max(class_balance, 1. - class_balance)
    ts_score = np.mean(scores)
    print("Tangent space Classification accuracy: %f / Chance level: %f" %
          (ts_score, class_balance))

    ###############################################################################

    return [subject, mdm_score, ts_score]
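A minimal driver sketch (the subject range is illustrative): collect the per-subject [subject, mdm_score, ts_score] rows returned by get_score into a DataFrame.

import pandas as pd

# Hypothetical loop over a few subjects, for illustration only.
rows = [get_score(subject=s) for s in (1, 2, 3)]
results = pd.DataFrame(rows, columns=["subject", "mdm_score", "ts_score"])
print(results.mean(numeric_only=True))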
Example #14
import logging
import coloredlogs
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
coloredlogs.install(level=logging.DEBUG)

datasets = utils.dataset_search('imagery',
                                events=['supination', 'hand_close'],
                                has_all_events=False,
                                min_subjects=2,
                                multi_session=False)

for d in datasets:
    d.subject_list = d.subject_list[:10]

paradigm = ImageryNClass(2)
context = WithinSessionEvaluation(paradigm=paradigm,
                                  datasets=datasets,
                                  random_state=42)

pipelines = OrderedDict()
pipelines['av+TS'] = make_pipeline(Covariances(estimator='oas'),
                                   TSclassifier())
pipelines['av+CSP+LDA'] = make_pipeline(Covariances(estimator='oas'), CSP(8),
                                        LDA())

results = context.process(pipelines, overwrite=True)

analyze(results, './')
N_SUBS = len(info_data) - 1
groups = [i for i in range(N_SUBS) for _ in range(N_TRIALS)]
N_TOTAL = N_TRIALS * N_SUBS
labels = [0 if i < N_TOTAL / 2 else 1 for i in range(N_TOTAL)]

file_name = prefix + name + "n153_{}.mat".format(state)

save_file_path = SAVE_PATH / "results" / file_name

data_file_path = SAVE_PATH / name + "_{}.mat".format(state)

final_save = None

data = loadmat(data_file_path)
data = prepare_data(data, n_trials=N_TRIALS, random_state=0)

sl2go = StratifiedLeave2GroupsOut()
lda = LDA()
clf = TSclassifier(clf=lda)
score = cross_val_score(clf, data, labels, groups=groups, cv=sl2go, n_jobs=-1)
print(score)
# save['acc_bootstrap'] = [save['acc_score']]
# save['auc_bootstrap'] = [save['auc_score']]
# if final_save is None:
#     final_save = save
# else:
#     for key, value in final_save.items():
#         final_save[key] = final_save[key] + save[key]

# savemat(save_file_path, final_save)
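A short follow-up sketch (assumes score is the array returned by cross_val_score above): summarize the fold scores with mean and standard deviation.

import numpy as np

print("accuracy: {:.2f} (+/- {:.2f})".format(np.mean(score), np.std(score)))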
Example #16
def classif_cov(state):
    """Where the magic happens"""
    print(state)
    if FULL_TRIAL:
        labels = np.concatenate((np.ones(18), np.zeros(18)))
        groups = range(36)
    elif SUBSAMPLE:
        info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
        n_trials = info_data.min().min()
        n_subs = len(info_data) - 1
        groups = [i for i in range(n_subs) for _ in range(n_trials)]
        n_total = n_trials * n_subs
        labels = [0 if i < n_total / 2 else 1 for i in range(n_total)]
    else:
        labels = loadmat(LABEL_PATH / state + "_labels.mat")["y"].ravel()
        labels, groups = create_groups(labels)

    file_path = SAVE_PATH / "results" / PREFIX + NAME + "_{}.mat".format(state)
    if not file_path.isfile():
        n_rep = 0
    else:
        final_save = proper_loadmat(file_path)
        n_rep = final_save["n_rep"]
    print("starting from i={}".format(n_rep))

    file_name = NAME + "_{}.mat".format(state)
    data_file_path = SAVE_PATH / file_name

    if data_file_path.isfile():
        data_og = loadmat(data_file_path)
        for i in range(n_rep, N_BOOTSTRAPS):
            if FULL_TRIAL:
                data = data_og["data"]
            elif SUBSAMPLE:
                data = prepare_data(data_og, n_trials=n_trials, random_state=i)
            else:
                data = prepare_data(data_og)

            if REDUCED:
                reduced_data = []
                for submat in data:
                    temp_a = np.delete(submat, i, 0)
                    temp_b = np.delete(temp_a, i, 1)
                    reduced_data.append(temp_b)
                data = np.asarray(reduced_data)

            if FULL_TRIAL:
                crossval = SSS(9)
            else:
                crossval = StratifiedLeave2GroupsOut()
            lda = LDA()
            clf = TSclassifier(clf=lda)
            save = classification(clf,
                                  crossval,
                                  data,
                                  labels,
                                  groups,
                                  N_PERM,
                                  n_jobs=-1)

            print(save["acc_score"])
            if i == 0:
                final_save = save
            elif BOOTSTRAP or REDUCED:
                for key, value in save.items():
                    final_save[key] += value

            final_save["n_rep"] = i + 1
            savemat(file_path, final_save)

        final_save["n_rep"] = N_BOOTSTRAPS
        if BOOTSTRAP:
            final_save["auc_score"] = np.mean(final_save["auc_score"])
            final_save["acc_score"] = np.mean(final_save["acc_score"])
        savemat(file_path, final_save)

        print("accuracy for %s %s : %0.2f (+/- %0.2f)" %
              (state, np.mean(save["acc_score"]), np.std(save["acc"])))
        if PERM:
            print("pval = {}".format(save["acc_pvalue"]))

    else:
        print(data_file_path.name + " Not found")
Example #17
def main(state):
    """Where the magic happens"""
    print(state)
    if FULL_TRIAL:
        labels = np.concatenate((np.ones(18), np.zeros(18)))
        groups = range(36)
    elif SUBSAMPLE:
        info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
        ##### FOR A TEST #####
        info_data = info_data["SWS"]
        ##### FOR A TEST #####
        N_TRIALS = info_data.min().min()
        N_SUBS = len(info_data) - 1
        groups = [i for i in range(N_SUBS) for _ in range(N_TRIALS)]
        N_TOTAL = N_TRIALS * N_SUBS
        labels = [0 if i < N_TOTAL / 2 else 1 for i in range(N_TOTAL)]
    else:
        labels = loadmat(LABEL_PATH / state + "_labels.mat")["y"].ravel()
        labels, groups = create_groups(labels)

    file_name = prefix + name + "n153_{}.mat".format(state)

    save_file_path = SAVE_PATH / "results" / file_name

    if not save_file_path.isfile():
        data_file_path = SAVE_PATH / name + "_{}.mat".format(state)

        if data_file_path.isfile():
            final_save = None

            for i in range(N_BOOTSTRAPS):
                data = loadmat(data_file_path)
                if FULL_TRIAL:
                    data = data["data"]
                elif SUBSAMPLE:
                    data = prepare_data(data,
                                        n_trials=N_TRIALS,
                                        random_state=i)
                else:
                    data = prepare_data(data)

                sl2go = StratifiedLeave2GroupsOut()
                lda = LDA()
                clf = TSclassifier(clf=lda)
                save = classification(clf,
                                      sl2go,
                                      data,
                                      labels,
                                      groups,
                                      N_PERM,
                                      n_jobs=-1)
                save["acc_bootstrap"] = [save["acc_score"]]
                save["auc_bootstrap"] = [save["auc_score"]]
                if final_save is None:
                    final_save = save
                else:
                    for key, value in final_save.items():
                        final_save[key] = final_save[key] + save[key]

            savemat(save_file_path, final_save)

            print("accuracy for %s : %0.2f (+/- %0.2f)" %
                  (state, save["acc_score"], np.std(save["acc"])))

        else:
            print(data_file_path.name + " Not found")
##    
##    # make kernels
#    K_train = np.dot(training_data, np.transpose(training_data))
#    K_test = np.dot(testing_data, np.transpose(training_data))
##    K_train = K_lin_timecourse[train_index, :][:, train_index]
##    K_test = K_lin_timecourse[test_index, :][:, train_index]
#    
#    
#    # train classifier and predict
#    clf.fit(K_train, training_labels)
#    preds_rep = np.append(preds_rep, clf.predict(K_test))
#    labels_rep = np.append(labels_rep, testing_labels)
    
#print np.transpose(np.vstack((np.transpose(labels), np.transpose(preds_rep))))
print metrics.accuracy_score(labels_rep, preds_rep)

# try pyriemann's tangent space classifier
# adapted from http://pythonhosted.org/pyriemann/auto_examples/motor-imagery/plot_single.html#sphx-glr-auto-examples-motor-imagery-plot-single-py
# reshape data for TSclassifier
timecourse_connectivity_data_TSclassifier = np.reshape(timecourse_connectivity_data, (100, 90, 90))
clf = TSclassifier()
# clf = MDM()  # alternative: swap in the MDM classifier instead
#cv = cross_validation.KFold(len(labels), 10, shuffle=True, random_state=42)
cv = cross_validation.StratifiedShuffleSplit(labels, n_iter=100, test_size=0.1)
scores = cross_validation.cross_val_score(clf, timecourse_connectivity_data_TSclassifier, labels, cv=cv, n_jobs=1)
print scores
print np.mean(scores)

    
    
Example #19
def classif_cosp(state, n_jobs=-1):
    global CHANGES
    print(state, "multif")
    if SUBSAMPLE or ADAPT:
        info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
        if SUBSAMPLE:
            n_trials = info_data.min().min()
            # n_trials = 30
        elif ADAPT:
            n_trials = info_data.min()[state]
    elif FULL_TRIAL:
        groups = range(36)
    labels_og = INIT_LABELS

    file_path = (SAVE_PATH / "results" / PREFIX + NAME +
                 "_{}_{}_{:.2f}.mat".format(state, WINDOW, OVERLAP))

    if not file_path.isfile():
        n_rep = 0
    else:
        final_save = proper_loadmat(file_path)
        n_rep = int(final_save["n_rep"])
        n_splits = int(final_save["n_splits"])
    print("Starting from i={}".format(n_rep))

    if FULL_TRIAL:
        crossval = SSS(9)
    else:
        crossval = StratifiedShuffleGroupSplit(2)
    lda = LDA()
    clf = TSclassifier(clf=lda)

    for i in range(n_rep, N_BOOTSTRAPS):
        CHANGES = True
        data_freqs = []
        for freq in FREQ_DICT:
            file_name = NAME + "_{}_{}_{}_{:.2f}.mat".format(
                state, freq, WINDOW, OVERLAP)
            data_file_path = SAVE_PATH / file_name
            data_og = loadmat(data_file_path)["data"].ravel()
            data_og = np.asarray([sub.squeeze() for sub in data_og])
            if SUBSAMPLE or ADAPT:
                data, labels, groups = prepare_data(data_og,
                                                    labels_og,
                                                    n_trials=n_trials,
                                                    random_state=i)
            else:
                data, labels, groups = prepare_data(data_og, labels_og)
            data_freqs.append(data)
            n_splits = crossval.get_n_splits(None, labels, groups)

        data_freqs = np.asarray(data_freqs).swapaxes(0, 1).swapaxes(
            1, 3).swapaxes(1, 2)
        save = classification(clf,
                              crossval,
                              data_freqs,
                              labels,
                              groups,
                              N_PERM,
                              n_jobs=n_jobs)

        if i == 0:
            final_save = save
        elif BOOTSTRAP:
            for key, value in save.items():
                if key != "n_splits":
                    final_save[key] += value

        final_save["n_rep"] = i + 1
        if n_jobs == -1:
            savemat(file_path, final_save)

    final_save["auc_score"] = np.mean(final_save.get("auc_score", 0))
    final_save["acc_score"] = np.mean(final_save["acc_score"])
    if CHANGES:
        savemat(file_path, final_save)

    to_print = "accuracy for {} {} : {:.2f}".format(state, freq,
                                                    final_save["acc_score"])
    if BOOTSTRAP:
        standev = np.std([
            np.mean(final_save["acc"][i * n_splits:(i + 1) * n_splits])
            for i in range(N_BOOTSTRAPS)
        ])
        to_print += " (+/- {:.2f})".format(standev)
    print(to_print)
    if PERM:
        print("pval = {}".format(final_save["acc_pvalue"]))
Example #20
from pyriemann.spatialfilters import CSP
from pyriemann.classification import TSclassifier, MDM
from sklearn.pipeline import make_pipeline

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from collections import OrderedDict

context = BNCI2014001MIHands()

pipelines = OrderedDict()
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM())
pipelines['TS'] = make_pipeline(Covariances('oas'), TSclassifier())
pipelines['CSP+LDA'] = make_pipeline(Covariances('oas'), CSP(8), LDA())

results = context.evaluate(pipelines, verbose=True)

for p in results.keys():
    results[p].to_csv('../../results/MotorImagery/BNCI_2014_001/%s.csv' % p)

results = pd.concat(results.values())
print(results.groupby('Pipeline').mean())

res = results.pivot(values='Score', columns='Pipeline', index='Subject')
sns.lmplot(data=res, x='CSP+LDA', y='TS', fit_reg=False)
plt.xlim(0.4, 1)
plt.ylim(0.4, 1)
plt.plot([0.4, 1], [0.4, 1], ls='--', c='k')
Example #21
def classif_subcosp(state, freq, elec, n_jobs=-1):
    global CHANGES
    print(state, freq)
    if SUBSAMPLE or ADAPT:
        info_data = pd.read_csv(SAVE_PATH.parent / "info_data.csv")[STATE_LIST]
        if SUBSAMPLE:
            n_trials = info_data.min().min()
            n_trials = 61
        elif ADAPT:
            n_trials = info_data.min()[state]
    elif FULL_TRIAL:
        groups = range(36)
    labels_og = INIT_LABELS

    file_path = (
        SAVE_PATH / "results" / PREFIX + NAME +
        "_{}_{}_{}_{}_{:.2f}.npy".format(state, freq, elec, WINDOW, OVERLAP))

    if not file_path.isfile():
        n_rep = 0
    else:
        final_save = np.load(file_path, allow_pickle=True).item()
        n_rep = int(final_save["n_rep"])
        n_splits = int(final_save["n_splits"])
    print("Starting from i={}".format(n_rep))

    file_name = NAME + "_{}_{}_{}_{}_{:.2f}.npy".format(
        state, freq, elec, WINDOW, OVERLAP)
    data_file_path = SAVE_PATH / file_name

    data_og = np.load(data_file_path)
    if FULL_TRIAL:
        cv = SSS(9)
    else:
        cv = StratifiedShuffleGroupSplit(2)
    lda = LDA()
    clf = TSclassifier(clf=lda)

    for i in range(n_rep, N_BOOTSTRAPS):
        CHANGES = True
        if FULL_TRIAL:
            data = data_og["data"]
        elif SUBSAMPLE or ADAPT:
            data, labels, groups = prepare_data(data_og,
                                                labels_og,
                                                n_trials=n_trials,
                                                random_state=i)
        else:
            data, labels, groups = prepare_data(data_og, labels_og)
        n_splits = cv.get_n_splits(None, labels, groups)

        save = classification(clf,
                              cv,
                              data,
                              labels,
                              groups,
                              N_PERM,
                              n_jobs=n_jobs)

        if i == 0:
            final_save = save
        elif BOOTSTRAP:
            for key, value in save.items():
                if key != "n_splits":
                    final_save[key] += value

        final_save["n_rep"] = i + 1
        np.save(file_path, final_save)

    final_save["auc_score"] = np.mean(final_save.get("auc_score", 0))
    final_save["acc_score"] = np.mean(final_save["acc_score"])
    if CHANGES:
        np.save(file_path, final_save)

    to_print = "accuracy for {} {} : {:.2f}".format(state, freq,
                                                    final_save["acc_score"])
    if BOOTSTRAP:
        standev = np.std([
            np.mean(final_save["acc"][i * n_splits:(i + 1) * n_splits])
            for i in range(N_BOOTSTRAPS)
        ])
        to_print += " (+/- {:.2f})".format(standev)
    print(to_print)
    if PERM:
        print("pval = {}".format(final_save["acc_pvalue"]))
            print('MDM: {:.4f}'.format(np.sum(pred == test_label) / box_length))
            MDM_record.append(np.sum(pred == test_label) / box_length)
            print('-----------------------------------------')

            Fgmdm = FgMDM(metric=dict(mean='riemann', distance='riemann'))

            Fgmdm.fit(train, train_label)
            pred = Fgmdm.predict(test)

            print('FGMDM: {:.4f}'.format(
                np.sum(pred == test_label) / box_length))
            FGMDM_record.append(np.sum(pred == test_label) / box_length)
            print('-----------------------------------------')

            clf = TSclassifier()
            clf.fit(train, train_label)
            pred = clf.predict(test)

            print('TSC: {:.4f}'.format(np.sum(pred == test_label) / box_length))
            TSC_record.append(np.sum(pred == test_label) / box_length)
            print('-----------------------------------------')

            lr = LogisticRegression()
            csp = CSP(n_components=4, reg='ledoit_wolf', log=True)
            clf = Pipeline([('CSP', csp), ('LogisticRegression', lr)])
            clf.fit(train_CSP, train_label)
            pred = clf.predict(test_CSP)

            print('CSP_lr: {:.4f}'.format(
                np.sum(pred == test_label) / box_length))