コード例 #1
0
ファイル: cross_val.py プロジェクト: chromee/mne-example
def get_cross_val_score(subject,
                        runs=None,
                        event_id=None,
                        n_components=4):
    """Run 5-fold CSP + linear-SVM cross-validation for one subject.

    Parameters
    ----------
    subject : int
        Subject number passed through to ``get_raw``.
    runs : list of int, optional
        Recording runs to load; defaults to [6, 10, 14].
    event_id : dict, optional
        Event name -> trigger code mapping; defaults to
        ``dict(rest=1, hands=2, feet=3)``.
    n_components : int
        Number of CSP components to keep.

    Returns
    -------
    tuple of (float, float)
        Mean training-fold score and mean test-fold score.
    """
    # Avoid mutable default arguments: build fresh objects per call.
    if runs is None:
        runs = [6, 10, 14]
    if event_id is None:
        event_id = dict(rest=1, hands=2, feet=3)

    tmin, tmax = -1., 4.
    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    # EEG-only channel selection; drop MEG/stim/EOG and bad channels.
    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')

    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True)
    # Train only on the 1-2 s post-cue window.
    epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
    # Shift trigger codes (1..3) down to 0-based class labels (0..2).
    labels = epochs.events[:, -1] - 1

    epochs_data_train = epochs_train.get_data()

    cv = KFold(n_splits=5)

    svc = svm.SVC(kernel="linear")
    csp = mne.decoding.CSP(n_components=n_components,
                           reg=None,
                           log=True,
                           norm_trace=False)

    scores = []
    self_scores = []
    for train, test in cv.split(epochs_data_train):
        # Fit CSP and SVM on the training fold only (no leakage).
        x = epochs_data_train[train]
        y = labels[train]
        x = csp.fit_transform(x, y)
        svc.fit(x, y)
        self_scores.append(svc.score(x, y))

        # Evaluate on the held-out fold using the fitted CSP filters.
        x_test = epochs_data_train[test]
        y_test = labels[test]
        x_test = csp.transform(x_test)
        score = svc.score(x_test, y_test)
        scores.append(score)
    return np.mean(self_scores), np.mean(scores)
コード例 #2
0
def grid_search(subject=1):
    """Grid-search a CSP -> FFT -> SVM pipeline for one subject and dump
    the full ``cv_results_`` table to an Excel file.

    Parameters
    ----------
    subject : int
        Subject number passed through to ``get_raw``; also used in the
        output file name.
    """
    runs = [6, 10, 14]
    event_id = dict(rest=1, hands=2, feet=3)
    tmin, tmax = -1., 4.

    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    # EEG-only channel selection; drop MEG/stim/EOG and bad channels.
    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')

    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True)
    # Use the 1-2 s post-cue window for training.
    epochs_data_train = epochs.copy().crop(tmin=1., tmax=2.).get_data()
    labels = epochs.events[:, -1]

    # transform_into="csp_space" keeps full time series so the FFT step
    # can operate on the spatially filtered signals.
    csp = mne.decoding.CSP(reg=None,
                           norm_trace=False,
                           transform_into="csp_space")
    fft = FFT()
    # Named `svc` rather than `svm` so the sklearn `svm` module used
    # elsewhere in this file is not shadowed.
    svc = SVC()
    pl = Pipeline([("csp", csp), ("fft", fft), ("svm", svc)])

    params = {
        "csp__n_components": np.arange(4, 10),
        "fft__R": np.arange(40, 110, 20),
        "svm__C": np.arange(1, 101, 25),
        "svm__gamma": np.logspace(-2, 2, 4)
    }

    clf = GridSearchCV(pl, params, n_jobs=3, cv=5, return_train_score=True)
    clf.fit(epochs_data_train, labels)
    df = pd.DataFrame(clf.cv_results_)
    df.to_excel("data/grid_fft/grid_fft_%s_1.xlsx" % subject,
                index=False,
                header=True)
    print("%s end" % subject)
コード例 #3
0
def get_score(subject, event):
    """Replay a recording through pickled CSP and SVM models and return
    the fraction of sliding windows whose predicted label matches the
    window's majority ground-truth label.

    Parameters
    ----------
    subject : int
        Subject number; selects both the raw data and the pickle files.
    event : str
        ``"left_vs_right"`` selects runs [4, 8, 12]; anything else
        selects runs [6, 10, 14].

    Returns
    -------
    float
        Accuracy over the evaluated windows (0.0 if none were scored).
    """
    if event == "left_vs_right":
        runs = [4, 8, 12]
    else:
        runs = [6, 10, 14]

    raw = get_raw(subject, runs=runs)
    data = raw.get_data()

    # SECURITY NOTE: pickle.load executes arbitrary code on load; only
    # open model files produced by this project.
    with open(
            "./data/models/three/" + event + "/csp/csp_subject" +
            str(subject) + ".pickle", 'rb') as pickle_file:
        csp = pickle.load(pickle_file)
    with open(
            "./data/models/three/" + event + "/svm/svm_subject" +
            str(subject) + ".pickle", 'rb') as pickle_file:
        svm = pickle.load(pickle_file)

    w_length = 160  # length of a training epoch, in samples
    w_step = 80     # minimum hop between scored windows, in samples

    current_step = 0
    count = 0
    score = 0

    # Stop early enough that every window is the full w_length samples:
    # a truncated tail window would not match the length the CSP/SVM
    # models were fitted on.
    for i in range(len(data[0]) - w_length):
        current_step += 1
        if i > w_length and current_step > w_step:
            # assumes channels 0-15 are EEG and row 16 carries the
            # per-sample ground-truth label -- TODO confirm with get_raw
            window = np.array([data[0:16, i:i + w_length]])
            X_test = csp.transform(window)
            label = svm.predict(X_test)

            current_step = 0
            count += 1

            # Majority label over the same window is the ground truth.
            window_labels = np.array(data[16][i:i + w_length], dtype=np.int64)
            label_count = np.bincount(window_labels)
            argmax_label = np.argmax(label_count)

            if label == argmax_label:
                score += 1
    # Guard against ZeroDivisionError on very short recordings.
    return score / count if count else 0.0
コード例 #4
0
ファイル: grid_search.py プロジェクト: chromee/mne-example
def grid_search(subject=1):
    """Grid-search a CSP -> SVM pipeline for one subject, dump the full
    ``cv_results_`` table to Excel, and print how long the search took.

    Parameters
    ----------
    subject : int
        Subject number passed through to ``get_raw``; also used in the
        output file name.
    """
    runs = [6, 10, 14]
    event_id = dict(rest=1, hands=2, feet=3)
    tmin, tmax = -1., 4.

    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    # EEG-only channel selection; drop MEG/stim/EOG and bad channels.
    picks = pick_types(raw.info,
                       meg=False,
                       eeg=True,
                       stim=False,
                       eog=False,
                       exclude='bads')

    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    picks=picks,
                    baseline=None,
                    preload=True)
    # Use the 1-2 s post-cue window for training.
    epochs_data_train = epochs.copy().crop(tmin=1., tmax=2.).get_data()
    labels = epochs.events[:, -1]

    csp = mne.decoding.CSP(reg=None, log=True, norm_trace=False)
    # Named `svc` rather than `svm` so the sklearn `svm` module used
    # elsewhere in this file is not shadowed.
    svc = SVC()
    pl = Pipeline([("csp", csp), ("svm", svc)])

    params = {
        "csp__n_components": np.arange(4, 10),
        "svm__C": np.arange(1000, 10000, 1000),
        "svm__gamma": np.logspace(-4, 0, 5)
    }

    def main1():
        # Wrapped in a closure so timeit can measure the entire search.
        clf = GridSearchCV(pl, params, n_jobs=-1, cv=10)
        clf.fit(epochs_data_train, labels)
        df = pd.DataFrame(clf.cv_results_)
        df.to_excel("data/grid/grid_default_%s.xlsx" % subject,
                    index=False,
                    header=True)
        print("%s end" % subject)

    print(timeit.timeit(main1, number=1))
コード例 #5
0
def get_score(subject, event):
    """Replay a recording through an MDM classifier built by ``create_mdm``
    and return the fraction of sliding windows whose predicted label
    matches the window's majority ground-truth label.

    Parameters
    ----------
    subject : int
        Subject number passed through to ``get_raw``.
    event : str
        ``"left_vs_right"`` selects runs [4, 8, 12]; anything else
        selects the hands-vs-feet runs [6, 10, 14].

    Returns
    -------
    float
        Accuracy over the evaluated windows (0.0 if none were scored).
    """
    if event == "left_vs_right":
        runs = [4, 8, 12]
        event_id = dict(rest=1, left=2, right=3)
    else:
        runs = [6, 10, 14]
        event_id = dict(rest=1, hands=2, feet=3)
    raw = get_raw(subject, runs=runs)
    data = raw.get_data()

    mdm = create_mdm(raw, event_id)

    w_length = 160  # length of a training epoch, in samples
    w_step = 80     # minimum hop between scored windows, in samples

    current_step = 0
    count = 0
    score = 0

    print("start")
    # Stop early enough that every window is the full w_length samples;
    # a truncated tail window would not match the training epoch length.
    for i in range(len(data[0]) - w_length):
        current_step += 1
        if i > w_length and current_step > w_step:
            # assumes channels 0-15 are EEG and row 16 carries the
            # per-sample ground-truth label -- TODO confirm with get_raw
            window = np.array([data[0:16, i:i+w_length]])
            X_test = Covariances().transform(window)
            label = mdm.predict(X_test)

            current_step = 0
            count += 1

            # Majority label over the same window is the ground truth.
            window_labels = np.array(data[16][i:i+w_length], dtype=np.int64)
            label_count = np.bincount(window_labels)
            argmax_label = np.argmax(label_count)

            print(i, label, argmax_label)

            if label == argmax_label:
                score += 1
    # Guard against ZeroDivisionError on very short recordings.
    result = score/count if count else 0.0
    print(subject, result)
    print("end")
    return result
コード例 #6
0
def get_score(subject=7, runs=None, event_id=None):
    """Cross-validate a Riemannian MDM classifier for one subject.

    Parameters
    ----------
    subject : int
        Subject number passed through to ``get_raw``.
    runs : list of int, optional
        Recording runs to load; defaults to [6, 10, 14].
    event_id : dict, optional
        Event name -> trigger code mapping; defaults to
        ``dict(hands=2, feet=3)``.

    Returns
    -------
    pandas.DataFrame
        ``GridSearchCV.cv_results_`` as a DataFrame.
    """
    # Avoid mutable default arguments: build fresh objects per call.
    if runs is None:
        runs = [6, 10, 14]
    if event_id is None:
        event_id = dict(hands=2, feet=3)

    tmin, tmax = -1., 4.

    raw = get_raw(subject, runs)
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')

    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    baseline=None, preload=True, verbose=False)
    labels = epochs.events[:, -1]

    # Drop the last channel before computing covariances.
    # NOTE(review): presumably the stim channel is last -- confirm.
    epochs_data_train = epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    ###############################################################################
    # Classification with Minimum distance to mean
    mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
    pl = Pipeline([("mdm", mdm)])
    # Single-point grid: used only to get the cv_results_ bookkeeping.
    params = {"mdm__metric": [dict(mean='riemann', distance='riemann')]}
    clf = GridSearchCV(pl, params, n_jobs=-1, cv=5, return_train_score=True)
    clf.fit(cov_data_train, labels)
    df = pd.DataFrame(clf.cv_results_)
    return df
コード例 #7
0
ファイル: gradually_tl.py プロジェクト: chromee/mne-example
def get_score(subject=7, runs=None, event_id=None):
    """Gradual transfer learning: for each target weight, cumulatively
    append other subjects' recordings to the target's raw data, fit a
    sample-weighted MDM after each append, and write the per-step
    train/test scores to an Excel file.

    Parameters
    ----------
    subject : int
        Target subject; the function returns early if it is listed in
        ``EXCLUDE_SUBJECTS``.
    runs : list of int, optional
        Recording runs to load; defaults to [6, 10, 14].
    event_id : dict, optional
        Event name -> trigger code mapping; defaults to
        ``dict(hands=2, feet=3)``.
    """
    # Avoid mutable default arguments: build fresh objects per call.
    if runs is None:
        runs = [6, 10, 14]
    if event_id is None:
        event_id = dict(hands=2, feet=3)

    if subject in EXCLUDE_SUBJECTS:
        return

    tmin, tmax = -1., 4.
    weights = np.arange(0.1, 1.0, 0.1)

    for weight in weights:
        first_sub = 2 if subject == 1 else 1
        raw = get_raw(subject, runs)
        scores = []
        for i in range(first_sub, TRANS_SUBJECT_COUNT):
            print(i)
            if i == subject or (i in EXCLUDE_SUBJECTS):
                continue
            # raw grows cumulatively: iteration k evaluates the target
            # plus the first k transfer subjects.
            raw.append(get_raw(i, runs))

            events = find_events(raw, shortest_event=0, stim_channel='STI 014')
            epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            baseline=None, preload=True, verbose=False)
            labels = epochs.events[:, -1]
            # Scale to microvolts and drop the last (presumably stim) channel.
            epochs_data_train = 1e6*epochs.get_data()[:, :-1]
            cov_data_train = Covariances().transform(epochs_data_train)

            # NOTE(review): the target's epochs come FIRST here (raw starts
            # with the target subject), and sample_weight also puts the
            # target weights first -- yet others_index below addresses the
            # first `others_size` epochs as "others".  Compare the variant
            # of this function where the target raw is appended LAST;
            # verify the intended ordering before trusting these scores.
            target_sample_weight_base = np.ones(EPOCH_COUNT)*weight
            others_sample_weight_base = np.ones(
                len(epochs)-EPOCH_COUNT)*(1.-weight)
            sample_weight = np.hstack(
                (target_sample_weight_base, others_sample_weight_base))

            others_size = others_sample_weight_base.size
            others_index = np.arange(others_size)

            mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
            cv = KFold(n_splits=5, shuffle=True, random_state=42)

            train_scores = []
            test_scores = []
            # Dummy array: only the target's EPOCH_COUNT epochs are split
            # into folds; the pooled epochs are always in the training set.
            dumy_array = np.ones(EPOCH_COUNT)

            for train_index, test_index in cv.split(dumy_array):
                train_index = np.hstack(
                    (others_index, train_index+others_size))
                x = cov_data_train[train_index]
                y = labels[train_index]
                mdm.fit(x, y, sample_weight=sample_weight[train_index])

                # Training accuracy on the weighted fit.
                score = (mdm.predict(x) == y).sum()/len(train_index)
                train_scores.append(score)

                # Test accuracy on the held-out target epochs.
                test_index = test_index + others_size
                y = mdm.predict(cov_data_train[test_index])
                score = (y == labels[test_index]).sum()/len(test_index)
                test_scores.append(score)

            train_score = np.mean(train_scores)
            test_score = np.mean(test_scores)
            scores.append([subject, i, train_score, test_score])
        df = pd.DataFrame(
            scores, columns=["subject", "transfer_count", "train_score", "test_score"])
        df.to_excel("data/riemann/gradually/test_subject_%d_weight_%e.xlsx" %
                    (subject, weight), index=False)
コード例 #8
0
ファイル: realtime_wave.py プロジェクト: chromee/mne-example
# EEGの時系列データをリアルタイムにとってるっぽくmatplotlibで表示するプログラム

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import sleep
from mne_wrapper import get_raw

subject = 1
raw = get_raw(subject)
data = raw.get_data()

# NOTE(review): the recording is sampled at 160 Hz, but the original
# author found wall-clock time only matched at 40 -- presumably because
# plt.pause() adds per-frame overhead; confirm before relying on timing.
sfreq = 40
interval = 1. / sfreq
time_range = 10  # seconds of signal shown at once

ch = 1
fig, ax = plt.subplots(1, 1)
window = time_range * sfreq  # samples per displayed window
x = np.arange(0, time_range, interval)
y = data[ch][0:window]
lines, = ax.plot(x, y)

# Slide the window one sample per frame.  Stop while a full window is
# still available: a truncated tail slice would make x and y different
# lengths and lines.set_data would raise.
for i in range(len(data[ch]) - window):
    x += interval
    y = data[ch][i:i + window]
    lines.set_data(x, y)
    ax.set_xlim((x.min(), x.max()))
    ax.set_ylim((y.min(), y.max()))
    plt.pause(interval)
コード例 #9
0
def get_score(subject=7, runs=None, event_id=None):
    """Transfer learning with a sample-weighted MDM: pool other subjects'
    epochs with the target subject's (target appended last), then for
    each target weight run a 5-fold CV where only the target's epochs
    are split and the pooled epochs always stay in the training set.

    Parameters
    ----------
    subject : int
        Target subject.
    runs : list of int, optional
        Recording runs to load; defaults to [6, 10, 14].
    event_id : dict, optional
        Event name -> trigger code mapping; defaults to
        ``dict(hands=2, feet=3)``.

    Returns
    -------
    list of [subject, weight, train_score, test_score]
        One row per weight in 0.1 .. 0.9.
    """
    # Avoid mutable default arguments: build fresh objects per call.
    if runs is None:
        runs = [6, 10, 14]
    if event_id is None:
        event_id = dict(hands=2, feet=3)

    tmin, tmax = -1., 4.

    # learn all suject exclude target subject. #############################
    first_sub = 2 if subject == 1 else 1
    raw = get_raw(first_sub, runs)
    # NOTE(review): range(first_sub + 1, 3) pools only subjects up to 2 --
    # looks like a debugging leftover (the gradual variant iterates to
    # TRANS_SUBJECT_COUNT); confirm the intended upper bound.
    for i in range(first_sub + 1, 3):
        if i != subject and not (i in [88, 89, 92, 100]):
            raw.append(get_raw(i, runs))
    # Target subject goes last so its epochs sit at the end of the pool.
    raw.append(get_raw(subject, runs))

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    baseline=None,
                    preload=True,
                    verbose=False)

    labels = epochs.events[:, -1]
    # Scale to microvolts and drop the last (presumably stim) channel.
    epochs_data_train = 1e6 * epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    weights = np.arange(0.1, 1.0, 0.1)
    scores = []
    for weight in weights:
        mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
        # Others first, target last -- matches the epoch order built above.
        others_sample_weight_base = np.ones(len(epochs) -
                                            EPOCH_COUNT) * (1. - weight)
        target_sample_weight_base = np.ones(EPOCH_COUNT) * weight
        sample_weight = np.hstack(
            (others_sample_weight_base, target_sample_weight_base))

        others_size = others_sample_weight_base.size
        others_index = np.arange(others_size)

        cv = KFold(n_splits=5, shuffle=True, random_state=42)
        train_scores = []
        test_scores = []
        # Dummy array: only the target's EPOCH_COUNT epochs are folded.
        dumy_array = np.ones(EPOCH_COUNT)
        for train_index, test_index in cv.split(dumy_array):
            train_index = np.hstack((others_index, train_index + others_size))
            x = cov_data_train[train_index]
            y = labels[train_index]
            mdm.fit(x, y, sample_weight=sample_weight[train_index])
            # Training accuracy on the weighted fit.
            score = (mdm.predict(x) == y).sum() / len(train_index)
            train_scores.append(score)

            # Test accuracy on the held-out target epochs.
            test_index = test_index + others_size
            y = mdm.predict(cov_data_train[test_index])
            score = (y == labels[test_index]).sum() / len(test_index)
            test_scores.append(score)

        train_score = np.mean(train_scores)
        test_score = np.mean(test_scores)
        scores.append([subject, weight, train_score, test_score])
    return scores