def test_MDM_predict():
    """Test prediction of MDM"""
    covset = generate_cov(100, 3)
    labels = np.array([0, 1]).repeat(50)
    mdm = MDM(metric='riemann')
    mdm.fit(covset, labels)
    mdm.predict(covset)

    # test fit_predict
    mdm = MDM(metric='riemann')
    mdm.fit_predict(covset, labels)

    # test transform
    mdm.transform(covset)

    # predict proba
    mdm.predict_proba(covset)

    # test n_jobs
    mdm = MDM(metric='riemann', n_jobs=2)
    mdm.fit(covset, labels)
    mdm.predict(covset)
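
These test snippets assume `numpy`, pyriemann's `MDM`, and a `generate_cov` test helper that returns a stack of random SPD (covariance-like) matrices. A minimal sketch of such a helper, under the assumption that any well-conditioned SPD matrices will do, is:

import numpy as np
from pyriemann.classification import MDM


def generate_cov(n_trials, n_channels, seed=42):
    """Return `n_trials` random SPD matrices of shape (n_channels, n_channels)."""
    rs = np.random.RandomState(seed)
    A = rs.randn(n_trials, n_channels, n_channels)
    # A @ A.T plus a small ridge is symmetric positive definite
    return A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(n_channels)
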
Example No. 4
def erp_cov_vr_pc(X_training, labels_training, X_test, labels_test, class_name,
                  class_info):
    # estimate the extended ERP covariance matrices with Xdawn
    erpc = ERPCovariances(classes=[class_info[class_name]], estimator='lwf')
    erpc.fit(X_training, labels_training)
    covs_training = erpc.transform(X_training)
    covs_test = erpc.transform(X_test)

    # get the AUC for the classification
    clf = MDM()
    clf.fit(covs_training, labels_training)
    labels_pred = clf.predict(covs_test)
    return roc_auc_score(labels_test, labels_pred)
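
A hedged usage sketch for `erp_cov_vr_pc`, with synthetic data standing in for the real VR/PC P300 epochs (the shapes, random arrays, and the `class_info` mapping below are illustrative assumptions, not part of the original script):

import numpy as np
from pyriemann.estimation import ERPCovariances
from pyriemann.classification import MDM
from sklearn.metrics import roc_auc_score

rs = np.random.RandomState(0)
n_trials, n_channels, n_times = 80, 8, 64
X_training = rs.randn(n_trials, n_channels, n_times)
X_test = rs.randn(n_trials, n_channels, n_times)
labels_training = rs.randint(0, 2, n_trials)
labels_test = rs.randint(0, 2, n_trials)
class_info = {'Target': 1, 'NonTarget': 0}

auc = erp_cov_vr_pc(X_training, labels_training, X_test, labels_test,
                    'Target', class_info)
print('AUC on random data (chance level ~0.5):', auc)
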
Example No. 6
class FgMDM2(BaseEstimator, ClassifierMixin, TransformerMixin):
    def __init__(self, metric='riemann', tsupdate=False, n_jobs=1):
        """Init."""
        self.metric = metric
        self.n_jobs = n_jobs
        self.tsupdate = tsupdate

        if isinstance(metric, str):
            self.metric_mean = metric

        elif isinstance(metric, dict):
            # check keys
            for key in ['mean', 'distance']:
                if key not in metric.keys():
                    raise KeyError('metric must contain "mean" and "distance"')

            self.metric_mean = metric['mean']

        else:
            raise TypeError('metric must be dict or str')

    def fit(self, X, y):
        self.classes_ = unique_labels(y)
        self._mdm = MDM(metric=self.metric, n_jobs=self.n_jobs)
        self._fgda = FGDA(metric=self.metric_mean, tsupdate=self.tsupdate)
        cov = self._fgda.fit_transform(X, y)
        self._mdm.fit(cov, y)
        return self

    def predict(self, X):
        cov = self._fgda.transform(X)
        return self._mdm.predict(cov)

    def predict_proba(self, X):
        cov = self._fgda.transform(X)
        return self._mdm.predict_proba(cov)

    def transform(self, X):
        cov = self._fgda.transform(X)
        return self._mdm.transform(cov)
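
A minimal usage sketch for `FgMDM2`. The imports listed are the ones the class definition above relies on; the random SPD matrices are an illustrative stand-in for real covariance estimates:

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.multiclass import unique_labels
from pyriemann.classification import MDM
from pyriemann.tangentspace import FGDA

rs = np.random.RandomState(1)
A = rs.randn(60, 4, 4)
covs = A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(4)  # random SPD matrices
y = np.array([0, 1]).repeat(30)

clf = FgMDM2(metric='riemann', tsupdate=False)
clf.fit(covs, y)
print(clf.predict(covs)[:5])
print(clf.predict_proba(covs)[:2])
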
class wrapper_MDM(machine_learning_method):
    """wrapper for pyriemann MDM"""
    def __init__(self, method_name, method_args):
        super(wrapper_MDM, self).__init__(method_name, method_args)
        self.init_method()

    def init_method(self, n_jobs=1):
        self.classifier = MDM(metric=self.method_args['metric'], n_jobs=n_jobs)

    def set_parallel(self, is_parallel=False, n_jobs=8):
        logging.warning(
            'Calling set_parallel resets the classifier; it must be fitted again'
        )
        self.parallel = is_parallel
        self.n_jobs = n_jobs

        if self.parallel:
            self.init_method(n_jobs)

    def fit(self, X, y):
        return self.classifier.fit(X, y)

    def predict(self, X):
        return self.classifier.predict(X)
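
The wrapper above inherits from a project-specific `machine_learning_method` base class that is not shown here. Assuming that base class merely stores the method name and arguments, a hypothetical stub (which must be defined before `wrapper_MDM` for the inheritance to resolve) plus a usage sketch could look like:

import logging
import numpy as np
from pyriemann.classification import MDM

class machine_learning_method:
    # hypothetical stand-in for the project's base class
    def __init__(self, method_name, method_args):
        self.method_name = method_name
        self.method_args = method_args

# usage, with random SPD matrices as covariance stand-ins
A = np.random.RandomState(2).randn(40, 4, 4)
covs = A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(4)
y = np.array([0, 1]).repeat(20)

clf = wrapper_MDM('MDM', {'metric': 'riemann'})
clf.fit(covs, y)
print(clf.predict(covs)[:5])
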
Example No. 8
                        tmax=5,
                        baseline=None)
offline_epochs_data = offline_epochs.get_data()

# Creating ML model
offline_cov_matrix = Covariances(
    estimator='lwf').transform(offline_epochs_data)
mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
mdm.fit(offline_cov_matrix, labels)

# Evoking trials to simulate online input
iter_evoked = epochs.iter_evoked()
epochs_data = offline_epochs_data
time_array = []

pre_predict = mdm.predict(offline_cov_matrix)
print("Labels: ")
print(labels)

for i, evoked in enumerate(iter_evoked):

    evoked_raw = createRaw(evoked.data, raw, filtered=False)

    ## Start Time Counting
    time_1 = time.time()

    ## Filtering
    evoked_filtered_signal = _bandpass_filter(evoked_raw, frequencies,
                                              frequency_range)
    evoked_filtered_signal = np.array(evoked_filtered_signal)
    evoked_filtered_signal = np.expand_dims(evoked_filtered_signal, axis=0)
Example No. 9
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    if subject in EXCLUDE_SUBJECTS:
        return

    tmin, tmax = -1., 4.
    weights = np.arange(0.1, 1.0, 0.1)

    for weight in weights:
        first_sub = 2 if subject == 1 else 1
        raw = get_raw(subject, runs)
        scores = []
        for i in range(first_sub, TRANS_SUBJECT_COUNT):
            print(i)
            if i == subject or (i in EXCLUDE_SUBJECTS):
                continue
            raw.append(get_raw(i, runs))

            events = find_events(raw, shortest_event=0, stim_channel='STI 014')
            epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            baseline=None, preload=True, verbose=False)
            labels = epochs.events[:, -1]
            epochs_data_train = 1e6*epochs.get_data()[:, :-1]
            cov_data_train = Covariances().transform(epochs_data_train)

            target_sample_weight_base = np.ones(EPOCH_COUNT)*weight
            others_sample_weight_base = np.ones(
                len(epochs)-EPOCH_COUNT)*(1.-weight)
            sample_weight = np.hstack(
                (target_sample_weight_base, others_sample_weight_base))

            others_size = others_sample_weight_base.size
            others_index = np.arange(others_size)

            mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
            cv = KFold(n_splits=5, shuffle=True, random_state=42)

            train_scores = []
            test_scores = []
            dummy_array = np.ones(EPOCH_COUNT)

            for train_index, test_index in cv.split(dummy_array):
                train_index = np.hstack(
                    (others_index, train_index+others_size))
                x = cov_data_train[train_index]
                y = labels[train_index]
                mdm.fit(x, y, sample_weight=sample_weight[train_index])

                score = (mdm.predict(x) == y).sum()/len(train_index)
                train_scores.append(score)

                test_index = test_index + others_size
                y = mdm.predict(cov_data_train[test_index])
                score = (y == labels[test_index]).sum()/len(test_index)
                test_scores.append(score)

            train_score = np.mean(train_scores)
            test_score = np.mean(test_scores)
            scores.append([subject, i, train_score, test_score])
        df = pd.DataFrame(
            scores, columns=["subject", "transfer_count", "train_score", "test_score"])
        df.to_excel("data/riemann/gradually/test_subject_%d_weight_%e.xlsx" %
                    (subject, weight), index=False)
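
The transfer scheme above relies on the `sample_weight` argument of `MDM.fit`, which weights each trial's contribution to the per-class Riemannian means. A minimal sketch of that mechanism in isolation (random SPD matrices and illustrative weights, not the original data):

import numpy as np
from pyriemann.classification import MDM

rs = np.random.RandomState(3)
A = rs.randn(40, 4, 4)
covs = A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(4)
y = np.array([2, 3]).repeat(20)  # e.g. hands=2, feet=3

# down-weight the first 30 "other-subject" trials, up-weight the last 10 "target" trials
w = np.hstack([np.full(30, 0.2), np.full(10, 0.8)])
mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
mdm.fit(covs, y, sample_weight=w)
print(mdm.predict(covs)[:5])
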
Example No. 10
    epochs_data = np.concatenate((epochs_data, filtered_signal), axis=0)

    ## No Filtering
    # raw_evoked_signal = evoked.data
    # raw_evoked_signal = np.array(raw_evoked_signal)
    # raw_evoked_signal = np.expand_dims(raw_evoked_signal, axis=0)

    cov_ext_trials = Covariances(estimator='lwf').transform(epochs_data)

    labels = np.append(labels, labels[count])

    if (count % 4 == 0 and count != 0 and retrain == True):
        mdm.fit(cov_ext_trials, labels)
        print("retrained")

    prediction_labeled = mdm.predict(cov_ext_trials)

    # Finish Time Counter
    time_2 = time.time()

    time_array.append(time_2 - time_1)
    count += 1
    print("Predictions: ")
    print(prediction_labeled[:32])
    print(prediction_labeled[32:])
    # print ("Label: " + str(labels[i]))
    print("Time: " + str(time_2 - time_1) + '\n')

print("Predictions: ")
print(prediction_labeled[:32])
print(prediction_labeled[32:])
Example No. 11
    for train_index, test_index in kf.split(X_train):

        logging.info(f'Doing fold {i}')
        clf_knn = KNearestNeighbor(n_neighbors, metric, n_jobs)
        clf_mdm = MDM(metric, n_jobs)
        X_train_fold, X_test_fold = X_train[train_index], X_train[test_index]
        y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]

        clf_knn.fit(X_train_fold, y_train_fold)
        y_predicted = clf_knn.predict(X_test_fold)
        accuracy = (y_test_fold == y_predicted).sum() / len(y_test_fold)
        clf_knn_k_fold.append(clf_knn)
        accuracy_list_training_knn.append(accuracy)

        clf_mdm.fit(X_train_fold, y_train_fold)
        y_predicted = clf_mdm.predict(X_test_fold)
        accuracy = (y_test_fold == y_predicted).sum() / len(y_test_fold)
        clf_mdm_k_fold.append(clf_mdm)
        accuracy_list_training_mdm.append(accuracy)

        i += 1

    # Testing on test dataset
    logging.info('Doing testing')
    accuracy_list_testing_knn = []
    accuracy_list_testing_mdm = []
    X_test, y_test = shuffle(X_test, y_test, random_state=args.seed)

    for clf_knn in clf_knn_k_fold:
        y_predicted = clf_knn.predict(X_test)
        accuracy = (y_test == y_predicted).sum() / len(y_test)
Example No. 12
def get_score(subject=7, runs=[6, 10, 14], event_id=dict(hands=2, feet=3)):
    tmin, tmax = -1., 4.

    # learn from all subjects except the target subject ###################
    first_sub = 2 if subject == 1 else 1
    raw = get_raw(first_sub, runs)
    for i in range(first_sub + 1, 3):
        if i != subject and not (i in [88, 89, 92, 100]):
            # print(i)
            raw.append(get_raw(i, runs))
    raw.append(get_raw(subject, runs))

    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw,
                    events,
                    event_id,
                    tmin,
                    tmax,
                    proj=True,
                    baseline=None,
                    preload=True,
                    verbose=False)

    labels = epochs.events[:, -1]
    epochs_data_train = 1e6 * epochs.get_data()[:, :-1]
    cov_data_train = Covariances().transform(epochs_data_train)

    weights = np.arange(0.1, 1.0, 0.1)
    scores = []
    for weight in weights:
        mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
        others_sample_weight_base = np.ones(len(epochs) -
                                            EPOCH_COUNT) * (1. - weight)
        target_sample_weight_base = np.ones(EPOCH_COUNT) * weight
        sample_weight = np.hstack(
            (others_sample_weight_base, target_sample_weight_base))

        others_size = others_sample_weight_base.size
        others_index = np.arange(others_size)

        cv = KFold(n_splits=5, shuffle=True, random_state=42)
        train_scores = []
        test_scores = []
        dummy_array = np.ones(EPOCH_COUNT)
        for train_index, test_index in cv.split(dummy_array):
            train_index = np.hstack((others_index, train_index + others_size))
            x = cov_data_train[train_index]
            y = labels[train_index]
            mdm.fit(x, y, sample_weight=sample_weight[train_index])
            score = (mdm.predict(x) == y).sum() / len(train_index)
            train_scores.append(score)

            test_index = test_index + others_size
            y = mdm.predict(cov_data_train[test_index])
            score = (y == labels[test_index]).sum() / len(test_index)
            test_scores.append(score)

        train_score = np.mean(train_scores)
        test_score = np.mean(test_scores)
        scores.append([subject, weight, train_score, test_score])
        # print("train:%s test:%s" % (train_score, test_score))
    return scores
			blocks = np.arange(1, 12+1)
			for train_idx, test_idx in kf.split(np.arange(12)):

				# split in training and testing blocks
				X_training, labels_training, _ = get_block_repetition(X, labels, meta, blocks[train_idx], repetitions)
				X_test, labels_test, _ = get_block_repetition(X, labels, meta, blocks[test_idx], repetitions)

				# estimate the extended ERP covariance matrices with Xdawn
				dict_labels = {'Target':1, 'NonTarget':0}
				erpc = ERPCovariances(classes=[dict_labels['Target']], estimator='lwf')
				erpc.fit(X_training, labels_training)
				covs_training = erpc.transform(X_training)
				covs_test = erpc.transform(X_test)

				# get the AUC for the classification
				clf = MDM()
				clf.fit(covs_training, labels_training)
				labels_pred = clf.predict(covs_test)
				auc.append(roc_auc_score(labels_test, labels_pred))

			# store scores
			scores_subject.append(np.mean(auc))

		scores.append(scores_subject)

	# print results
	df[tmax] = pd.DataFrame(scores, columns=['subject', 'VR', 'PC'])

filename = './results.pkl'
joblib.dump(df, filename)
Example No. 14
def score_ensemble_rot(settings, subject_target, ntop):

    dataset = settings['dataset']
    paradigm = settings['paradigm']
    session = settings['session']
    storage = settings['storage']
    filepath = '../results/' + dataset + '/TL_intra-subject_scores.pkl'
    acc_intra_dict = joblib.load(filepath)

    scores = []
    subject_sources = []
    for subject in settings['subject_list']:
        if subject == subject_target:
            continue
        else:
            scores.append(acc_intra_dict[subject])
            subject_sources.append(subject)
    scores = np.array(scores)

    subject_sources = np.array(subject_sources)
    idx_sort = scores.argsort()[::-1]
    scores = scores[idx_sort]
    subject_sources = subject_sources[idx_sort]
    subject_sources_ntop = subject_sources[:ntop]

    # get the geometric means for each subject (each class and also the center)
    filename = '../results/' + dataset + '/subject_means.pkl'
    subj_means = joblib.load(filename)

    # get the data for the target subject
    target_org = GD.get_dataset(dataset, subject_target, session, storage)
    if paradigm == 'MI':
        # things here are only implemented for MI for now
        target_org['covs'] = Covariances(estimator='oas').fit_transform(
            target_org['signals'])
        target_org['labels'] = target_org['labels']

    ncovs = settings['ncovs_list'][0]
    nrzt = 10
    score_rzt = 0.0
    for rzt in range(nrzt):

        # randomly split the target dataset
        target_org_train, target_org_test = get_target_split_motorimagery(
            target_org, ncovs)

        covs_train_target = target_org_train['covs']
        labs_train_target = target_org_train['labels']

        MC_target = mean_riemann(covs_train_target)
        M1_target = mean_riemann(
            covs_train_target[labs_train_target == 'left_hand'])
        M2_target = mean_riemann(
            covs_train_target[labs_train_target == 'right_hand'])
        M1_target_rct = np.dot(invsqrtm(MC_target),
                               np.dot(M1_target, invsqrtm(MC_target)))
        M2_target_rct = np.dot(invsqrtm(MC_target),
                               np.dot(M2_target, invsqrtm(MC_target)))
        covs_train_target = np.stack([M1_target_rct, M2_target_rct])
        labs_train_target = np.array(['left_hand', 'right_hand'])

        clf = []
        for subj_source in subject_sources_ntop:

            MC_source = subj_means[subj_source]['center']
            M1_source = subj_means[subj_source]['left_hand']
            M2_source = subj_means[subj_source]['right_hand']
            M1_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M1_source, invsqrtm(MC_source)))
            M2_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M2_source, invsqrtm(MC_source)))

            M = [M1_target_rct, M2_target_rct]
            Mtilde = [M1_source_rct, M2_source_rct]
            R = manifoptim.get_rotation_matrix(M, Mtilde)
            M1_source_rot = np.dot(R, np.dot(M1_source_rct, R.T))
            M2_source_rot = np.dot(R, np.dot(M2_source_rct, R.T))

            covs_train_source = np.stack([M1_source_rot, M2_source_rot])
            labs_train_source = np.array(['left_hand', 'right_hand'])

            covs_train = np.concatenate([covs_train_source, covs_train_target])
            labs_train = np.concatenate([labs_train_source, labs_train_target])
            clfi = MDM()

            # sample_weight must contain floats; integer weights cause problems here
            clfi.fit(covs_train,
                     labs_train,
                     sample_weight=np.array(
                         [200.0, 200.0, 2.0 * ncovs, 2.0 * ncovs]))
            clf.append(clfi)

        covs_test = target_org_test['covs']
        labs_test = target_org_test['labels']

        ypred = []
        for clfi in clf:
            yi = clfi.predict(covs_test)
            ypred.append(yi)
        ypred = np.array(ypred)

        majorvoting = []
        for j in range(ypred.shape[1]):
            ypredj = ypred[:, j]
            values_unique, values_count = np.unique(ypredj, return_counts=True)
            majorvoting.append(values_unique[np.argmax(values_count)])
        majorvoting = np.array(majorvoting)

        score_rzt = score_rzt + np.mean(majorvoting == labs_test)

    score = score_rzt / nrzt

    return score
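
The re-centering step above transforms each mean as invsqrtm(MC) @ M @ invsqrtm(MC); by affine invariance of the Riemannian mean, this moves the set's geometric mean MC to the identity, which is the alignment step the ensemble relies on. A small numerical check, assuming pyriemann's usual utility locations:

import numpy as np
from pyriemann.utils.mean import mean_riemann
from pyriemann.utils.base import invsqrtm

rs = np.random.RandomState(4)
A = rs.randn(20, 4, 4)
covs = A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(4)

MC = mean_riemann(covs)
MC_isqrt = invsqrtm(MC)
covs_rct = np.stack([MC_isqrt @ C @ MC_isqrt for C in covs])
# after re-centering, the Riemannian mean of the set is (numerically) the identity
print(np.round(mean_riemann(covs_rct), 3))
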
        for fold in range(1, 6):
            train = cov_data_bad[index[bad_subject_index] != fold]
            train_CSP = epochs_data_train_bad[index[bad_subject_index] != fold]
            train_label = labels_bad[index[bad_subject_index] != fold]

            test = cov_data_bad[index[bad_subject_index] == fold]
            test_CSP = epochs_data_train_bad[index[bad_subject_index] == fold]
            test_label = labels_bad[index[bad_subject_index] == fold]

            box_length = np.sum([index[bad_subject_index] == fold])

            mdm = MDM(metric=dict(mean='riemann', distance='riemann'))

            mdm.fit(train, train_label)
            pred = mdm.predict(test)

            print('MDM: {:.4f}'.format(np.sum(pred == test_label) / box_length))
            MDM_record.append(np.sum(pred == test_label) / box_length)
            print('-----------------------------------------')

            Fgmdm = FgMDM(metric=dict(mean='riemann', distance='riemann'))

            Fgmdm.fit(train, train_label)
            pred = Fgmdm.predict(test)

            print('FGMDM: {:.4f}'.format(
                np.sum(pred == test_label) / box_length))
            FGMDM_record.append(np.sum(pred == test_label) / box_length)
            print('-----------------------------------------')