def score_ensemble_rot(settings, subject_target, ntop):

    dataset = settings['dataset']
    paradigm = settings['paradigm']
    session = settings['session']
    storage = settings['storage']

    # load the intra-subject classification scores and rank the source subjects
    filepath = '../results/' + dataset + '/TL_intra-subject_scores.pkl'
    acc_intra_dict = joblib.load(filepath)

    scores = []
    subject_sources = []
    for subject in settings['subject_list']:
        if subject == subject_target:
            continue
        scores.append(acc_intra_dict[subject])
        subject_sources.append(subject)
    scores = np.array(scores)
    subject_sources = np.array(subject_sources)

    # keep the ntop source subjects with the highest intra-subject scores
    idx_sort = scores.argsort()[::-1]
    scores = scores[idx_sort]
    subject_sources = subject_sources[idx_sort]
    subject_sources_ntop = subject_sources[:ntop]

    # get the geometric means for each subject (each class and also the center)
    filename = '../results/' + dataset + '/subject_means.pkl'
    subj_means = joblib.load(filename)

    # get the data for the target subject
    target_org = GD.get_dataset(dataset, subject_target, session, storage)
    if paradigm == 'MI':
        # things here are only implemented for MI for now
        target_org['covs'] = Covariances(estimator='oas').fit_transform(
            target_org['signals'])

    ncovs = settings['ncovs_list'][0]

    nrzt = 10
    score_rzt = 0.0
    for rzt in range(nrzt):

        # split the target dataset at random into train/test
        target_org_train, target_org_test = get_target_split_motorimagery(
            target_org, ncovs)

        # re-center the class means of the target training split around the identity
        covs_train_target = target_org_train['covs']
        labs_train_target = target_org_train['labels']
        MC_target = mean_riemann(covs_train_target)
        M1_target = mean_riemann(
            covs_train_target[labs_train_target == 'left_hand'])
        M2_target = mean_riemann(
            covs_train_target[labs_train_target == 'right_hand'])
        M1_target_rct = np.dot(invsqrtm(MC_target),
                               np.dot(M1_target, invsqrtm(MC_target)))
        M2_target_rct = np.dot(invsqrtm(MC_target),
                               np.dot(M2_target, invsqrtm(MC_target)))
        covs_train_target = np.stack([M1_target_rct, M2_target_rct])
        labs_train_target = np.array(['left_hand', 'right_hand'])

        # build one MDM classifier per selected source subject
        clf = []
        for subj_source in subject_sources_ntop:

            # re-center the class means of the source subject around the identity
            MC_source = subj_means[subj_source]['center']
            M1_source = subj_means[subj_source]['left_hand']
            M2_source = subj_means[subj_source]['right_hand']
            M1_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M1_source, invsqrtm(MC_source)))
            M2_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M2_source, invsqrtm(MC_source)))

            # rotate the source class means so they match the target class means
            M = [M1_target_rct, M2_target_rct]
            Mtilde = [M1_source_rct, M2_source_rct]
            R = manifoptim.get_rotation_matrix(M, Mtilde)
            M1_source_rot = np.dot(R, np.dot(M1_source_rct, R.T))
            M2_source_rot = np.dot(R, np.dot(M2_source_rct, R.T))
            covs_train_source = np.stack([M1_source_rot, M2_source_rot])
            labs_train_source = np.array(['left_hand', 'right_hand'])

            covs_train = np.concatenate([covs_train_source, covs_train_target])
            labs_train = np.concatenate([labs_train_source, labs_train_target])

            clfi = MDM()
            # sample_weight must be given as floats; integer weights cause problems here
            clfi.fit(covs_train, labs_train,
                     sample_weight=np.array([200.0, 200.0,
                                             2.0 * ncovs, 2.0 * ncovs]))
            clf.append(clfi)

        # classify the test trials with each classifier and take a majority vote
        covs_test = target_org_test['covs']
        labs_test = target_org_test['labels']
        ypred = []
        for clfi in clf:
            yi = clfi.predict(covs_test)
            ypred.append(yi)
        ypred = np.array(ypred)

        majorvoting = []
        for j in range(ypred.shape[1]):
            ypredj = ypred[:, j]
            values_unique, values_count = np.unique(ypredj, return_counts=True)
            majorvoting.append(values_unique[np.argmax(values_count)])
        majorvoting = np.array(majorvoting)

        score_rzt = score_rzt + np.mean(majorvoting == labs_test)

    score = score_rzt / nrzt

    return score
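
# Editorial sketch, not part of the original pipeline: the helper below only
# illustrates the re-centering step shared by both scoring functions, i.e. the
# congruence C -> MC^{-1/2} C MC^{-1/2} that moves the Riemannian mean of a set
# of covariances to the identity. It uses pyriemann's mean_riemann and invsqrtm
# on randomly generated SPD matrices; the function name and all values are
# illustrative only.
def _demo_recentering():
    """Show that re-centering brings the geometric mean to the identity."""
    import numpy as np
    from pyriemann.utils.mean import mean_riemann
    from pyriemann.utils.base import invsqrtm

    rng = np.random.RandomState(42)
    A = rng.randn(20, 8, 8)
    # random well-conditioned SPD matrices standing in for trial covariances
    covs = np.array([a @ a.T + 8 * np.eye(8) for a in A])

    MC = mean_riemann(covs)
    covs_rct = np.array([invsqrtm(MC) @ c @ invsqrtm(MC) for c in covs])

    # the Riemannian mean is equivariant under congruence, so the re-centered
    # set has its mean (numerically) at the identity
    print(np.allclose(mean_riemann(covs_rct), np.eye(8), atol=1e-6))
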
def score_pooling_rct(settings, subject_target, ntop):

    dataset = settings['dataset']
    paradigm = settings['paradigm']
    session = settings['session']
    storage = settings['storage']

    # load the intra-subject classification scores and rank the source subjects
    filepath = '../results/' + dataset + '/TL_intra-subject_scores.pkl'
    acc_intra_dict = joblib.load(filepath)

    scores = []
    subject_sources = []
    for subject in settings['subject_list']:
        if subject == subject_target:
            continue
        scores.append(acc_intra_dict[subject])
        subject_sources.append(subject)
    scores = np.array(scores)
    subject_sources = np.array(subject_sources)

    # keep the ntop source subjects with the highest intra-subject scores
    idx_sort = scores.argsort()[::-1]
    scores = scores[idx_sort]
    subject_sources = subject_sources[idx_sort]
    subject_sources_ntop = subject_sources[:ntop]

    # get the geometric means for each subject (each class and also the center)
    filename = '../results/' + dataset + '/subject_means.pkl'
    subj_means = joblib.load(filename)

    # get the data for the target subject
    target_org = GD.get_dataset(dataset, subject_target, session, storage)
    if paradigm == 'MI':
        # things here are only implemented for MI for now
        target_org['covs'] = Covariances(estimator='oas').fit_transform(
            target_org['signals'])

    ncovs = settings['ncovs_list'][0]

    score_rzt = 0.0
    nrzt = 10
    for rzt in range(nrzt):

        # split the target dataset at random into train/test
        target_org_train, target_org_test = get_target_split_motorimagery(
            target_org, ncovs)

        # re-center the class means of each source subject and pool them together
        class_mean_1 = []
        class_mean_2 = []
        for subj_source in subject_sources_ntop:
            MC_source = subj_means[subj_source]['center']
            M1_source = subj_means[subj_source]['left_hand']
            M2_source = subj_means[subj_source]['right_hand']
            M1_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M1_source, invsqrtm(MC_source)))
            class_mean_1.append(M1_source_rct)
            M2_source_rct = np.dot(invsqrtm(MC_source),
                                   np.dot(M2_source, invsqrtm(MC_source)))
            class_mean_2.append(M2_source_rct)
        class_mean_1_source = np.stack(class_mean_1)
        class_mean_2_source = np.stack(class_mean_2)
        covs_train_source = np.concatenate(
            [class_mean_1_source, class_mean_2_source])
        labs_train_source = np.concatenate([
            len(class_mean_1_source) * ['left_hand'],
            len(class_mean_2_source) * ['right_hand']
        ])

        # re-center data for the target, using only the training split so that
        # no test labels enter the class-mean estimation
        covs_train_target = target_org_train['covs']
        labs_train_target = target_org_train['labels']
        MC_target = mean_riemann(covs_train_target)
        class_mean_1_target = mean_riemann(
            covs_train_target[labs_train_target == 'left_hand'])
        class_mean_1_target = np.dot(
            invsqrtm(MC_target),
            np.dot(class_mean_1_target, invsqrtm(MC_target)))
        class_mean_2_target = mean_riemann(
            covs_train_target[labs_train_target == 'right_hand'])
        class_mean_2_target = np.dot(
            invsqrtm(MC_target),
            np.dot(class_mean_2_target, invsqrtm(MC_target)))
        covs_train_target = np.stack(
            [class_mean_1_target, class_mean_2_target])
        labs_train_target = np.array(['left_hand', 'right_hand'])

        covs_train = np.concatenate([covs_train_source, covs_train_target])
        labs_train = np.concatenate([labs_train_source, labs_train_target])

        covs_test = target_org_test['covs']
        labs_test = target_org_test['labels']

        # do the classification
        clf = MDM()
        clf.fit(covs_train, labs_train)
        score_rzt = score_rzt + clf.score(covs_test, labs_test)

    score = score_rzt / nrzt

    return score
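
# Hypothetical usage sketch (editorial addition, kept commented out because it
# depends on precomputed result files and project-specific data access). The
# settings keys are exactly the ones read by the two functions above; every
# concrete value (dataset name, subject ids, session, storage) is a placeholder
# that must match the project's own data layout.
#
# settings = {
#     'dataset': 'SomeDataset',            # placeholder dataset name
#     'paradigm': 'MI',
#     'session': 1,                        # placeholder session identifier
#     'storage': 'local',                  # placeholder storage backend
#     'subject_list': list(range(1, 11)),  # placeholder subject ids
#     'ncovs_list': [10],                  # labeled target trials per class
# }
# for ntop in [1, 3, 5]:
#     print(ntop, score_ensemble_rot(settings, subject_target=1, ntop=ntop))
#     print(ntop, score_pooling_rct(settings, subject_target=1, ntop=ntop))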