def transform_rct2str(source, target_train, target_test):
    """Stretch the target set's dispersion around identity to match the source's.

    The dispersion of a set is its mean squared Riemannian distance to the
    identity. Each target matrix (train and test) is raised to the power
    ``1/p`` with ``p = sqrt(disp_target / disp_source)``; source matrices
    are passed through unchanged.

    Returns (source_pow, target_pow_train, target_pow_test), each a dict
    with 'covs' and 'labels'.
    """
    src_covs = source['covs']
    tgt_covs_train = target_train['covs']
    tgt_covs_test = target_test['covs']

    # Source set is repackaged as-is.
    source_pow = {'covs': source['covs'], 'labels': source['labels']}

    dim = src_covs.shape[1]
    eye = np.eye(dim)
    disp_src = np.sum(
        [distance_riemann(c, eye) ** 2 for c in src_covs]) / len(src_covs)
    disp_tgt = np.sum(
        [distance_riemann(c, eye) ** 2 for c in tgt_covs_train]) / len(tgt_covs_train)
    p = np.sqrt(disp_tgt / disp_src)

    target_pow_train = {
        'covs': np.stack([powm(c, 1.0 / p) for c in tgt_covs_train]),
        'labels': target_train['labels'],
    }
    target_pow_test = {
        'covs': np.stack([powm(c, 1.0 / p) for c in tgt_covs_test]),
        'labels': target_test['labels'],
    }
    return source_pow, target_pow_train, target_pow_test
def test_sigma_gaussian_spd():
    """Larger sigma must yield samples farther, on average, from the mean."""
    n_matrices, n_dim = 50, 8
    sig_1, sig_2 = 1., 2.
    mean = np.eye(n_dim)
    X1 = sample_gaussian_spd(n_matrices, mean, sig_1, random_state=42)
    X2 = sample_gaussian_spd(n_matrices, mean, sig_2, random_state=66)
    avg_d1 = np.mean([distance_riemann(X, mean) for X in X1])
    avg_d2 = np.mean([distance_riemann(X, mean) for X in X2])
    assert avg_d1 < avg_d2
def cost_distance_to_landmark(W, Xi, Lj):
    """Sum of squared Riemannian distances between the W-projected matrices
    in ``Xi`` and the W-projected landmark ``Lj``.

    Parameters: W — projection matrix; Xi — iterable of SPD matrices;
    Lj — landmark SPD matrix. Returns a scalar cost.
    """
    # Hoisted: the projected landmark is loop-invariant; the original
    # recomputed it once per matrix in Xi.
    Lj_ = np.dot(W.T, np.dot(Lj, W))
    dist = 0
    for Xk in Xi:
        Xk_ = np.dot(W.T, np.dot(Xk, W))
        dist = dist + distance_riemann(Xk_, Lj_)**2
    return dist
def argmin_distance(self, sample):
    """Return the index of the class center nearest to ``sample`` under
    the Riemannian distance (first center wins on ties)."""
    closest = np.inf
    for k in range(self.nb_classes):
        d = distance_riemann(sample, self.cov_centers[k])
        if d < closest:
            closest, idx = d, k
    return idx
def distance_lowrank(A, B, p, alpha=100):
    """Distance between two low-rank matrices: the Riemannian distance
    between their rank-``p`` core factors from eigendecomposition.

    ``alpha`` is kept for interface compatibility; the subspace term it
    once weighted is disabled.

    Cleanup: removed dead commented-out code and the unused subspace
    factors that were unpacked but never used.
    """
    RA, _ = eigendecomp_rank(A, p)
    RB, _ = eigendecomp_rank(B, p)
    return distance_riemann(RA, RB)
def wrapper_distance_riemann(A, B, args):
    """
    A wrapper for pyriemann.utils.distance.distance_riemann to handle I/O
    with regards to reducing samples classes; ``args`` is accepted and
    ignored to match the common reducer signature.

    For function doc refer to the doc of
    pyriemann.utils.distance.distance_riemann.
    """
    # Docstring fix: the original pointed at pyriemann.utils.mean, but
    # distance_riemann lives in pyriemann.utils.distance.
    return distance_riemann(A, B)
def cost_(W, X, A):
    """Affinity-weighted sum of squared pairwise Riemannian distances
    between the W-projected matrices in ``X``.

    Parameters: W — projection matrix; X — (Nt, Nc, Nc) stack of SPD
    matrices; A — (Nt, Nt) affinity matrix. Returns a scalar cost.
    """
    Nt = X.shape[0]
    c = 0
    for i in range(Nt):
        # Hoisted: Xi_ is invariant over the inner loop.
        Xi_ = np.dot(W.T, np.dot(X[i], W))
        # BUG FIX: the original enumerated X[(i+1):] restarting j at 0,
        # so A[i, j] addressed the wrong affinity entry; index the upper
        # triangle with true indices instead (i != j then holds trivially).
        for j in range(i + 1, Nt):
            if np.abs(A[i, j]) > 1e-3:
                Xj_ = np.dot(W.T, np.dot(X[j], W))
                c = c + A[i, j] * distance_riemann(Xi_, Xj_)**2
    return c
def get_distmatrix(covs):
    """Symmetric matrix of pairwise Riemannian distances.

    D[i, j] = distance_riemann(covs[i], covs[j]); the diagonal stays 0.

    Parameters: covs — (Nt, Nc, Nc) stack of SPD matrices.
    Returns: D — (Nt, Nt) symmetric distance matrix.
    """
    Nt = covs.shape[0]
    D = np.zeros((Nt, Nt))
    for i in range(Nt):
        # BUG FIX: the original enumerated covs[(i+1):] restarting j at 0,
        # which wrote d(covs[i], covs[i+1+j]) into D[i, j] — shifting every
        # entry and even putting nonzero values on the diagonal. Index the
        # upper triangle explicitly.
        for j in range(i + 1, Nt):
            d = distance_riemann(covs[i], covs[j])
            D[i, j] = d
            D[j, i] = d
    return D
def distance_spectrum(x, y):
    """Sum over frequency bins of the squared Riemannian distances between
    the cross-spectral matrices of ``x`` and ``y``."""
    _, px = cross_spectrum(x)
    _, py = cross_spectrum(y)
    return sum(distance_riemann(px[f], py[f]) ** 2 for f in range(len(px)))
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None, u_prime=lambda x: 1):
    """Weighted Riemannian (Karcher-type) mean of a set of SPD matrices.

    Gradient descent on the manifold with an adaptive step size ``nu``:
    at each iteration the (weighted) tangent-space mean ``J`` is computed
    at the current estimate ``C`` and the estimate is moved along it.

    Parameters
    ----------
    covmats : ndarray, shape (Nt, Ne, Ne)
        Set of SPD matrices to average.
    tol : float
        Tolerance on the gradient norm and the step size. NOTE: ``10e-9``
        is 1e-8, not 1e-9.
    maxiter : int
        Maximum number of iterations.
    init : ndarray, shape (Ne, Ne), or None
        Initial estimate; defaults to the Euclidean mean.
    u_prime : callable
        Weight applied to each matrix's squared distance to the current
        mean (M-estimator style). If it returns a list, it is indexed by
        the matrix index ``i`` — presumably one weight per matrix; TODO
        confirm against callers.

    Returns
    -------
    C : ndarray, shape (Ne, Ne)
        Estimated mean.
    """
    Nt, Ne, Ne = covmats.shape
    if init is None:
        # start from the arithmetic (Euclidean) mean
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    nu = 1.0  # step size, adapted below
    tau = np.finfo(np.float64).max  # best step norm seen so far
    crit = np.finfo(np.float64).max
    # stop when the gradient norm or the step size falls under tol
    # (1e-8 here) or max iteration = 50 is reached
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        J = np.zeros((Ne, Ne))
        for i in range(Nt):
            # whiten the i-th matrix by the current estimate
            tmp = (Cm12 @ covmats[i, :, :]) @ Cm12
            if type(u_prime(1)) == list:
                # per-matrix weights: pick the i-th entry
                J += logm(tmp) * u_prime(
                    distance_riemann(C, covmats[i, :, :])**2)[i] / Nt
            else:
                # scalar weight shared by all matrices
                J += logm(tmp) * u_prime(
                    distance_riemann(C, covmats[i, :, :])**2) / Nt
        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        # retract back onto the SPD manifold
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        if h < tau:
            # progress: shrink the step slowly, remember the best norm
            nu = 0.95 * nu
            tau = h
        else:
            # overshoot: halve the step
            nu = 0.5 * nu
    return C
def compute_covariances(self):
    """Estimate trial-wise, class-wise and global covariances from the
    epoched windows, then derive per-trial weights from each trial's
    Riemannian distance to the global mean (closer trials get larger
    weights).

    Side effects: sets ``self.trialwise_covs``, ``self.classwise_covs``,
    ``self.global_cov`` and ``self.trialwise_weights``.
    """
    import numpy as np
    from pyriemann.utils.mean import mean_riemann
    from pyriemann.utils.distance import distance_riemann
    data = self.windows.get_data()
    # one covariance per window; data is expected 3-D
    # (presumably (n_windows, n_channels, n_times) — TODO confirm)
    if len(data.shape)==3:
        trialwise_covs_list = list()
        for i_window in range(data.shape[0]):
            trial_cov = np.cov(data[i_window])
            trialwise_covs_list.append(trial_cov)
        trialwise_covs = np.array(trialwise_covs_list)
    else:
        raise ValueError('Shape of windows.get_data() is not 3D')
    # Riemannian mean over all trials
    global_cov = mean_riemann(trialwise_covs, tol=1e-08, maxiter=50, init=None, sample_weight=None)
    unique_targets = np.unique(self.y)
    classwise_covs = dict()
    for target in unique_targets:
        indices_target = np.where(self.y==target)[0]
        target_covs_list = list()
        for index in indices_target:
            target_covs_list.append(trialwise_covs[index])
        target_covs = np.array(target_covs_list)
        # https://github.com/pyRiemann/pyRiemann/issues/65
        # if you can not fix the root cause of the problem, then add regularization :
        # cov = Covariances('oas').fit_transform(X_train)
        riemann_mean_cov = mean_riemann(target_covs, tol=1e-08, maxiter=50, init=None, sample_weight=None)
        classwise_covs[target] = riemann_mean_cov
    self.trialwise_covs = trialwise_covs
    self.classwise_covs = classwise_covs
    self.global_cov = global_cov
    # distance of each trial's covariance to the global Riemannian mean
    distances = np.zeros(data.shape[0])
    for i_window in range(data.shape[0]):
        trial_cov = trialwise_covs[i_window]
        distances[i_window] = distance_riemann(trial_cov, global_cov)
        # covar_target = classwise_covs[self.y[i_window]]
        # distances[i_window] = distance_riemann(covar_trial, covar_target)
    # print('Min: {:.3f} | Max: {:.3f}'.format(np.min(distances), np.max(distances)))
    alpha = 1.0  # scale of the distance-dependent part of the weight
    beta = 0.5   # baseline weight, so no trial is fully discarded
    # self.trialwise_weights = np.ones(data.shape[0])
    # min-max normalize to [0, 1]; epsilon guards against max == min
    distances = (distances - np.min(distances)) / (np.max(distances) - np.min(distances) + 0.00001)
    # beta==0 | range = [0, alpha]
    # beta!=0 | range = [beta, alpha+beta]
    self.trialwise_weights = beta + alpha*(1 - distances)
def _predict_distances(self, covtest): """Helper to predict the distance. equivalent to transform.""" dist = [] for covi in covtest: m = {} for p in self.plist: m[p] = [] for label in self.classes_: m[p].append(distance_riemann(covi, self.covmeans_[p][label])**2) pmin = min(m.items(), key=lambda x: np.sum(x[1]))[0] dist.append(np.array(m[pmin])) return np.stack(dist)
def means_field_test(covs, plist, means_train):
    """Classify each matrix by its nearest class mean, after selecting,
    per matrix, the power ``p`` that minimizes the total squared
    Riemannian distance to the class means.

    Parameters: covs — matrices to label; plist — candidate powers;
    means_train — dict mapping p -> {label: class mean}.
    Returns: array of predicted labels.
    """
    labs = sorted(means_train[plist[0]].keys())
    # Hoisted: np.unique(labs) was recomputed inside both loops although it
    # never changes (labs is already sorted; unique() only deduplicates).
    classes = np.unique(labs)
    labs_pred = []
    for covi in covs:
        m = {}
        for p in plist:
            m[p] = [distance_riemann(covi, means_train[p][label]) ** 2
                    for label in classes]
        # power with the smallest total squared distance
        pmin = min(m.items(), key=lambda x: np.sum(x[1]))[0]
        labs_pred.append(classes[np.argmin(m[pmin])])
    return np.array(labs_pred)
def cal_riemann_distance(X, Cmean, rType):
    # find the distance of the covariances in X with the average covariance Cmean
    # two options to use: rType == riem or rType == log-eucl
    """Distance of each covariance in ``X`` to the mean covariance ``Cmean``.

    Parameters: X — (n_trials, n, n) covariances; Cmean — mean covariance;
    rType — 'riem' (affine-invariant Riemannian) or 'log-eucl'
    (log-Euclidean).
    Returns: (n_trials, 1) array of distances.
    Raises: ValueError for an unknown ``rType``.
    """
    num_trials = np.size(X, axis=0)
    output = np.zeros((num_trials, 1))
    if rType == 'riem':
        for ik in range(num_trials):
            output[ik] = dst.distance_riemann(X[ik], Cmean)
    elif rType == 'log-eucl':
        for ik in range(num_trials):
            output[ik] = dst.distance_logeuclid(X[ik], Cmean)
    else:
        # BUG FIX: an unrecognized rType used to silently return all zeros.
        raise ValueError("rType must be 'riem' or 'log-eucl', got %r" % (rType,))
    return output
def _predict_distances(self, covtest): """Helper to predict the distance. equivalent to transform.""" Nc = len(self.covmeans_) dist = np.zeros((covtest.shape[0], Nc)) #shape= (n_trials,n_classes) for j in range(covtest.shape[0]): if self.n_jobs == 1: dist_j = [ distance_riemann(covtest[j, :, :], self.covmeans_[m]) for m in range(Nc) ] else: dist_j = Parallel(n_jobs=self.n_jobs)(delayed( distance_riemann)(covtest[j, :, :], self.covmeans_[m]) for m in range(Nc)) dist_j = np.asarray(dist_j) dist[j, :] = dist_j return dist
def test_distance_riemann():
    """Riemannian distance between two equal matrices must be zero."""
    mat_a = np.eye(3) * 2
    mat_b = np.eye(3) * 2
    assert_array_almost_equal(distance_riemann(mat_a, mat_b), 0)
def test_distance_riemann():
    """A matrix is at Riemannian distance zero from an equal matrix."""
    A = np.eye(3) * 2
    B = A.copy()
    d = distance_riemann(A, B)
    assert_array_almost_equal(d, 0)
def test_distance_generic_riemann():
    """The generic distance() with metric='riemann' matches distance_riemann."""
    A, B = 2 * np.eye(3), 2 * np.eye(3)
    expected = distance_riemann(A, B)
    assert_equal(distance(A, B, metric='riemann'), expected)
def transform_org2opt(source, target_train, target_test):
    """Align target matrices to the source domain via regularized optimal
    transport (Sinkhorn with class-label regularization).

    Train matrices are transported as transport-plan-weighted Riemannian
    means of the source set. Test matrices, unseen by the plan, are moved
    on the manifold: each test matrix's log-map deviation from its nearest
    train matrix is carried to that train matrix's transported position
    (a parallel-transport-like construction — TODO confirm the exact
    scheme against the reference implementation).

    Returns (source, target_opt_train, target_opt_test); source passes
    through unchanged.
    """
    target_opt_train = {}
    target_opt_test = {}
    target_opt_train['labels'] = target_train['labels']
    target_opt_test['labels'] = target_test['labels']
    # get cost matrix: squared Riemannian distances, source x target-train
    Cs = source['covs']
    ys = source['labels']
    Ct_train = target_train['covs']
    Ct_test = target_test['covs']
    M = np.zeros((len(Cs), len(Ct_train)))
    for i, Cs_i in enumerate(Cs):
        for j, Ct_j in enumerate(Ct_train):
            M[i, j] = distance_riemann(Cs_i, Ct_j)**2
    # get the transportation plan (uniform marginals, label-regularized)
    mu_s = distribution_estimation_uniform(Cs)
    mu_t = distribution_estimation_uniform(Ct_train)
    gamma = sinkhorn_lpl1_mm(mu_s, ys, mu_t, M, reg=1.0)
    # transport the target matrices (train): weighted Riemannian barycenter
    # of the source set, weighted by the plan column of each train matrix
    Ct_train_transported = np.zeros(Ct_train.shape)
    for j in range(len(Ct_train_transported)):
        Ct_train_transported[j] = mean_riemann(Cs, sample_weight=gamma[:, j])
    target_opt_train['covs'] = Ct_train_transported
    # transport the target matrices (test)
    D = np.zeros((len(Ct_test), len(Ct_train)))
    for k, Ct_k in enumerate(Ct_test):
        for l, Ct_l in enumerate(Ct_train):
            D[k, l] = distance_riemann(Ct_k, Ct_l)**2
    idx = np.argmin(D, axis=1)  # nearest neighbour to each target test matrix
    Ct_test_transported = np.zeros(Ct_test.shape)
    for i in range(len(Ct_test)):
        j = idx[i]
        Ci = Ct_test[i]               # test matrix to move
        Ri = Ct_train[j]              # its nearest train matrix (origin)
        Rf = Ct_train_transported[j]  # where that train matrix was moved to
        # log-map Ci at Ri to obtain the tangent vector eta_i
        Ri_sqrt = sqrtm(Ri)
        Ri_invsqrt = invsqrtm(Ri)
        Li = logm(np.dot(Ri_invsqrt, np.dot(Ci, Ri_invsqrt)))
        eta_i = np.dot(Ri_sqrt, np.dot(Li, Ri_sqrt))
        # carry eta_i from Ri towards Rf through the geodesic midpoint
        Ri_Rf = geodesic_riemann(Rf, Ri, alpha=0.5)
        Ri_inv = np.linalg.inv(Ri)
        eta_f = np.dot(Ri_inv, np.dot(eta_i, Ri_inv))
        eta_f = np.dot(Ri_Rf, np.dot(eta_f, Ri_Rf))
        # exp-map the transported tangent vector at Rf
        Rf_sqrt = sqrtm(Rf)
        Rf_invsqrt = invsqrtm(Rf)
        Ef = expm(np.dot(Rf_invsqrt, np.dot(eta_f, Rf_invsqrt)))
        Ct_test_transported[i] = np.dot(Rf_sqrt, np.dot(Ef, Rf_sqrt))
    target_opt_test['covs'] = Ct_test_transported
    return source, target_opt_train, target_opt_test
C_nanriem = nanmean_riemann(covmats) # Riemannian mean, after matrix deletion: average only uncorrupted matrices isnan = np.isnan(np.sum(covmats, axis=(1, 2))) covmats_ = np.delete(covmats, np.where(isnan), axis=0) perc = len(covmats_) / n_matrices * 100 print("Percentage of uncorrupted matrices: {:.2f} %".format(perc)) C_mdriem = mean_riemann(covmats_) ############################################################################### # Compare covariance means # ------------------------ # # Compare distances between the different means and the reference. d_naneucl = distance_riemann(C_ref, C_naneucl) print(f"Euclidean NaN-mean, distance to ref = {d_naneucl:.3f}") d_nanriem = distance_riemann(C_ref, C_nanriem) print(f"Riemannian NaN-mean, distance to ref = {d_nanriem:.3f}") d_mdriem = distance_riemann(C_ref, C_mdriem) print(f"Riemannian mean after deletion, distance to ref = {d_mdriem:.3f}") # Riemannian NaN-mean gives the best result, and Riemannian mean after matrix # deletion is worst than Euclidean NaN-mean. ############################################################################### # Evaluate influence of corrupted channels # ---------------------------------------- #
def cost_function_pair_rie(M, M_tilde, Q):
    """Squared Riemannian distance between ``M`` and the congruence
    transform Q @ M_tilde @ Q.T."""
    rotated = np.dot(Q, np.dot(M_tilde, Q.T))
    d = distance_riemann(M, rotated)
    return d ** 2
def cost_kj(W, Xk, Lj):
    """Negated squared Riemannian distance between the W-projected matrix
    ``Xk`` and landmark ``Lj`` — negated so that minimizing this cost
    maximizes the separation."""
    WT = W.T
    Xk_proj = np.dot(WT, np.dot(Xk, W))
    Lj_proj = np.dot(WT, np.dot(Lj, W))
    return -1 * distance_riemann(Xk_proj, Lj_proj) ** 2
# Evaluate nearest-class-mean classification on the test covariances.
y_test = labels
cov_test = cov_ext_trials
# y_train = labels[::2] # take even indexes
# y_test = labels[1::2] # take odd indexes
# cov_train = cov_ext_trials[::2]
# cov_test = cov_ext_trials[1::2]
# One 24x24 class center per class, from the training covariances.
cov_centers = empty((len(classes), 24, 24))
for i, l in enumerate(classes):
    cov_centers[i, :, :] = mean_riemann(cov_train[y_train == l, :, :])
## Mean calculated with Affine-Invariant Riemannian metric (AIRM)
## implemented in pyRiemann
accuracy = list()
# NOTE(review): the loop variable `labels` shadows the outer `labels`
# array assigned to y_test above — confirm this is intentional.
for sample, labels in zip(cov_test, y_test):
    # predict the class whose center is nearest in Riemannian distance
    dist = [distance_riemann(sample, cov_centers[m]) for m in range(len(classes))]
    if classes[array(dist).argmin()] == labels:
        accuracy.append(1)
    else:
        accuracy.append(0)
test_accuracy = 100.*array(accuracy).sum()/len(y_test)
t2 = time()
print ('tempo: ' + str(t2-t1))
print ('Evaluation accuracy on test set is %.2f%%' % test_accuracy)
def test_distance_generic_riemann():
    """distance() with the 'riemann' metric agrees with distance_riemann."""
    mat = 2 * np.eye(3)
    other = 2 * np.eye(3)
    assert_equal(distance(mat, other, metric='riemann'),
                 distance_riemann(mat, other))
s = gen_mvar(coeffs, Ns, Nt, sig=1.0) #%% m = 16 Q = np.random.randn(m, m) Q = Q + Q.T w, v = np.linalg.eig(Q) A = v[:, :n] x = project_sources(s, A) #%% L = 128 st = 1 Cw = [] for w in gen_windows(L, Ns, step=st): xw = x[:, :, w] covs = Covariances().fit_transform(xw) Cw.append(np.mean(covs, axis=0)) #%% from pyriemann.utils.distance import distance_riemann stat = Cw[:250] Cstat = mean_riemann(np.stack(stat)) dist = [] for Cwi in Cw: dist.append(distance_riemann(Cstat, Cwi)) plt.plot(L / 2 + np.arange(0, Ns - L + 1), dist)
def distance(A, B):
    """Squared Riemannian distance between SPD matrices ``A`` and ``B``."""
    d = distance_riemann(A, B)
    return d ** 2
data[condition]['y'] = y # estimate xDawn covs ncomps = 4 erp = XdawnCovariances(classes=[1], estimator='lwf', nfilter=ncomps, xdawn_estimator='lwf') #erp = ERPCovariances(classes=[1], estimator='lwf', svd=ncomps) split = train_test_split(X, y, train_size=0.50, random_state=42) Xtrain, Xtest, ytrain, ytest = split covs = erp.fit(Xtrain, ytrain).transform(Xtest) Mtarget = mean_riemann(covs[ytest == 1]) Mnontarget = mean_riemann(covs[ytest == 0]) stats[condition]['distance'] = distance_riemann(Mtarget, Mnontarget) stats[condition]['dispersion_target'] = np.sum( [distance_riemann(covi, Mtarget)**2 for covi in covs[ytest == 1]]) / len(covs[ytest == 1]) stats[condition]['dispersion_nontarget'] = np.sum([ distance_riemann(covi, Mnontarget)**2 for covi in covs[ytest == 0] ]) / len(covs[ytest == 0]) print('subject', subject) print(stats) results[subject] = stats # covs = np.concatenate([covs, Mtarget[None,:,:], Mnontarget[None,:,:]]) # ytest = np.concatenate([ytest, [1], [0]]) # data[condition]['ytest'] = ytest