Example #1
def transform_rct2rot_p300(source, target_train, target_test, class_weights, distance='euc'):

    source_rot = {}
    source_rot['covs'] = source['covs']
    source_rot['labels'] = source['labels']

    target_rot_train = {}
    target_rot_train['labels'] = target_train['labels']

    target_rot_test = {}
    target_rot_test['labels'] = target_test['labels']

    M_source = []
    for i in np.unique(source['labels']):
        M_source_i = mean_riemann(source['covs'][source['labels'] == i])
        M_source.append(M_source_i)

    M_target_train = []
    for j in np.unique(target_train['labels']):
        M_target_train_j = mean_riemann(target_train['covs'][target_train['labels'] == j])
        M_target_train.append(M_target_train_j)

    R = manifoptim.get_rotation_matrix(M=M_source, Mtilde=M_target_train, dist=distance, weights=class_weights)

    covs_target_train = np.stack([np.dot(R, np.dot(covi, R.T)) for covi in target_train['covs']])
    target_rot_train['covs'] = covs_target_train

    covs_target_test = np.stack([np.dot(R, np.dot(covi, R.T)) for covi in target_test['covs']])
    target_rot_test['covs'] = covs_target_test

    return source_rot, target_rot_train, target_rot_test
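The helper manifoptim.get_rotation_matrix is external to these snippets; in the Riemannian Procrustes Analysis setting it is assumed to solve a weighted rotation alignment between the two sets of class means,

\[ R = \operatorname*{arg\,min}_{R \in SO(n)} \sum_k w_k \, d\big(M_k,\; R \tilde M_k R^\top\big)^2 , \]

with d either the Euclidean or the Riemannian distance; the resulting R is then applied to every target covariance as C -> R C R^T (the np.dot chains above).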
Example #2
def transform_org2rct(source_org, target_org_train, target_org_test):
    '''
        transform the original matrices into their parallel-transported versions;
        here the reference (resting-state mean) is transported to the identity
        org (original) -> rct (parallel transport / re-centering)
    '''

    source_rct = {}
    T_source = np.stack([mean_riemann(source_org['covs'])] *
                        len(source_org['covs']))
    source_rct['covs'] = parallel_transport_covariances(
        source_org['covs'], T_source)
    source_rct['labels'] = source_org['labels']

    M_target = mean_riemann(target_org_train['covs'])

    target_rct_train = {}
    T_target_train = np.stack([M_target] * len(target_org_train['covs']))
    target_rct_train['covs'] = parallel_transport_covariances(
        target_org_train['covs'], T_target_train)
    target_rct_train['labels'] = target_org_train['labels']

    target_rct_test = {}
    T_target_test = np.stack([M_target] * len(target_org_test['covs']))
    target_rct_test['covs'] = parallel_transport_covariances(
        target_org_test['covs'], T_target_test)
    target_rct_test['labels'] = target_org_test['labels']

    return source_rct, target_rct_train, target_rct_test
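parallel_transport_covariances is not defined in any of these examples. Consistent with the docstring above (each matrix is transported so that its reference lands at the identity), a minimal sketch could look like the following; the congruence by T^(-1/2) is an assumption about the helper, not its verbatim source:

import numpy as np
from pyriemann.utils.base import invsqrtm

def parallel_transport_covariances(covs, refs):
    # transport each covariance C so that its reference T maps to the
    # identity: C -> T^(-1/2) C T^(-1/2)
    return np.stack([invsqrtm(T) @ C @ invsqrtm(T)
                     for C, T in zip(covs, refs)])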
Example #3
def transform_org2rct_p300(source,
                           target_train,
                           target_test,
                           weight_samples=False):

    source_rct = {}
    source_rct['labels'] = source['labels']
    weights = np.ones(len(source['labels']))
    if weight_samples:
        weights[source['labels'] == 2] = 5
    T = mean_riemann(source['covs'], sample_weight=weights)
    T_source = np.stack([T] * len(source['covs']))
    source_rct['covs'] = parallel_transport_covariances(
        source['covs'], T_source)

    target_rct_train = {}
    target_rct_train['labels'] = target_train['labels']
    weights = np.ones(len(target_train['labels']))
    if weight_samples:
        weights[target_train['labels'] == 2] = 5
    M_train = mean_riemann(target_train['covs'], sample_weight=weights)
    T_target = np.stack([M_train] * len(target_train['covs']))
    target_rct_train['covs'] = parallel_transport_covariances(
        target_train['covs'], T_target)

    target_rct_test = {}
    target_rct_test['labels'] = target_test['labels']
    M_test = M_train
    T_target = np.stack([M_test] * len(target_test['covs']))
    target_rct_test['covs'] = parallel_transport_covariances(
        target_test['covs'], T_target)

    return source_rct, target_rct_train, target_rct_test
Example #4
def transform_org2talmon(source, target_train, target_test):

    covs_source = source['covs']
    covs_target_train = target_train['covs']
    covs_target_test = target_test['covs']

    M_source = mean_riemann(covs_source)
    M_target_train = mean_riemann(covs_target_train)
    M = geodesic_riemann(M_source, M_target_train, alpha=0.5)

    Gamma = ParallelTransport(reference_old=M_source, reference_new=M)
    covs_source_transp = Gamma.fit_transform(covs_source)

    Gamma = ParallelTransport(reference_old=M_target_train, reference_new=M)
    covs_target_train_transp = Gamma.fit_transform(covs_target_train)
    covs_target_test_transp = Gamma.transform(covs_target_test)

    source_talmon = {}
    source_talmon['labels'] = source['labels']
    source_talmon['covs'] = covs_source_transp

    target_talmon_train = {}
    target_talmon_train['labels'] = target_train['labels']
    target_talmon_train['covs'] = covs_target_train_transp

    target_talmon_test = {}
    target_talmon_test['labels'] = target_test['labels']
    target_talmon_test['covs'] = covs_target_test_transp

    return source_talmon, target_talmon_train, target_talmon_test
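geodesic_riemann(M_source, M_target_train, alpha=0.5) is the midpoint of the affine-invariant geodesic between the two dataset means, for which there is a closed form:

\[ \gamma(t) = A^{1/2}\big(A^{-1/2} B A^{-1/2}\big)^{t} A^{1/2}, \qquad M = \gamma(1/2), \]

so both datasets are parallel-transported to a common reference halfway between their means.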
Example #5
def test_check_raise():
    """Test chech SPD matrices"""
    C = 2 * np.ones((10, 3, 3))
    # This is an indirect check, the riemannian mean must crash when the
    # matrices are not SPD.
    with pytest.raises(ValueError):
        mean_riemann(C)
Example #6
def transform_org2rct(source, target_train, target_test, weights_classes=None):

    weights_source = np.ones(len(source['labels']))
    weights_target = np.ones(len(target_train['labels']))
    if weights_classes is not None:
        for label in weights_classes.keys():
            weights_source[source['labels'] == label] = weights_classes[label]
            weights_target[target_train['labels'] ==
                           label] = weights_classes[label]

    source_rct = {}
    source_rct['labels'] = source['labels']
    T = mean_riemann(source['covs'], sample_weight=weights_source)
    T_source = np.stack([T] * len(source['covs']))
    source_rct['covs'] = parallel_transport_covariances(
        source['covs'], T_source)

    target_rct_train = {}
    target_rct_train['labels'] = target_train['labels']
    M_train = mean_riemann(target_train['covs'], sample_weight=weights_target)
    T_target = np.stack([M_train] * len(target_train['covs']))
    target_rct_train['covs'] = parallel_transport_covariances(
        target_train['covs'], T_target)

    target_rct_test = {}
    target_rct_test['labels'] = target_test['labels']
    M_test = M_train
    T_target = np.stack([M_test] * len(target_test['covs']))
    target_rct_test['covs'] = parallel_transport_covariances(
        target_test['covs'], T_target)

    return source_rct, target_rct_train, target_rct_test
Example #7
def test_riemann_mean(init, get_covmats_params):
    """Test the riemannian mean"""
    n_matrices, n_channels = 100, 3
    covmats, diags, A = get_covmats_params(n_matrices, n_channels)
    if init:
        C = mean_riemann(covmats, init=covmats[0])
    else:
        C = mean_riemann(covmats)
    Ctrue = np.exp(np.log(diags).mean(0))
    Ctrue = A @ np.diag(Ctrue) @ A.T
    assert C == approx(Ctrue)
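The closed form for Ctrue works because the test matrices share a common congruence, C_i = A D_i A^T with diagonal D_i: the affine-invariant mean is equivariant under congruence, and the mean of commuting diagonal matrices is the entrywise geometric mean, giving

\[ \bar C = A \exp\!\Big(\frac{1}{n}\sum_i \log D_i\Big) A^\top . \]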
Example #8
	def compute_covariances(self):
		import numpy as np
		from pyriemann.utils.mean import mean_riemann
		from pyriemann.utils.distance import distance_riemann
		data = self.windows.get_data()
		if len(data.shape)==3:
			trialwise_covs_list = list()
			for i_window in range(data.shape[0]):
				trial_cov = np.cov(data[i_window])
				trialwise_covs_list.append(trial_cov)
			trialwise_covs = np.array(trialwise_covs_list)
		else:
			raise ValueError('Shape of windows.get_data() is not 3D')

		global_cov = mean_riemann(trialwise_covs, tol=1e-08, maxiter=50, init=None, sample_weight=None)

		unique_targets = np.unique(self.y)
		classwise_covs = dict()
		for target in unique_targets:
			indices_target = np.where(self.y==target)[0]
			target_covs_list = list()
			for index in indices_target:
				target_covs_list.append(trialwise_covs[index])
			target_covs = np.array(target_covs_list)
			# https://github.com/pyRiemann/pyRiemann/issues/65
			# if you can not fix the root cause of the problem, then add regularization :
			# cov = Covariances('oas').fit_transform(X_train)
			riemann_mean_cov = mean_riemann(target_covs, tol=1e-08, maxiter=50, init=None, sample_weight=None)
			classwise_covs[target] = riemann_mean_cov
		self.trialwise_covs = trialwise_covs
		self.classwise_covs = classwise_covs
		self.global_cov = global_cov

		distances = np.zeros(data.shape[0])
		for i_window in range(data.shape[0]):
			trial_cov = trialwise_covs[i_window]
			distances[i_window] = distance_riemann(trial_cov, global_cov)
			# covar_target = classwise_covs[self.y[i_window]]
			# distances[i_window] = distance_riemann(covar_trial, covar_target)
		# print('Min: {:.3f} | Max: {:.3f}'.format(np.min(distances), np.max(distances)))

		alpha = 1.0
		beta = 0.5

		# self.trialwise_weights = np.ones(data.shape[0])

		distances = (distances - np.min(distances)) / (np.max(distances) - np.min(distances) + 0.00001)
		# beta==0 | range = [0, alpha]
		# beta!=0 | range = [beta, alpha+beta]
		self.trialwise_weights = beta + alpha*(1 - distances)
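A quick numeric check of the weighting scheme above with alpha=1.0 and beta=0.5: normalized distances of 0, 0.5 and 1 map to weights 1.5, 1.0 and 0.5, so trials far from the global Riemannian mean are down-weighted:

import numpy as np

distances = np.array([0.0, 0.5, 1.0])  # already min-max normalized
weights = 0.5 + 1.0 * (1 - distances)  # beta + alpha * (1 - d)
print(weights)                         # [1.5 1.  0.5]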
Example #9
def transform_str2rot(source,
                      target_train,
                      target_test,
                      weights_classes=None,
                      distance='euc'):

    source_rot = {}
    source_rot['covs'] = source['covs']
    source_rot['labels'] = source['labels']

    target_rot_train = {}
    target_rot_train['labels'] = target_train['labels']

    target_rot_test = {}
    target_rot_test['labels'] = target_test['labels']

    class_labels = np.unique(source['labels'])

    M_source = []
    for i in class_labels:
        M_source_i = mean_riemann(source['covs'][source['labels'] == i])
        M_source.append(M_source_i)

    M_target_train = []
    for j in class_labels:
        M_target_train_j = mean_riemann(
            target_train['covs'][target_train['labels'] == j])
        M_target_train.append(M_target_train_j)

    if weights_classes is None:
        weights = [1] * len(class_labels)
    else:
        weights = []
        for label in class_labels:
            weights.append(weights_classes[label])

    R = manifoptim.get_rotation_matrix(M=M_source,
                                       Mtilde=M_target_train,
                                       dist=distance,
                                       weights=weights)

    covs_target_train = np.stack(
        [np.dot(R, np.dot(covi, R.T)) for covi in target_train['covs']])
    target_rot_train['covs'] = covs_target_train

    covs_target_test = np.stack(
        [np.dot(R, np.dot(covi, R.T)) for covi in target_test['covs']])
    target_rot_test['covs'] = covs_target_test

    return source_rot, target_rot_train, target_rot_test
Example #10
def test_alm_mean(get_covmats):
    """Test the ALM mean"""
    n_matrices, n_channels = 3, 3
    covmats = get_covmats(n_matrices, n_channels)
    C_alm = mean_alm(covmats)
    C_riem = mean_riemann(covmats)
    assert C_alm == approx(C_riem)
Example #11
def _reduction_landmarks_sup(X, P, labels):
    Xc = [X[labels == i] for i in np.unique(labels)]
    Lc = [mean_riemann(Xi) for Xi in Xc]
    c = []
    g = []
    for Xc_, Lc_ in zip(Xc, Lc):
        c_, g_ = _distance_to_landmarks_riemann(Xc_, [Lc_], P)
        c.append(c_)
        g.append(g_)

    def cost_sum(W, c):
        cst = 0
        for c_ in c:
            cst += c_(W)
        return cst

    def egrad_sum(W, g):
        grd = np.zeros(W.shape)
        for g_ in g:
            grd += g_(W)
        return grd

    cost = partial(cost_sum, c=c)
    egrad = partial(egrad_sum, g=g)
    W = solve_manopt(X, P, cost, egrad)
    return W
Example #12
def mean_semidefi(A, B, p):

    RsqA, VA = eigendecomp_rank(A, p)
    RsqB, VB = eigendecomp_rank(B, p)

    OA, Sigma, OB = np.linalg.svd(np.dot(VA.T, VB))
    Sigma = np.clip(Sigma, 0, 1)
    Theta = np.arccos(Sigma)

    UA = np.dot(VA, OA)
    UB = np.dot(VB, OB)

    n = A.shape[0]  # ambient dimension
    X = np.dot(np.eye(n) - np.dot(UA, UA.T), UB)
    sTheta = np.diag(np.sin(Theta))
    X = np.dot(X, np.linalg.pinv(sTheta))

    cTheta = np.diag(np.cos(Theta / 2))
    Wl = np.dot(UA, cTheta)

    sTheta = np.diag(np.sin(Theta / 2))
    Wr = np.dot(X, sTheta)

    W = Wl + Wr
    K = mean_riemann(np.stack([RsqA, RsqB]))
    avg = np.dot(W, np.dot(K, W.T))

    return avg
Example #13
def alter_mean_cov(mean_cov_in, num_in_class_in, X_val, label_v):
    # Updates all class mean covariance matrices by giving the previous mean
    # a constant weight, then considering all new data individually.
    # num_classes and num_channels are assumed to be module-level globals.
    # Returns: the new mean covariance matrices, the number of data assigned
    # to each class, and how long the operation took.

    label_val_j = label_v

    mean_cov_n = mean_cov_in
    mean_cov_out = mean_cov_in
    num_in_class_out = num_in_class_in
    tic = time.perf_counter()  # time.clock() was removed in Python 3.8
    for l in range(num_classes):
        if X_val[label_val_j == l].shape[0] > 0:
            sample_weight_n = np.ones(X_val[label_val_j == l].shape[0] + 1)
            sample_weight_n[0] = 10
            sample_weight_n = sample_weight_n / num_in_class_in[l]
            X_val_n = np.vstack([
                mean_cov_n[l].reshape(1, num_channels, num_channels),
                X_val[label_val_j == l]
            ])
            mean_cov_n[l] = mean_riemann(X_val_n,
                                         sample_weight=sample_weight_n,
                                         init=mean_cov_n[l])
            mean_cov_out[l] = mean_cov_n[l]

    time_out = time.perf_counter() - tic
    print(time_out)

    return mean_cov_out, num_in_class_out, time_out
Example #14
def alter_mean_cov_2(mean_cov_in, num_in_class_in, X_val, label_v):
    # Updates each class mean covariance matrix by weighting the previous
    # mean as much as the number of previously seen datapoints, then
    # considering all new data individually.
    # num_classes is assumed to be a module-level global.
    # Returns: the new mean covariance matrices, the number of data assigned
    # to each class, and how long the operation took.

    label_val_j = label_v

    mean_cov_n = mean_cov_in
    mean_cov_out = mean_cov_in
    num_in_class_n = np.zeros((num_in_class_in.shape))
    num_in_class_out = num_in_class_in

    tic = time.perf_counter()  # time.clock() was removed in Python 3.8
    for l in range(num_classes):
        if X_val[label_val_j == l].shape[0] > 0:
            sample_weight_n = np.ones(X_val[label_val_j == l].shape[0] + 1)
            sample_weight_n[0] = num_in_class_in[l]
            sample_weight_n = sample_weight_n / num_in_class_in[l]
            X_val_n = np.vstack([
                mean_cov_n[l].reshape(1, mean_cov_in.shape[1],
                                      mean_cov_in.shape[1]),
                X_val[label_val_j == l]
            ])
            mean_cov_n[l] = mean_riemann(X_val_n,
                                         sample_weight=sample_weight_n,
                                         init=mean_cov_n[l])
            num_in_class_n[l] = X_val[label_val_j == l].shape[0]
            num_in_class_out[l] = num_in_class_out[l] + num_in_class_n[l]
            mean_cov_out[l] = mean_cov_n[l]

    time_out = time.perf_counter() - tic
    print(time_out)

    return mean_cov_out, num_in_class_out, time_out
Example #15
    def _fit_binary(self, X, y, random_state, fit_params):
        """Fit a binary LogitBoost model.

        This is Algorithm 3 in Friedman, Hastie, & Tibshirani (2000).
        """
        # Initialize with uniform class probabilities
        p = np.full(shape=X.shape[0], fill_value=0.5, dtype=np.float64)

        # Initialize zero scores for each observation
        scores = np.zeros(X.shape[0], dtype=np.float64)

        # Do the boosting iterations to build the ensemble of estimators
        for i in range(self.n_estimators):
            # Update the working response and weights for this iteration
            sample_weight, z = self._weights_and_response(y, p)

            # Mapping the data to tangent space of the Riemannian mean
            mu = mean_riemann(X, sample_weight=sample_weight)
            self.mean_spd_matrices.append(mu)
            X_tspace = tangent_space(X, mu)

            # Create and fit a new base estimator
            X_train, z_train, kwargs = self._boost_fit_args(
                X_tspace, z, sample_weight, random_state)
            estimator = self._make_estimator(append=True,
                                             random_state=random_state)
            kwargs.update(fit_params)
            estimator.fit(X_train, z_train, **kwargs)

            # Update the scores and the probability estimates, unless we're
            # doing the last iteration
            if i < self.n_estimators - 1:
                new_scores = estimator.predict(X_tspace)
                scores += self.learning_rate * new_scores
                p = expit(scores)
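_weights_and_response is not shown here. A minimal sketch following Algorithm 3 of Friedman, Hastie & Tibshirani (2000), where the working response is z = (y - p) / (p(1 - p)) and the boosting weights are w = p(1 - p); the clipping threshold is an assumption added to keep z finite:

import numpy as np

def _weights_and_response(y, p, eps=1e-10):
    # boosting weights w = p(1 - p), floored to avoid division by zero
    w = np.clip(p * (1 - p), eps, None)
    # working response for the Newton step of LogitBoost
    z = (y - p) / w
    return w, z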
Example #16
def test_riemann_mean_with_init():
    """Test the riemannian mean with init"""
    covmats, diags, A = generate_cov(100, 3)
    C = mean_riemann(covmats, init=covmats[0])
    Ctrue = np.exp(np.log(diags).mean(0))
    Ctrue = np.dot(np.dot(A, np.diag(Ctrue)), A.T)
    assert_array_almost_equal(C, Ctrue)
Example #17
def get_trajectory(subject):

    print('subject ' + str(subject))
    print('')

    data_params = {}
    data_params[
        'path'] = '/research/vibs/Pedro/datasets/motorimagery/Physionet/eegmmidb/'
    data_params['session'] = 1
    data_params['task'] = 4
    data_params['fparams'] = [8.0, 35.0]
    data_params['subject'] = subject
    data_params['tparams'] = [-13.8, +13.]

    X, y = get_data(data_params)

    L = 160
    nt, nc, ns = X.shape
    covm = []
    for w in tqdm(gen_windows(L, ns, step=20)):
        xw = X[:, :, w]
        covs = Covariances().fit_transform(xw)
        covm.append(mean_riemann(covs))

    print('getting the diffusion embedding')
    covm = np.stack(covm)
    u, l = get_diffusionEmbedding(covm, distance_riemann, alpha=1.0, tdiff=0)

    filepath = './results/Physionet/'
    filepath = filepath + 'trajectory_subject' + str(subject) + '.pkl'
    embedding = [u, l]
    joblib.dump(embedding, filepath)

    print('')
Example #19
def dim_reduction_gpca_euclid(X, P, labels=None, params=None):
    
    def egrad(W, X, M):
        grad = np.zeros(W.shape)
        for Xi in X:
            grad += 4 * (Xi - M) @ W @ W.T @ (Xi - M) @ W
        return -1*grad # becomes a maximization    
    
    def cost(W, X, M):

        def distance(A, B):
            # squared distance consistent with the gradient above:
            # tr((A - B)^2), not tr(A - B)^2
            return np.trace((A - B) @ (A - B))

        cost = 0
        for Xk in X:
            Xk_ = np.dot(W.T, np.dot(Xk, W))
            M_ = np.dot(W.T, np.dot(M, W))
            cost += distance(Xk_, M_)

        return -1*cost  # becomes a maximization

    M = mean_riemann(X)
    # initialization comes from the theoretical analysis of the cost
    w, v = np.linalg.eig(np.sum(X, axis=0) - len(X)*M)
    idx = w.argsort()[::-1]
    v_ = v[:, idx]
    Wo = v_[:, :P]

    cost = partial(cost, X=X, M=M)
    egrad = partial(egrad, X=X, M=M)
    W = solve_manopt(X, P, cost, egrad, Wo=Wo)

    return W
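For reference, the objective being maximized is the spread of the projected matrices around the projected mean; with D_k = X_k - M,

\[ f(W) = \sum_k \operatorname{tr}\!\big((W^\top D_k W)^2\big), \qquad \nabla_W f = \sum_k 4\, D_k W W^\top D_k W , \]

which is exactly the egrad above (negated, since the solver minimizes).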
Example #20
def transform_rct2rot(source,
                      target_train,
                      target_test,
                      weights=None,
                      distance='euc'):
    '''
        rotate the re-centered target matrices so that they match those from
        the source; note that we use class labels from some (not all) of the
        target covariances
        rct (re-centered matrices) -> rot (rotated re-centered matrices)
    '''

    source_rot = {}
    source_rot['covs'] = source['covs']
    source_rot['labels'] = source['labels']

    target_rot_train = {}
    target_rot_train['labels'] = target_train['labels']

    target_rot_test = {}
    target_rot_test['labels'] = target_test['labels']

    M_source = []
    for i in np.unique(source['labels']):
        M_source_i = mean_riemann(source['covs'][source['labels'] == i])
        M_source.append(M_source_i)

    M_target_train = []
    for j in np.unique(target_train['labels']):
        M_target_train_j = mean_riemann(
            target_train['covs'][target_train['labels'] == j])
        M_target_train.append(M_target_train_j)

    R = manifoptim.get_rotation_matrix(M=M_source,
                                       Mtilde=M_target_train,
                                       weights=weights,
                                       dist=distance)

    covs_target_train = np.stack(
        [np.dot(R, np.dot(covi, R.T)) for covi in target_train['covs']])
    target_rot_train['covs'] = covs_target_train

    covs_target_test = np.stack(
        [np.dot(R, np.dot(covi, R.T)) for covi in target_test['covs']])
    target_rot_test['covs'] = covs_target_test

    return source_rot, target_rot_train, target_rot_test
Example #21
    def _fit_multiclass(self, X, y, random_state, fit_params):
        """Fit a multiclass LogitBoost model.

        This is Algorithm 6 in Friedman, Hastie, & Tibshirani (2000).
        """
        # Initialize with uniform class probabilities
        p = np.full(shape=(X.shape[0], self.n_classes_),
                    fill_value=(1. / self.n_classes_),
                    dtype=np.float64)

        # Initialize zero scores for each observation
        scores = np.zeros((X.shape[0], self.n_classes_), dtype=np.float64)

        # Convert y to a one-hot-encoded vector
        y = np.eye(self.n_classes_)[y]

        # Do the boosting iterations to build the ensemble of estimators
        for iboost in range(self.n_estimators):
            # List of estimators for this boosting iteration
            new_estimators = []
            new_mean_spd_matrices = []

            # Create a new estimator for each class
            new_scores = []
            for j in range(self.n_classes_):
                # Compute the working response and weights
                sample_weight, z = self._weights_and_response(y[:, j], p[:, j])

                # Mapping the data to tangent space of the Riemannian mean
                mu = mean_riemann(X, sample_weight=sample_weight)
                new_mean_spd_matrices.append(mu)
                X_tspace = tangent_space(X, mu)

                # Fit a new base estimator
                X_train, z_train, kwargs = self._boost_fit_args(
                    X_tspace, z, sample_weight, random_state)
                estimator = self._make_estimator(append=False,
                                                 random_state=random_state)
                kwargs.update(fit_params)
                estimator.fit(X_train, z_train, **kwargs)
                new_estimators.append(estimator)

                # Update the scores and the probability estimates
                if iboost < self.n_estimators - 1:
                    new_scores.append(estimator.predict(X_tspace))

            if iboost < self.n_estimators - 1:
                new_scores = np.asarray(new_scores).T
                new_scores -= new_scores.mean(axis=1, keepdims=True)
                new_scores *= (self.n_classes_ - 1) / self.n_classes_

                scores += self.learning_rate * new_scores
                p = softmax(scores, axis=1)

            self.estimators_.append(new_estimators)
            self.mean_spd_matrices.append(new_mean_spd_matrices)
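The centering and (J-1)/J scaling of new_scores is the symmetrization step of Algorithm 6 in Friedman, Hastie & Tibshirani (2000):

\[ f_j \leftarrow \frac{J-1}{J}\Big(f_j - \frac{1}{J}\sum_{k=1}^{J} f_k\Big), \qquad J = \text{the number of classes.} \]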
Example #22
def test_distance_wrapper_random(met, gfunc, get_covmats):
    n_trials, n_channels = 2, 5
    covmats = get_covmats(n_trials, n_channels)
    A, B = covmats[0], covmats[1]
    if gfunc is geodesic_euclid:
        Ctrue = mean_euclid(covmats)
    elif gfunc is geodesic_logeuclid:
        Ctrue = mean_logeuclid(covmats)
    elif gfunc is geodesic_riemann:
        Ctrue = mean_riemann(covmats)
    assert geodesic(A, B, 0.5, metric=met) == approx(Ctrue)
Example #23
def dim_reduction_gpca_riemann(X, P, labels=None, params=None):
    
    def egrad(W, X, M):
        
        def log(X):
            w,v = np.linalg.eig(X)
            w_ = np.diag(np.log(w))
            return np.dot(v, np.dot(w_, v.T))    
        
        grad = np.zeros(W.shape)
        for Xk in X:
            
            M_red = W.T @ M @ W
            M_red_inv = np.linalg.inv(M_red)            
            Xk_red = W.T @ Xk @ W
            Xk_red_inv = np.linalg.inv(Xk_red)   
            
            argL = Xk @ W @ Xk_red_inv
            argL = argL - M @ W @ M_red_inv         
            argR = log(Xk_red @ M_red_inv)           
            grd  = 4 * argL @ argR    
            
            grad += grd
            
        return -1*grad # becomes a maximization    
    
    def cost(W, X, M):
            
        def distance(A, B):
            return distance_riemann(A, B)**2
        
        cost = 0
        for Xk in X:
            Xk_ = np.dot(W.T, np.dot(Xk, W))
            M_ = np.dot(W.T, np.dot(M, W)) 
            cost += distance(Xk_, M_) 
            
        return -1*cost # becomes a maximization    

    M = mean_riemann(X)

    cost = partial(cost, X=X, M=M)
    egrad = partial(egrad, X=X, M=M)

    nrzt = 5
    cost_list = []
    W_list = []
    for _ in range(nrzt):
        W = solve_manopt(X, P, cost, egrad)
        cost_list.append(cost(W))
        W_list.append(W)
    cost_list = np.array(cost_list)
    W = W_list[cost_list.argmin()]  # lowest cost = largest objective (cost is negated)

    return W
Example #24
def cal_riem_means(X1, X2, tORs, rType):

    # find the mean of the covariances in X
    # two options to use: rType == riem or rType == log-eucl

    X = np.concatenate((X1, X2), axis=0)
    reg_param = cal_shrinkage(X, tORs)

    W1 = cal_covariance(X1, tORs, reg_param)
    W2 = cal_covariance(X2, tORs, reg_param)

    # find the riemannian means of the covariances
    if rType == 'riem':
        C1_mean = mr.mean_riemann(W1)
        C2_mean = mr.mean_riemann(W2)
    elif rType == 'log-eucl':
        C1_mean = mr.mean_logeuclid(W1)
        C2_mean = mr.mean_logeuclid(W2)
    else:
        raise ValueError("rType must be 'riem' or 'log-eucl'")

    return np.array([C1_mean, C2_mean]), reg_param
Example #25
    def test_random_mat(self, geodesic_func, get_covmats):
        n_trials, n_channels = 2, 5
        covmats = get_covmats(n_trials, n_channels)
        A, B = covmats[0], covmats[1]
        if geodesic_func is geodesic_euclid:
            Ctrue = mean_euclid(covmats)
        elif geodesic_func is geodesic_logeuclid:
            Ctrue = mean_logeuclid(covmats)
        elif geodesic_func is geodesic_riemann:
            Ctrue = mean_riemann(covmats)
        self.geodesic_0(geodesic_func, A, B)
        self.geodesic_1(geodesic_func, A, B)
        self.geodesic_middle(geodesic_func, A, B, Ctrue)
Example #26
def test_power_mean(get_covmats):
    """Test the power mean"""
    n_matrices, n_channels = 3, 3
    covmats = get_covmats(n_matrices, n_channels)
    C_power_1 = mean_power(covmats, 1)
    C_power_0 = mean_power(covmats, 0)
    C_power_m1 = mean_power(covmats, -1)
    C_arithm = mean_euclid(covmats)
    C_geom = mean_riemann(covmats)
    C_harm = mean_harmonic(covmats)
    assert C_power_1 == approx(C_arithm)
    assert C_power_0 == approx(C_geom)
    assert C_power_m1 == approx(C_harm)
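The three assertions check the classical endpoints of the matrix power mean: p = 1 gives the arithmetic mean, p = -1 the harmonic mean, and the limit p -> 0 recovers the geometric (Riemannian) mean:

\[ M_1 = \frac{1}{n}\sum_i C_i, \qquad M_{-1} = \Big(\frac{1}{n}\sum_i C_i^{-1}\Big)^{-1}, \qquad \lim_{p\to 0} M_p = \bar C_{\text{geo}} . \]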
Example #27
def transform_org2talmon_p300(source, target_train, target_test):

    covs_source = source['covs']
    covs_target_train = target_train['covs']
    covs_target_test = target_test['covs']

    weights = np.ones(len(source['labels']))
    weights[source['labels'] == 2] = 5
    M_source = mean_riemann(covs_source, sample_weight=weights)

    weights = np.ones(len(target_train['labels']))
    weights[target_train['labels'] == 2] = 5
    M_target_train = mean_riemann(covs_target_train, sample_weight=weights)

    M = geodesic_riemann(M_source, M_target_train, alpha=0.5)

    Gamma = ParallelTransport(reference_old=M_source, reference_new=M)
    covs_source_transp = Gamma.fit_transform(covs_source)

    Gamma = ParallelTransport(reference_old=M_target_train, reference_new=M)
    covs_target_train_transp = Gamma.fit_transform(covs_target_train)
    covs_target_test_transp = Gamma.transform(covs_target_test)

    source_talmon = {}
    source_talmon['labels'] = source['labels']
    source_talmon['covs'] = covs_source_transp

    target_talmon_train = {}
    target_talmon_train['labels'] = target_train['labels']
    target_talmon_train['covs'] = covs_target_train_transp

    target_talmon_test = {}
    target_talmon_test['labels'] = target_test['labels']
    target_talmon_test['covs'] = covs_target_test_transp

    return source_talmon, target_talmon_train, target_talmon_test
Example #28
def solve_manopt(X, d, cost, egrad):

    D = X.shape[1]
    manifold = Grassmann(height=D, width=d)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)

    solver = ConjugateGradient(mingradnorm=1e-3)

    M = mean_riemann(X)
    w, v = np.linalg.eig(M)
    idx = w.argsort()[::-1]
    v_ = v[:, idx]
    Wo = v_[:, :d]
    W = solver.solve(problem, x=Wo)
    return W
Example #29
def dim_reduction_bootstrap_means(X, P, labels, params):

    nc = X.shape[1]
    K = X.shape[0]

    nmeans = params['nmeans']
    npoints = params['npoints']

    # calculate the means
    Xm = np.zeros((nmeans, nc, nc))
    for n in range(nmeans):
        selectmeans = [randrange(0, K) for _ in range(npoints)]
        Xm[n] = mean_riemann(X[selectmeans])

    W = dim_reduction_nrmeuns(Xm, P, labels, params)

    return W
Example #30
def dim_reduction_nrmelandmark(X, P, labels, params):

    K = X.shape[0]
    nc = X.shape[1]

    S = np.zeros((nc, nc))
    M = mean_riemann(X)
    for i in range(K):
        Ci = X[i, :, :]
        Sij = np.dot(invsqrtm(Ci), np.dot(M, invsqrtm(Ci)))
        S = S + powm(logm(Sij), 2)

    l, v = np.linalg.eig(S)
    idx = l.argsort()[::-1]
    l = l[idx]
    v = v[:, idx]

    W = v[:, :P]

    return W
Example #31
def mean_semidefinite(A, p):    

    Rsq = []
    U = []          
    for Ai in A:    
        Rsqi, Ui = eigendecomp_rank(Ai, p)
        Rsq.append(Rsqi)
        U.append(Ui)
    W = mean_subspace(U)    
    
    T = []
    for Ai, Ui in zip(A, U):
        Oi,_,OiWt = np.linalg.svd(np.dot(Ui.T, W))
        OiW = OiWt.T
        Yi = np.dot(Ui, Oi)
        Wi = np.dot(W, OiW)    
        Si2 = np.dot(Yi.T, np.dot(Ai, Yi))
        proj = np.dot(Wi, np.dot(Si2, Wi.T))
        Ti2 = np.dot(W.T, np.dot(proj, W))
        T.append(Ti2)
    M = mean_riemann(np.stack(T))
    avg = np.dot(W, np.dot(M, W.T)) 
    
    return avg     
Example #32
def test_mean_covariance_riemann():
    """Test mean_covariance for riemannian metric"""
    covmats, diags, A = generate_cov(100, 3)
    C = mean_covariance(covmats, metric='riemann')
    Ctrue = mean_riemann(covmats)
    assert_array_equal(C, Ctrue)
Example #33
def test_riemann_mean():
    """Test the riemannian mean"""
    covmats = generate_cov(100, 3)
    C = mean_riemann(covmats)
Example #34
def test_riemann_mean_with_init():
    """Test the riemannian mean with init"""
    covmats = generate_cov(100, 3)
    C = mean_riemann(covmats, init=covmats[0])