Example no. 1
def untangent_space(T, Cref):
    """Project a set of tangent space vectors back onto the manifold
    according to the given reference point Cref.

    Parameters
    ----------
    T : array-like, shape (NWindows, Mfeatures * (Mfeatures + 1) / 2)
        The set of tangent space vectors.
    Cref : array-like, shape (Mfeatures, Mfeatures)
        The reference covariance matrix.

    Returns
    -------
    covmats : array-like, shape (NWindows, Mfeatures, Mfeatures)
        The set of SPD matrices.
    """
    Nt, Nd = T.shape
    # Nd = Ne * (Ne + 1) / 2, i.e. the upper triangle including the diagonal
    Ne = int((np.sqrt(1 + 8 * Nd) - 1) / 2)
    C12 = sqrtm(Cref)
    idx = np.triu_indices_from(Cref, 0)
    covmats = np.zeros((Nt, Ne, Ne))
    covmats[:, idx[0], idx[1]] = T
    for i in range(Nt):
        # symmetrize: keep the diagonal once, mirror the strict upper triangle
        triu = np.triu(covmats[i], 1)
        covmats[i] = np.diag(np.diag(covmats[i])) + triu + triu.T
        covmats[i] = expm(covmats[i])
        covmats[i] = np.dot(np.dot(C12, covmats[i]), C12)
    return covmats
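# A minimal usage sketch for untangent_space (an addition, not part of the
# original snippet). It assumes the same imports as the code above (numpy as
# np, sqrtm/expm from pyriemann.utils.base); names ending in `_demo` are
# illustrative only.
rng = np.random.RandomState(42)
Ne_demo = 4
Nd_demo = Ne_demo * (Ne_demo + 1) // 2
T_demo = 0.1 * rng.randn(10, Nd_demo)   # 10 small tangent vectors
Cref_demo = np.eye(Ne_demo)             # reference point: the identity
spd_demo = untangent_space(T_demo, Cref_demo)
# every projected matrix should be symmetric positive definite
print(spd_demo.shape, np.all(np.linalg.eigvalsh(spd_demo) > 0))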
def mean_riemann_custom(covmats, mean_args):
    """
        A custom version of pyriemann.utils.mean.mean_riemann to handle singular matrices
        and I/O with regards to reducing samples classes.

        For function doc refer to the doc of pyriemann.utils.mean.mean_riemann.
    """

    # Taking arguments
    tol, maxiter, init, sample_weight = mean_args

    # init
    sample_weight = _get_sample_weight(sample_weight, covmats)
    Nt, Ne, Ne = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    nu = 1.0
    tau = np.finfo(np.float64).max
    crit = np.finfo(np.float64).max

    # stop when the gradient norm drops below tol or after maxiter iterations
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        J = np.zeros((Ne, Ne))

        for index in range(Nt):
            tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
            with warnings.catch_warnings():
                warnings.filterwarnings('error')
                try:
                    J += sample_weight[index] * logm(tmp)
                except RuntimeWarning:
                    pass

        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        if h < tau:
            nu = 0.95 * nu
            tau = h
        else:
            nu = 0.5 * nu

    return C
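# A minimal usage sketch for mean_riemann_custom (an addition, not part of the
# original snippet); it presumes the helpers it relies on (_get_sample_weight,
# sqrtm, invsqrtm, logm, expm, warnings) are imported from pyriemann and the
# standard library as in the original code. Note that mean_args is a plain
# tuple unpacked as (tol, maxiter, init, sample_weight).
A_demo = np.random.RandomState(0).randn(20, 4, 4)
covs_demo = np.array([a @ a.T + 1e-3 * np.eye(4) for a in A_demo])  # random SPD set
mean_args_demo = (1e-8, 50, None, None)  # (tol, maxiter, init, sample_weight)
C_mean_demo = mean_riemann_custom(covs_demo, mean_args_demo)
print(C_mean_demo.shape)  # (4, 4)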
def mean_riemann(covmats,
                 tol=10e-9,
                 maxiter=50,
                 init=None,
                 u_prime=lambda x: 1):
    """Gradient-descent Riemannian mean with a pluggable weighting function.

    u_prime is called with the squared Riemannian distance of each matrix to
    the current estimate; with the default (constant 1) this reduces to the
    usual Frechet/Karcher mean. If u_prime returns a list, its i-th entry is
    used as the weight of the i-th matrix.
    """

    Nt, Ne, Ne = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    nu = 1.0
    tau = np.finfo(np.float64).max
    crit = np.finfo(np.float64).max
    # stop when the gradient norm drops below tol or after maxiter iterations
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        J = np.zeros((Ne, Ne))

        for i in range(Nt):
            tmp = (Cm12 @ covmats[i, :, :]) @ Cm12
            d2 = distance_riemann(C, covmats[i, :, :]) ** 2
            if isinstance(u_prime(1), list):
                J += logm(tmp) * u_prime(d2)[i] / Nt
            else:
                J += logm(tmp) * u_prime(d2) / Nt

        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        if h < tau:
            nu = 0.95 * nu
            tau = h
        else:
            nu = 0.5 * nu

    return C
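# Usage sketch for the u_prime hook of mean_riemann (illustrative only, and it
# reuses covs_demo from the sketch above): with the default u_prime (constant
# 1) this is the ordinary Riemannian mean, while a decreasing weight such as
# the Huber-style function below down-weights matrices far from the current
# estimate. The name u_prime_huber is hypothetical, not part of the original.
def u_prime_huber(d2, delta=1.0):
    # weight as a function of the squared distance: 1 inside the radius,
    # delta / distance outside
    d = np.sqrt(d2)
    return 1.0 if d <= delta else delta / d

C_plain_demo = mean_riemann(covs_demo)                          # u_prime = 1
C_robust_demo = mean_riemann(covs_demo, u_prime=u_prime_huber)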
def mean_logeuclid_custom(covmats, sample_weight=None):
    """
        A custom version of pyriemann.utils.mean.mean_logeuclid to handle singular matrices
        and I/O with regards to reducing samples classes.

        For function doc refer to the doc of pyriemann.utils.mean.mean_logeuclid.
    """

    sample_weight = _get_sample_weight(sample_weight, covmats)
    Nt, Ne, Ne = covmats.shape
    T = np.zeros((Ne, Ne))
    for index in range(Nt):
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                T += sample_weight[index] * logm(covmats[index, :, :])
            except RuntimeWarning:
                pass
    C = expm(T)

    return C
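# Usage sketch (reusing covs_demo from above): the log-Euclidean mean is the
# matrix exponential of the weighted average of matrix logarithms, so it is a
# cheaper, non-iterative alternative to the Riemannian means defined above.
C_logeuclid_demo = mean_logeuclid_custom(covs_demo)             # uniform weights
w_demo = np.ones(len(covs_demo)) / len(covs_demo)
C_logeuclid_w_demo = mean_logeuclid_custom(covs_demo, sample_weight=w_demo)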
Example no. 5
    def _transform(self, X):

        W = self.transporter_
        Ri = self.reference_old
        Rf = self.reference_new
        Nt = X.shape[0]

        # detect which kind of input: tangent vectors or covariance matrices
        if self.tangent_old:
            # if tangent vectors are given, transform them back to covs
            # (easier to have tg vectors in the form of symmetric matrices later)
            X = untangent_space(X, Ri)

        # transform covariances to their tangent vectors with respect to Ri
        # (these tangent vectors are in the form of symmetric matrices)
        eta_i = np.zeros(X.shape)
        Ri_sqrt = sqrtm(Ri)
        Ri_invsqrt = invsqrtm(Ri)
        for i in range(Nt):
            Li = logm(np.dot(Ri_invsqrt, np.dot(X[i], Ri_invsqrt)))
            eta_i[i, :, :] = np.dot(Ri_sqrt, np.dot(Li, Ri_sqrt))

        # multiply the tangent vectors by the transport matrix W
        eta_f = np.zeros(X.shape)
        for i in range(Nt):
            eta_f[i, :, :] = np.dot(W, np.dot(eta_i[i], W.T))

        # transform tangent vectors to covariance matrices with respect to Rf
        Xnew = np.zeros(X.shape)
        Rf_sqrt = sqrtm(Rf)
        Rf_invsqrt = invsqrtm(Rf)
        for i in range(Nt):
            Ef = expm(np.dot(Rf_invsqrt, np.dot(eta_f[i], Rf_invsqrt)))
            Xnew[i, :, :] = np.dot(Rf_sqrt, np.dot(Ef, Rf_sqrt))

        # transform back to tangent vectors (flat form, not sym matrix) if needed
        if self.tangent_new:
            Xnew = tangent_space(Xnew, Rf)

        return Xnew
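# The three loops in _transform implement, for each trial: the logarithmic map
# at Ri, a congruence by the transport matrix W, and the exponential map at
# Rf. A standalone sketch of the same pipeline for a single SPD matrix
# (a hypothetical helper, not part of the original class):
def transport_one(X, Ri, Rf, W):
    Ri_sqrt, Ri_invsqrt = sqrtm(Ri), invsqrtm(Ri)
    eta_i = Ri_sqrt @ logm(Ri_invsqrt @ X @ Ri_invsqrt) @ Ri_sqrt     # Log_Ri(X)
    eta_f = W @ eta_i @ W.T                                           # transport
    Rf_sqrt, Rf_invsqrt = sqrtm(Rf), invsqrtm(Rf)
    return Rf_sqrt @ expm(Rf_invsqrt @ eta_f @ Rf_invsqrt) @ Rf_sqrt  # Exp_Rf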
Example no. 6
def test_expm():
    """Test matrix exponential"""
    C = 2 * np.eye(3)
    Ctrue = np.exp(2) * np.eye(3)
    assert_array_almost_equal(expm(C), Ctrue)
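# A complementary check (an addition, not part of the original test): on an
# SPD matrix, expm and logm should be mutual inverses. It assumes logm is
# available from the same module as expm.
def test_expm_logm_roundtrip():
    rng = np.random.RandomState(0)
    A = rng.randn(3, 3)
    C = A @ A.T + 1e-3 * np.eye(3)   # random SPD matrix
    assert_array_almost_equal(expm(logm(C)), C)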
Example no. 7
def transform_org2opt(source, target_train, target_test):
    """Map the target covariance sets onto the source domain: training
    matrices via a barycentric (weighted Riemannian mean) mapping given by an
    optimal-transport plan, test matrices by replaying the displacement of
    their nearest training matrix."""

    target_opt_train = {}
    target_opt_test = {}

    target_opt_train['labels'] = target_train['labels']
    target_opt_test['labels'] = target_test['labels']

    # get cost matrix
    Cs = source['covs']
    ys = source['labels']
    Ct_train = target_train['covs']
    Ct_test = target_test['covs']
    M = np.zeros((len(Cs), len(Ct_train)))
    for i, Cs_i in enumerate(Cs):
        for j, Ct_j in enumerate(Ct_train):
            M[i, j] = distance_riemann(Cs_i, Ct_j)**2

    # get the transportation plan
    mu_s = distribution_estimation_uniform(Cs)
    mu_t = distribution_estimation_uniform(Ct_train)
    gamma = sinkhorn_lpl1_mm(mu_s, ys, mu_t, M, reg=1.0)

    # transport the target matrices (train)
    Ct_train_transported = np.zeros(Ct_train.shape)
    for j in range(len(Ct_train_transported)):
        Ct_train_transported[j] = mean_riemann(Cs, sample_weight=gamma[:, j])
    target_opt_train['covs'] = Ct_train_transported

    # transport the target matrices (test)
    D = np.zeros((len(Ct_test), len(Ct_train)))
    for k, Ct_k in enumerate(Ct_test):
        for m, Ct_m in enumerate(Ct_train):
            D[k, m] = distance_riemann(Ct_k, Ct_m)**2
    idx = np.argmin(D, axis=1)  # nearest neighbour to each target test matrix

    Ct_test_transported = np.zeros(Ct_test.shape)
    for i in range(len(Ct_test)):
        j = idx[i]

        Ci = Ct_test[i]
        Ri = Ct_train[j]
        Rf = Ct_train_transported[j]

        Ri_sqrt = sqrtm(Ri)
        Ri_invsqrt = invsqrtm(Ri)
        Li = logm(np.dot(Ri_invsqrt, np.dot(Ci, Ri_invsqrt)))
        eta_i = np.dot(Ri_sqrt, np.dot(Li, Ri_sqrt))

        Ri_Rf = geodesic_riemann(Rf, Ri, alpha=0.5)
        Ri_inv = np.linalg.inv(Ri)
        eta_f = np.dot(Ri_inv, np.dot(eta_i, Ri_inv))
        eta_f = np.dot(Ri_Rf, np.dot(eta_f, Ri_Rf))

        Rf_sqrt = sqrtm(Rf)
        Rf_invsqrt = invsqrtm(Rf)
        Ef = expm(np.dot(Rf_invsqrt, np.dot(eta_f, Rf_invsqrt)))
        Ct_test_transported[i] = np.dot(Rf_sqrt, np.dot(Ef, Rf_sqrt))

    target_opt_test['covs'] = Ct_test_transported

    return source, target_opt_train, target_opt_test
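# Usage sketch with synthetic data (illustrative only): it assumes the imports
# used by transform_org2opt, i.e. pyriemann's distance_riemann / mean_riemann
# (the pyriemann version accepting sample_weight) / geodesic_riemann and POT's
# sinkhorn_lpl1_mm / distribution_estimation_uniform. make_set is a
# hypothetical helper, not part of the original code.
def make_set(n, ne, seed):
    rng = np.random.RandomState(seed)
    A = rng.randn(n, ne, ne)
    return {'covs': np.array([a @ a.T + 1e-2 * np.eye(ne) for a in A]),
            'labels': rng.randint(0, 2, n)}

source_demo = make_set(30, 4, seed=0)
target_train_demo = make_set(20, 4, seed=1)
target_test_demo = make_set(10, 4, seed=2)
_, opt_train_demo, opt_test_demo = transform_org2opt(
    source_demo, target_train_demo, target_test_demo)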
def exp_riemann(X, Y):
    """ exp_X(Y) = X exp(X^{-1}Y) = X^{1/2} exp(X^{-1/2} Y X^{-1/2}) X^{1/2}"""
    Xsqrt = sqrtm(X)
    Xinvsqrt = invsqrtm(X)
    return Xsqrt @ expm(Xinvsqrt @ Y @ Xinvsqrt) @ Xsqrt
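# A natural companion sketch (not in the original snippet): the matching
# Riemannian logarithmic map, so that log_riemann(X, exp_riemann(X, Y))
# recovers the symmetric matrix Y.
def log_riemann(X, Y):
    """ log_X(Y) = X^{1/2} log(X^{-1/2} Y X^{-1/2}) X^{1/2}"""
    Xsqrt = sqrtm(X)
    Xinvsqrt = invsqrtm(X)
    return Xsqrt @ logm(Xinvsqrt @ Y @ Xinvsqrt) @ Xsqrt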