Example #1
def dim_reduction_nrmesup(X, P, labels, params):
    """Supervised dimension reduction: accumulate within-class (Sw) and
    between-class (Sb) scatter of the pairwise log maps, then keep the
    leading generalized directions. params is unused."""
    K = X.shape[0]
    nc = X.shape[1]

    Sw = np.zeros((nc, nc))  # within-class scatter
    Sb = np.zeros((nc, nc))  # between-class scatter
    for i in range(K):
        ci = labels[i]
        Ci_invsqrt = invsqrtm(X[i, :, :])  # compute once per i, not per pair
        for j in range(K):
            if i == j:
                continue
            Sij = np.dot(Ci_invsqrt, np.dot(X[j, :, :], Ci_invsqrt))
            if labels[j] == ci:
                Sw = Sw + powm(logm(Sij), 2)
            else:
                Sb = Sb + powm(logm(Sij), 2)

    # eigenvectors of Sw^{-1} Sb, sorted by decreasing eigenvalue
    M = np.dot(np.linalg.inv(Sw), Sb)
    g, U = np.linalg.eig(M)
    idx = g.argsort()[::-1]
    U = U[:, idx]

    # orthonormalize with the polar decomposition and keep the top P columns
    B, _ = sp.linalg.polar(U)
    W = B[:, :P]

    return W
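A minimal usage sketch, assuming the helpers come from pyriemann.utils.base and that scipy is imported as sp; the random SPD construction (A A^T plus a ridge) is illustrative, not from the original:

import numpy as np
import scipy as sp
import scipy.linalg  # makes sp.linalg available for the polar decomposition
from pyriemann.utils.base import invsqrtm, logm, powm

# random SPD matrices: A @ A.T plus a ridge is symmetric positive definite
rng = np.random.default_rng(42)
A = rng.standard_normal((10, 4, 4))
covs = A @ A.transpose(0, 2, 1) + 4.0 * np.eye(4)
labels = np.array([0, 1] * 5)

W = dim_reduction_nrmesup(covs, P=2, labels=labels, params=None)  # (4, 2)
covs_red = np.array([W.T @ C @ W for C in covs])                  # (10, 2, 2)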
Example #2
def egrad_function_pair_rie(M, M_tilde, Q):
    """Euclidean gradient with respect to Q of a squared Riemannian-distance
    cost between Q M Q^T and M_tilde, in the `egrad` form that a manifold
    optimizer (e.g. pymanopt) converts to a Riemannian gradient."""
    M_tilde_invsqrt = invsqrtm(M_tilde)
    M_sqrt = sqrtm(M)
    term_aux = np.dot(Q, np.dot(M, Q.T))
    term_aux = np.dot(M_tilde_invsqrt, np.dot(term_aux, M_tilde_invsqrt))
    return 4 * np.dot(np.dot(M_tilde_invsqrt, logm(term_aux)),
                      np.dot(M_sqrt, Q))
Example #3
def test_riemann_correctness(rndstate, get_covmats):
    """Test example correctness of Riemann kernel."""
    n_matrices, n_channels = 5, 3
    cov = get_covmats(n_matrices, n_channels)
    K = kernel_riemann(cov, Cref=eye(n_channels), reg=0)

    log_cov = array([logm(c) for c in cov])
    tensor = tensordot(log_cov, log_cov.T, axes=1)
    K1 = trace(tensor, axis1=1, axis2=2)
    assert_array_almost_equal(K, K1)
Example #4
def mean_riemann(covmats,
                 tol=10e-9,
                 maxiter=50,
                 init=None,
                 u_prime=lambda x: 1):
    """Riemannian (Karcher) mean by gradient descent, with an optional
    u_prime weighting of each squared distance (the default u_prime = 1
    gives the standard Riemannian mean)."""
    Nt, Ne, _ = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    nu = 1.0
    tau = np.finfo(np.float64).max
    crit = np.finfo(np.float64).max
    # stop when the gradient norm or the step size drops below tol,
    # or after maxiter iterations
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        J = np.zeros((Ne, Ne))

        # gradient: weighted average of the log maps of the inputs at C
        for i in range(Nt):
            tmp = (Cm12 @ covmats[i, :, :]) @ Cm12
            w = u_prime(distance_riemann(C, covmats[i, :, :])**2)
            if isinstance(w, list):  # u_prime may return one weight per matrix
                w = w[i]
            J += w * logm(tmp) / Nt

        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        # step along the geodesic in the direction of J
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        if h < tau:
            nu = 0.95 * nu
            tau = h
        else:
            nu = 0.5 * nu

    return C
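A usage sketch for the weighted variant; the imports mirror what the function above needs, and the Huber-style u_prime below is purely illustrative, not from the original:

import numpy as np
from pyriemann.utils.base import sqrtm, invsqrtm, logm, expm
from pyriemann.utils.distance import distance_riemann

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 3, 3))
covs = A @ A.transpose(0, 2, 1) + 3.0 * np.eye(3)

C_karcher = mean_riemann(covs)  # default u_prime(x) = 1: standard Riemannian mean
# illustrative robust weighting: down-weight matrices far from the mean
C_robust = mean_riemann(covs,
                        u_prime=lambda x: min(1.0, 1.0 / max(np.sqrt(x), 1e-12)))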
Example #5
def mean_riemann_custom(covmats, mean_args):
    """A custom version of pyriemann.utils.mean.mean_riemann that skips
    singular matrices and adapts the I/O for class-wise sample reduction.

    For the parameters, refer to pyriemann.utils.mean.mean_riemann.
    """

    # Taking arguments
    tol, maxiter, init, sample_weight = mean_args

    # init
    sample_weight = _get_sample_weight(sample_weight, covmats)
    Nt, Ne, Ne = covmats.shape
    if init is None:
        C = np.mean(covmats, axis=0)
    else:
        C = init
    k = 0
    nu = 1.0
    tau = np.finfo(np.float64).max
    crit = np.finfo(np.float64).max

    # stop when the gradient norm or the step size drops below tol,
    # or after maxiter iterations
    while (crit > tol) and (k < maxiter) and (nu > tol):
        k = k + 1
        C12 = sqrtm(C)
        Cm12 = invsqrtm(C)
        J = np.zeros((Ne, Ne))

        for index in range(Nt):
            tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
            # promote warnings to errors so matrices whose logm is
            # ill-conditioned (e.g. singular inputs) are skipped
            with warnings.catch_warnings():
                warnings.filterwarnings('error')
                try:
                    J += sample_weight[index] * logm(tmp)
                except RuntimeWarning:
                    pass

        crit = np.linalg.norm(J, ord='fro')
        h = nu * crit
        C = np.dot(np.dot(C12, expm(nu * J)), C12)
        if h < tau:
            nu = 0.95 * nu
            tau = h
        else:
            nu = 0.5 * nu

    return C
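Note the call convention: unlike pyriemann's mean_riemann, the arguments arrive packed in a single tuple. Reusing the covs from the sketch above:

# mean_args = (tol, maxiter, init, sample_weight)
C = mean_riemann_custom(covs, mean_args=(1e-8, 50, None, None))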
Example #6
def dim_reduction_nrmelandmark(X, P, labels, params):
    """Dimension reduction using the Riemannian mean of the set as a single
    landmark; labels and params are unused, kept for a uniform interface."""
    K = X.shape[0]
    nc = X.shape[1]

    S = np.zeros((nc, nc))
    M = mean_riemann(X)
    for i in range(K):
        Ci_invsqrt = invsqrtm(X[i, :, :])
        Sij = np.dot(Ci_invsqrt, np.dot(M, Ci_invsqrt))
        S = S + powm(logm(Sij), 2)

    # eigenvectors of S sorted by decreasing eigenvalue; keep the top P
    w, v = np.linalg.eig(S)
    idx = w.argsort()[::-1]
    v = v[:, idx]

    W = v[:, :P]

    return W
Example #7
def mean_logeuclid_custom(covmats, sample_weight=None):
    """
        A custom version of pyriemann.utils.mean.mean_logeuclid to handle singular matrices
        and I/O with regards to reducing samples classes.

        For function doc refer to the doc of pyriemann.utils.mean.mean_logeuclid.
    """

    sample_weight = _get_sample_weight(sample_weight, covmats)
    Nt, Ne, Ne = covmats.shape
    T = np.zeros((Ne, Ne))
    for index in range(Nt):
        # promote warnings to errors so matrices whose logm is
        # ill-conditioned (e.g. singular inputs) are skipped
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                T += sample_weight[index] * logm(covmats[index, :, :])
            except RuntimeWarning:
                pass
    C = expm(T)

    return C
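Without the singular-matrix guard, the log-Euclidean mean reduces to a closed form: the matrix exponential of the averaged logs. A minimal sketch:

import numpy as np
from pyriemann.utils.base import expm, logm

def mean_logeuclid_simple(covmats):
    # exp of the arithmetic mean of the matrix logs: no iteration needed
    return expm(np.mean([logm(C) for C in covmats], axis=0))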
Example #8
    def _transform(self, X):

        W = self.transporter_
        Ri = self.reference_old
        Rf = self.reference_new
        Nt = X.shape[0]

        # detect which kind of input: tangent vectors or covariance matrices
        if self.tangent_old:
            # if tangent vectors are given, transform them back to covs
            # (easier to have tg vectors in the form of symmetric matrices later)
            X = untangent_space(X, Ri)

        # transform covariances to their tangent vectors with respect to Ri
        # (these tangent vectors are in the form of symmetric matrices)
        eta_i = np.zeros(X.shape)
        Ri_sqrt = sqrtm(Ri)
        Ri_invsqrt = invsqrtm(Ri)
        for i in range(Nt):
            Li = logm(np.dot(Ri_invsqrt, np.dot(X[i], Ri_invsqrt)))
            eta_i[i, :, :] = np.dot(Ri_sqrt, np.dot(Li, Ri_sqrt))

        # multiply the tangent vectors by the transport matrix W
        eta_f = np.zeros(X.shape)
        for i in range(Nt):
            eta_f[i, :, :] = np.dot(W, np.dot(eta_i[i], W.T))

        # transform tangent vectors to covariance matrices with respect to Rf
        Xnew = np.zeros(X.shape)
        Rf_sqrt = sqrtm(Rf)
        Rf_invsqrt = invsqrtm(Rf)
        for i in range(Nt):
            Ef = expm(np.dot(Rf_invsqrt, np.dot(eta_f[i], Rf_invsqrt)))
            Xnew[i, :, :] = np.dot(Rf_sqrt, np.dot(Ef, Rf_sqrt))

        # transform back to tangent vectors (flat form, not sym matrix) if needed
        if self.tangent_new:
            Xnew = tangent_space(Xnew, Rf)

        return Xnew
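The transporter W is computed in fit and is not shown here. One common choice in the literature (parallel transport on the SPD cone, e.g. Yair et al., 2019) is sketched below as an assumption, not the original code:

import numpy as np
from scipy.linalg import sqrtm as dense_sqrtm  # handles non-symmetric input

def transport_matrix(Ri, Rf):
    # W = (Rf Ri^{-1})^{1/2}; Rf @ inv(Ri) is not symmetric, so use the
    # general scipy sqrtm and drop the negligible imaginary part
    return np.real(dense_sqrtm(Rf @ np.linalg.inv(Ri)))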
Example #9
def dim_reduction_nrmeuns(X, P, labels, params):
    """Unsupervised dimension reduction from the scatter of all pairwise log
    maps; labels and params are unused, kept for a uniform interface."""
    K = X.shape[0]
    nc = X.shape[1]

    S = np.zeros((nc, nc))

    for i in range(K):
        Ci_invsqrt = invsqrtm(X[i, :, :])
        for j in range(K):
            if i != j:
                Sij = np.dot(Ci_invsqrt, np.dot(X[j, :, :], Ci_invsqrt))
                S = S + powm(logm(Sij), 2)

    # eigenvectors of S sorted by decreasing eigenvalue; keep the top P
    w, v = np.linalg.eig(S)
    idx = w.argsort()[::-1]
    v = v[:, idx]

    W = v[:, :P]

    return W
Example #10
def test_logm():
    """Test matrix logarithm"""
    C = 2 * np.eye(3)
    Ctrue = np.log(2) * np.eye(3)
    assert_array_almost_equal(logm(C), Ctrue)
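For SPD input, logm amounts to taking the log of the eigenvalues in an eigendecomposition. A minimal re-implementation sketch (not pyriemann's actual code):

import numpy as np

def logm_spd(C):
    # eigendecompose the symmetric matrix and apply log to the eigenvalues
    eigvals, eigvecs = np.linalg.eigh(C)
    return (eigvecs * np.log(eigvals)) @ eigvecs.T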
Example #11
def transform_org2opt(source, target_train, target_test):

    target_opt_train = {}
    target_opt_test = {}

    target_opt_train['labels'] = target_train['labels']
    target_opt_test['labels'] = target_test['labels']

    # get cost matrix
    Cs = source['covs']
    ys = source['labels']
    Ct_train = target_train['covs']
    Ct_test = target_test['covs']
    M = np.zeros((len(Cs), len(Ct_train)))
    for i, Cs_i in enumerate(Cs):
        for j, Ct_j in enumerate(Ct_train):
            M[i, j] = distance_riemann(Cs_i, Ct_j)**2

    # get the transportation plan
    mu_s = distribution_estimation_uniform(Cs)
    mu_t = distribution_estimation_uniform(Ct_train)
    gamma = sinkhorn_lpl1_mm(mu_s, ys, mu_t, M, reg=1.0)

    # transport the target matrices (train)
    Ct_train_transported = np.zeros(Ct_train.shape)
    for j in range(len(Ct_train_transported)):
        Ct_train_transported[j] = mean_riemann(Cs, sample_weight=gamma[:, j])
    target_opt_train['covs'] = Ct_train_transported

    # transport the target matrices (test)
    D = np.zeros((len(Ct_test), len(Ct_train)))
    for k, Ct_k in enumerate(Ct_test):
        for l, Ct_l in enumerate(Ct_train):
            D[k, l] = distance_riemann(Ct_k, Ct_l)**2
    idx = np.argmin(D, axis=1)  # nearest neighbour to each target test matrix

    Ct_test_transported = np.zeros(Ct_test.shape)
    for i in range(len(Ct_test)):
        j = idx[i]

        Ci = Ct_test[i]
        Ri = Ct_train[j]
        Rf = Ct_train_transported[j]

        Ri_sqrt = sqrtm(Ri)
        Ri_invsqrt = invsqrtm(Ri)
        Li = logm(np.dot(Ri_invsqrt, np.dot(Ci, Ri_invsqrt)))
        eta_i = np.dot(Ri_sqrt, np.dot(Li, Ri_sqrt))

        Ri_Rf = geodesic_riemann(Rf, Ri, alpha=0.5)
        Ri_inv = np.linalg.inv(Ri)
        eta_f = np.dot(Ri_inv, np.dot(eta_i, Ri_inv))
        eta_f = np.dot(Ri_Rf, np.dot(eta_f, Ri_Rf))

        Rf_sqrt = sqrtm(Rf)
        Rf_invsqrt = invsqrtm(Rf)
        Ef = expm(np.dot(Rf_invsqrt, np.dot(eta_f, Rf_invsqrt)))
        Ct_test_transported[i] = np.dot(Rf_sqrt, np.dot(Ef, Rf_sqrt))

    target_opt_test['covs'] = Ct_test_transported

    return source, target_opt_train, target_opt_test
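The snippet assumes helpers from pyriemann and the POT optimal-transport package (module ot); the imports below are a best guess at what the original file pulls in:

import numpy as np
from ot.da import sinkhorn_lpl1_mm, distribution_estimation_uniform
from pyriemann.utils.base import sqrtm, invsqrtm, logm, expm
from pyriemann.utils.distance import distance_riemann
from pyriemann.utils.geodesic import geodesic_riemann
from pyriemann.utils.mean import mean_riemann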
Example #12
    def log_whitened_kernel(self, mat, c_ref_invsqrtm):
        # whiten by the reference matrix, take the matrix log, half-vectorize
        return self.half_vectorization(
            base.logm(np.dot(np.dot(c_ref_invsqrtm, mat), c_ref_invsqrtm)))
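half_vectorization is defined elsewhere in the class; a typical construction (an assumption, not the original helper) keeps the upper triangle and scales off-diagonal entries by sqrt(2), so Euclidean inner products of the vectors match Frobenius inner products of the matrices:

import numpy as np

def half_vectorization(S):
    # upper-triangular entries; off-diagonal ones scaled by sqrt(2)
    rows, cols = np.triu_indices_from(S)
    scale = np.where(rows == cols, 1.0, np.sqrt(2.0))
    return scale * S[rows, cols]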
Example #13
def log_riemann(X, Y):
    """log_X(Y) = X log(X^{-1} Y) = X^{1/2} log(X^{-1/2} Y X^{-1/2}) X^{1/2}"""
    Xsqrt = sqrtm(X)
    Xinvsqrt = invsqrtm(X)
    return Xsqrt @ logm(Xinvsqrt @ Y @ Xinvsqrt) @ Xsqrt
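For reference, the matching exponential map inverts log_riemann; a minimal sketch, assuming expm comes from the same pyriemann module as the other helpers:

def exp_riemann(X, V):
    """exp_X(V) = X^{1/2} expm(X^{-1/2} V X^{-1/2}) X^{1/2}"""
    Xsqrt = sqrtm(X)
    Xinvsqrt = invsqrtm(X)
    return Xsqrt @ expm(Xinvsqrt @ V @ Xinvsqrt) @ Xsqrt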
Example #14
from pyriemann.utils.base import logm

# Read data from pickle dump
logger.info("Reading machine learning features data from file %s",
            args.data_file)
with open(args.data_file, 'rb') as f:
    dataset = pickle.load(f)
X_train_temp, y_train = dataset['Train']
X_test_temp, y_test = dataset['Test']

# Vectorising and computing logm of the data so that the classical RBF
# kernel of SVC can be used
logger.info("Vectorising and computing logm of data: train")
n_samples_train, n_features = X_train_temp.shape[:2]
X_train = np.empty((n_samples_train, n_features**2))
for i, covariance in enumerate(tqdm(X_train_temp)):
    X_train[i, :] = algebra.vec(logm(covariance))
X_train_temp = None  # free memory

logger.info("Vectorising and computing logm of data: test")
n_samples_test, n_features = X_test_temp.shape[:2]
X_test = np.empty((n_samples_test, n_features**2))
for i, covariance in enumerate(tqdm(X_test_temp)):
    X_test[i, :] = algebra.vec(logm(covariance))
X_test_temp = None  # free memory

# Shuffling data
X_train, y_train = shuffle(X_train, y_train, random_state=args.seed)
X_test, y_test = shuffle(X_test, y_test, random_state=args.seed)

# Setting parameters of the gridsearch instance for classifier
# param_grid={'kernel': ['rbf'],
def vec_logm(X):
    return vec(logm(X))
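The vec helper is not shown in the snippet; a plausible stand-in (an assumption, not the original algebra.vec) simply flattens the matrix:

import numpy as np

def vec(X):
    # hypothetical stand-in for the missing helper: row-major flattening,
    # matching the n_features**2 vector length used above
    return np.asarray(X).ravel()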