    def test_dist(self):
        man = self.man
        x = man.rand()
        y = man.rand()

        # Test separability
        np_testing.assert_almost_equal(man.dist(x, x), 0.)

        # Test symmetry
        np_testing.assert_almost_equal(man.dist(x, y), man.dist(y, x))

        # Test alternative implementation
        # from Eq 6.14 of "Positive definite matrices"
        d = np.sqrt((np.log(sp.linalg.eigvalsh(x, y))**2).sum())
        np_testing.assert_almost_equal(man.dist(x, y), d)

        # check that dist is consistent with log
        np_testing.assert_almost_equal(man.dist(x, y),
                                       man.norm(x, man.log(x, y)))

        # Test invariance under inversion
        np_testing.assert_almost_equal(man.dist(x, y),
                                       man.dist(la.inv(y), la.inv(x)))

        # Test congruence-invariance
        a = rnd.randn(self.n, self.n)  # must be invertible
        axa = multiprod(multiprod(a, x), multitransp(a))
        aya = multiprod(multiprod(a, y), multitransp(a))
        np_testing.assert_almost_equal(man.dist(x, y), man.dist(axa, aya))
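As a standalone illustration of the "alternative implementation" being tested here (a sketch assuming only NumPy and SciPy, not part of pymanopt): the eigenvalue form of equation (6.14) in Bhatia's "Positive Definite Matrices" agrees with the Cholesky/matrix-log form of the affine-invariant distance used elsewhere on this page.

import numpy as np
import scipy.linalg as sla

rng = np.random.default_rng(0)
n = 4
a = rng.normal(size=(n, n))
b = rng.normal(size=(n, n))
x = a @ a.T + n * np.eye(n)  # symmetric positive definite
y = b @ b.T + n * np.eye(n)

# Eq. (6.14): sqrt(sum_i log(lambda_i)^2), with lambda_i the generalized
# eigenvalues of the pencil (x, y).
d_eig = np.sqrt((np.log(sla.eigvalsh(x, y)) ** 2).sum())

# Cholesky form: ||logm(c^-1 y c^-T)||_F with x = c c^T.  The argument is
# SPD, so its matrix log can be taken through an eigendecomposition.
c = np.linalg.cholesky(x)
c_inv = np.linalg.inv(c)
w, v = np.linalg.eigh(c_inv @ y @ c_inv.T)
d_chol = np.linalg.norm((v * np.log(w)) @ v.T)

np.testing.assert_allclose(d_eig, d_chol)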
Example #2
 def log(self, point_a, point_b):
     ytx = multitransp(point_b) @ point_a
     At = multitransp(point_b) - ytx @ multitransp(point_a)
     Bt = np.linalg.solve(ytx, At)
     u, s, vt = np.linalg.svd(multitransp(Bt), full_matrices=False)
     arctan_s = np.expand_dims(np.arctan(s), -2)
     return (u * arctan_s) @ vt
Example #3
 def log(self, point_a, point_b):
     c = np.linalg.cholesky(point_a)
     c_inv = np.linalg.inv(c)
     logm = multilogm(
         c_inv @ point_b @ multitransp(c_inv),
         positive_definite=True,
     )
     return c @ logm @ multitransp(c)
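A quick standalone sanity check of this Cholesky-based logarithm map (a sketch using a plain-NumPy stand-in for multilogm and a single SPD pair rather than a stack): the result is symmetric, i.e. a tangent vector, and vanishes when both points coincide.

import numpy as np

def _logm_spd(m):
    # Matrix log of an SPD matrix via its eigendecomposition.
    w, v = np.linalg.eigh(m)
    return (v * np.log(w)) @ v.T

def spd_log(point_a, point_b):
    # Same construction as log() above, for a single SPD pair.
    c = np.linalg.cholesky(point_a)
    c_inv = np.linalg.inv(c)
    return c @ _logm_spd(c_inv @ point_b @ c_inv.T) @ c.T

rng = np.random.default_rng(2)
n = 3
f = rng.normal(size=(n, n))
g = rng.normal(size=(n, n))
a = f @ f.T + n * np.eye(n)
b = g @ g.T + n * np.eye(n)

u = spd_log(a, b)
np.testing.assert_allclose(u, u.T, atol=1e-10)            # tangent vectors are symmetric
np.testing.assert_allclose(spd_log(a, a), 0, atol=1e-10)  # log of a point at itself is zero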
Example #4
    def log(self, X, Y):
        ytx = multiprod(multitransp(Y), X)
        At = multitransp(Y) - multiprod(ytx, multitransp(X))
        Bt = np.linalg.solve(ytx, At)
        u, s, vt = svd(multitransp(Bt), full_matrices=False)
        arctan_s = np.expand_dims(np.arctan(s), -2)

        U = multiprod(u * arctan_s, vt)
        return U
Example #5
    def log(self, X, Y):
        ytx = multiprod(multitransp(Y), X)
        At = multitransp(Y) - multiprod(ytx, multitransp(X))
        Bt = np.linalg.solve(ytx, At)
        u, s, vt = svd(multitransp(Bt), full_matrices=False)
        arctan_s = np.expand_dims(np.arctan(s), -2)

        U = multiprod(u * arctan_s, vt)
        return U
Example #6
    def test_proj(self):
        # Construct a random point X on the manifold.
        X = self.man.rand()

        # Construct a vector H in the ambient space.
        H = rnd.randn(self.k, self.m, self.n)

        # Compare the projections.
        Hproj = H - multiprod(X, multiprod(multitransp(X), H) +
                              multiprod(multitransp(H), X)) / 2
        np_testing.assert_allclose(Hproj, self.man.proj(X, H))
Example #7
    def test_projection(self):
        # Construct a random point X on the manifold.
        X = self.manifold.random_point()

        # Construct a vector H in the ambient space.
        H = np.random.normal(size=(self.k, self.m, self.n))

        # Compare the projections.
        Hproj = H - X @ (multitransp(X) @ H + multitransp(H) @ X) / 2
        np_testing.assert_allclose(Hproj, self.manifold.projection(X, H))
Example #8
    def test_proj(self):
        # Construct a random point X on the manifold.
        X = self.man.rand()

        # Construct a vector H in the ambient space.
        H = rnd.randn(self.k, self.m, self.n)

        # Compare the projections.
        Hproj = H - multiprod(X, multiprod(multitransp(X), H) +
                              multiprod(multitransp(H), X)) / 2
        np_testing.assert_allclose(Hproj, self.man.proj(X, H))
Example #9
def geodesic(point_a, point_b, alpha):
    if alpha < 0 or 1 < alpha:
        raise ValueError("Exponent must be in [0,1]")
    c = np.linalg.cholesky(point_a)
    c_inv = np.linalg.inv(c)
    log_cbc = multilogm(
        c_inv @ point_b @ multitransp(c_inv),
        positive_definite=True,
    )
    powm = multiexpm(alpha * log_cbc, symmetric=False)
    return c @ powm @ multitransp(c)
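A standalone endpoint check for this geodesic (a sketch that swaps multilogm/multiexpm for eigh-based equivalents so it runs without pymanopt): the curve should return point_a at alpha = 0 and point_b at alpha = 1.

import numpy as np

def _sym_fun(m, fun):
    # Apply a scalar function to a symmetric matrix via its eigendecomposition.
    w, v = np.linalg.eigh(m)
    return (v * fun(w)) @ v.T

def spd_geodesic(point_a, point_b, alpha):
    # Same construction as geodesic() above, for a single SPD pair.
    c = np.linalg.cholesky(point_a)
    c_inv = np.linalg.inv(c)
    log_cbc = _sym_fun(c_inv @ point_b @ c_inv.T, np.log)
    return c @ _sym_fun(alpha * log_cbc, np.exp) @ c.T

rng = np.random.default_rng(1)
n = 3
f = rng.normal(size=(n, n))
g = rng.normal(size=(n, n))
a = f @ f.T + n * np.eye(n)
b = g @ g.T + n * np.eye(n)

np.testing.assert_allclose(spd_geodesic(a, b, 0.0), a, atol=1e-10)
np.testing.assert_allclose(spd_geodesic(a, b, 1.0), b, atol=1e-10)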
Example #10
    def test_dist(self):
        manifold = self.manifold
        x = manifold.random_point()
        y = manifold.random_point()
        z = manifold.random_point()

        # Test separability
        np_testing.assert_almost_equal(manifold.dist(x, x), 0.0)

        # Test symmetry
        np_testing.assert_almost_equal(
            manifold.dist(x, y), manifold.dist(y, x)
        )

        # Test triangle inequality
        assert manifold.dist(x, y) <= manifold.dist(x, z) + manifold.dist(z, y)

        # Test alternative implementation (see equation (6.14) in [Bha2007]).
        d = np.sqrt((np.log(eigvalsh(x, y)) ** 2).sum())
        np_testing.assert_almost_equal(manifold.dist(x, y), d)

        # Test exponential metric increasing property
        # (see equation (6.8) in [Bha2007]).
        assert manifold.dist(x, y) >= np.linalg.norm(logm(x) - logm(y))

        # check that dist is consistent with log
        np_testing.assert_almost_equal(
            manifold.dist(x, y), manifold.norm(x, manifold.log(x, y))
        )

        # Test invariance under inversion
        np_testing.assert_almost_equal(
            manifold.dist(x, y),
            manifold.dist(np.linalg.inv(y), np.linalg.inv(x)),
        )

        # Test congruence-invariance
        a = np.random.normal(size=(self.n, self.n))  # must be invertible
        axa = a @ x @ multitransp(a)
        aya = a @ y @ multitransp(a)
        np_testing.assert_almost_equal(
            manifold.dist(x, y), manifold.dist(axa, aya)
        )

        # Test proportionality (see equation (6.12) in [Bha2007]).
        alpha = np.random.uniform()
        np_testing.assert_almost_equal(
            manifold.dist(x, geodesic(x, y, alpha)),
            alpha * manifold.dist(x, y),
        )
Example #11
 def inner(self, x, u, v):
     xinvu = la.solve(x, u)
     if u is v:
         xinvv = xinvu
     else:
         xinvv = la.solve(x, v)
     return np.tensordot(xinvu, multitransp(xinvv), axes=x.ndim)
Example #12
 def norm(self, x, u):
      # This implementation is as fast as scipy.linalg.solve_triangular and
      # is more stable, as that solver tends to output non-positive-definite
      # results.
     c = la.cholesky(x)
     c_inv = la.inv(c)
     return la.norm(multiprod(multiprod(c_inv, u), multitransp(c_inv)))
Example #13
 def log(self, X, Y):
     U = multiprod(multitransp(X), Y)
     if self._k == 1:
         return multiskew(np.real(logm(U)))
     for i in range(self._k):
         U[i] = np.real(logm(U[i]))
     return multiskew(U)
Example #14
 def euclidean_to_riemannian_hessian(self, point, euclidean_gradient,
                                     euclidean_hessian, tangent_vector):
     Xt = multitransp(point)
     Xtegrad = Xt @ euclidean_gradient
     symXtegrad = multisym(Xtegrad)
     Xtehess = Xt @ euclidean_hessian
     return multiskew(Xtehess - tangent_vector @ symXtegrad)
Example #15
 def test_rand(self):
     # Just make sure that things generated are on the manifold and that
     # if you generate two they are not equal.
     X = self.man.rand()
      np_testing.assert_allclose(multiprod(multitransp(X), X),
                                 multieye(self.k, self.n), atol=1e-10)
     Y = self.man.rand()
     assert np.linalg.norm(X - Y) > 1e-6
Example #16
    def test_multitransp(self):
        A = rnd.randn(self.k, self.m, self.n)

        C = np.zeros((self.k, self.n, self.m))
        for i in range(self.k):
            C[i] = A[i].T

        np_testing.assert_array_equal(C, multitransp(A))
Example #17
    def test_random_point(self):
        point = self.so.random_point()
        assert point.shape == (self.n, self.n)
        np_testing.assert_almost_equal(point.T @ point - point @ point.T, 0)
        np_testing.assert_almost_equal(point.T @ point, np.eye(self.n))
        assert np.allclose(np.linalg.det(point), 1)

        point = self.so_product.random_point()
        assert point.shape == (self.k, self.n, self.n)
        np_testing.assert_almost_equal(
            multitransp(point) @ point - point @ multitransp(point),
            0,
        )
        np_testing.assert_almost_equal(
            multitransp(point) @ point, multieye(self.k, self.n)
        )
        assert np.allclose(np.linalg.det(point), 1)
Example #18
 def test_rand(self):
     # Just make sure that things generated are on the manifold and that
     # if you generate two they are not equal.
     X = self.man.rand()
     np_testing.assert_allclose(multiprod(multitransp(X), X),
                                multieye(self.k, self.n), atol=1e-10)
     Y = self.man.rand()
     assert np.linalg.norm(X - Y) > 1e-6
Example #19
    def test_multitransp(self):
        A = rnd.randn(self.k, self.m, self.n)

        C = np.zeros((self.k, self.n, self.m))
        for i in range(self.k):
            C[i] = A[i].T

        np_testing.assert_array_equal(C, multitransp(A))
Example #20
    def test_multitransp(self):
        A = np.random.normal(size=(self.k, self.m, self.n))

        C = np.zeros((self.k, self.n, self.m))
        for i in range(self.k):
            C[i] = A[i].T

        np_testing.assert_array_equal(C, multitransp(A))
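Every variant of test_multitransp above reduces to transposing the last two axes of a stacked array. A minimal stand-in (for illustration only; the real multitransp ships with pymanopt's tools) behaves the same way:

import numpy as np

def multitransp_sketch(A):
    # Transpose a single matrix, or every matrix in a stack, by swapping
    # the last two axes.
    if A.ndim == 2:
        return A.T
    return np.swapaxes(A, -2, -1)

A = np.random.normal(size=(5, 3, 4))
expected = np.stack([A[i].T for i in range(A.shape[0])])
np.testing.assert_array_equal(expected, multitransp_sketch(A))
np.testing.assert_array_equal(A[0].T, multitransp_sketch(A[0]))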
Example #21
 def dist(self, x, y):
     # Adapted from equation 6.13 of "Positive definite matrices". Chol
     # decomp gives the same result as matrix sqrt. There may be a more
     # efficient way to compute this!
     c = np.linalg.cholesky(x)
     c_inv = np.linalg.inv(c)
     l = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
                  pos_def=True)
      # The distance is the Frobenius norm of the matrix logarithm itself;
      # congruence by the non-orthogonal Cholesky factor would change it.
      return la.norm(l)
Example #22
 def inner_product(self, point, tangent_vector_a, tangent_vector_b):
     p_inv_tv_a = np.linalg.solve(point, tangent_vector_a)
     if tangent_vector_a is tangent_vector_b:
         p_inv_tv_b = p_inv_tv_a
     else:
         p_inv_tv_b = np.linalg.solve(point, tangent_vector_b)
     return np.tensordot(p_inv_tv_a,
                         multitransp(p_inv_tv_b),
                         axes=tangent_vector_a.ndim)
Example #23
    def exp(self, point, tangent_vector):
        pt_tv = multitransp(point) @ tangent_vector
        if self._k == 1:
            identity = np.eye(self._p)
        else:
            identity = multieye(self._k, self._p)

        a = np.block([point, tangent_vector])
        b = multiexpm(
            np.block([
                [
                    pt_tv,
                    -multitransp(tangent_vector) @ tangent_vector,
                ],
                [identity, pt_tv],
            ]))[..., :self._p]
        c = multiexpm(-pt_tv)
        return a @ (b @ c)
Example #24
 def dist(self, x, y):
     # Adapted from equation 6.13 of "Positive definite matrices". The
     # Cholesky decomposition gives the same result as matrix sqrt. There
     # may be more efficient ways to compute this.
     c = la.cholesky(x)
     c_inv = la.inv(c)
     logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
                     pos_def=True)
     return la.norm(logm)
Example #25
File: psd.py  Project: j-towns/pymanopt
 def dist(self, x, y):
     # Adapted from equation 6.13 of "Positive definite matrices". Chol
     # decomp gives the same result as matrix sqrt. There may be a more
     # efficient way to compute this!
     c = np.linalg.cholesky(x)
     c_inv = np.linalg.inv(c)
     l = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
                  pos_def=True)
      # The distance is the Frobenius norm of the matrix logarithm itself;
      # congruence by the non-orthogonal Cholesky factor would change it.
      return la.norm(l)
Example #26
def randskew(n, N=1):
    idxs = np.triu_indices(n, 1)
    S = np.zeros((N, n, n))
    for i in range(N):
        S[i][idxs] = rnd.randn(int(n * (n - 1) / 2))
    # Skew-symmetrize once, after all slices have been filled; doing this
    # inside the loop rescales the earlier slices on every iteration.
    S = S - multitransp(S)
    if N == 1:
        return S.reshape(n, n)
    return S
Example #27
 def test_inner(self):
     man = self.man
     x = man.rand()
     a = man.randvec(x)
     b = man.randvec(x)
     # b is not symmetric, it is Hermitian
     np.testing.assert_almost_equal(
         np.tensordot(a, multitransp(b), axes=a.ndim),
         man.inner(x, multiprod(x, a), multiprod(x, b)))
      assert man.inner(x, a, b).dtype == np.float64
Example #28
 def test_randvec(self):
     # Make sure things generated are in tangent space and if you generate
     # two then they are not equal.
     X = self.man.rand()
     U = self.man.randvec(X)
     np_testing.assert_allclose(multisym(multiprod(multitransp(X), U)),
                                np.zeros((self.k, self.n, self.n)),
                                atol=1e-10)
     V = self.man.randvec(X)
     assert la.norm(U - V) > 1e-6
Example #29
 def random_tangent_vector(self, point):
     n, k = self._n, self._k
     inds = np.triu_indices(n, 1)
     vector = np.zeros((k, n, n))
     for i in range(k):
         vector[i][inds] = np.random.normal(size=int(n * (n - 1) / 2))
     vector = vector - multitransp(vector)
     if k == 1:
         vector = vector[0]
     return vector / np.sqrt(np.tensordot(vector, vector, axes=vector.ndim))
Example #30
    def rand(self):
        # The way this is done is arbitrary. I think the space of p.d.
        # matrices would have infinite measure w.r.t. the Riemannian metric
        # (cf. integral 0-inf [ln(x)] dx = inf) so impossible to have a
        # 'uniform' distribution.

        # Generate eigenvalues between 1 and 2
        d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)

        # Generate an orthogonal matrix. Annoyingly qr decomp isn't
        # vectorized so need to use a for loop. Could be done using
        # svd but this is slower for bigger matrices.
        u = np.zeros((self._k, self._n, self._n))
        for i in range(self._k):
            u[i], r = la.qr(rnd.randn(self._n, self._n))

        if self._k == 1:
            return multiprod(u, d * multitransp(u))[0]
        return multiprod(u, d * multitransp(u))
Example #31
 def test_randvec(self):
     # Make sure things generated are in tangent space and if you generate
     # two then they are not equal.
     X = self.man.rand()
     U = self.man.randvec(X)
     np_testing.assert_allclose(multisym(multiprod(multitransp(X), U)),
                                np.zeros((self.k, self.n, self.n)),
                                atol=1e-10)
     V = self.man.randvec(X)
     assert la.norm(U - V) > 1e-6
Example #32
    def random_point(self):
        # Generate eigenvalues between 1 and 2.
        d = 1.0 + np.random.uniform(size=(self._k, self._n, 1))

        # Generate an orthogonal matrix.
        q, _ = multiqr(np.random.normal(size=(self._n, self._n)))
        point = q @ (d * multitransp(q))
        if self._k == 1:
            return point[0]
        return point
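A standalone property check for points generated this way (a sketch repeating the construction for a single matrix, not pymanopt's own test): the result is symmetric with eigenvalues in the intended range, hence positive definite.

import numpy as np

n = 5
d = 1.0 + np.random.uniform(size=(n, 1))            # eigenvalues in [1, 2)
q, _ = np.linalg.qr(np.random.normal(size=(n, n)))  # orthogonal factor
point = q @ (d * q.T)                               # q @ diag(d) @ q.T

np.testing.assert_allclose(point, point.T, atol=1e-12)
eigenvalues = np.linalg.eigvalsh(point)
assert np.all(eigenvalues > 0.999) and np.all(eigenvalues < 2.001)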
Example #33
    def test_random_tangent_vector(self):
        point = self.so.random_point()
        tangent_vector = self.so.random_tangent_vector(point)
        np_testing.assert_almost_equal(tangent_vector, -tangent_vector.T)

        point = self.so_product.random_point()
        tangent_vector = self.so_product.random_tangent_vector(point)
        np_testing.assert_almost_equal(
            tangent_vector, -multitransp(tangent_vector)
        )
Example #34
    def exp(self, point, tangent_vector):
        u, s, vt = np.linalg.svd(tangent_vector, full_matrices=False)
        cos_s = np.expand_dims(np.cos(s), -2)
        sin_s = np.expand_dims(np.sin(s), -2)

        Y = point @ (multitransp(vt) * cos_s) @ vt + (u * sin_s) @ vt

        # From numerical experiments, it seems necessary to re-orthonormalize.
        # This is quite expensive.
        q, _ = multiqr(Y)
        return q
Example #35
File: psd.py  Project: j-towns/pymanopt
    def rand(self):
        # The way this is done is arbitrary. I think the space of p.d.
        # matrices would have infinite measure w.r.t. the Riemannian metric
        # (c.f. integral 0-inf [ln(x)] dx = inf) so impossible to have a
        # 'uniform' distribution.

        # Generate eigenvalues between 1 and 2
        d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)

        # Generate an orthogonal matrix. Annoyingly qr decomp isn't
        # vectorized so need to use a for loop. Could be done using
        # svd but this is slower for bigger matrices.
        u = np.zeros((self._k, self._n, self._n))
        for i in range(self._k):
            u[i], r = la.qr(rnd.randn(self._n, self._n))

        if self._k == 1:
            return multiprod(u, d * multitransp(u))[0]
        else:
            return multiprod(u, d * multitransp(u))
Example #36
 def test_random_tangent_vector(self):
     # Make sure things generated are in tangent space and if you generate
     # two then they are not equal.
     X = self.manifold.random_point()
     U = self.manifold.random_tangent_vector(X)
     np_testing.assert_allclose(
         multisym(multitransp(X) @ U),
         np.zeros((self.k, self.n, self.n)),
         atol=1e-10,
     )
     V = self.manifold.random_tangent_vector(X)
     assert np.linalg.norm(U - V) > 1e-6
Example #37
    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)

        xretru = self.man.retr(x, u)

        np_testing.assert_allclose(multiprod(multitransp(xretru), xretru),
                                   multieye(self.k, self.n), atol=1e-10)

        u = u * 1e-6
        xretru = self.man.retr(x, u)
        np_testing.assert_allclose(xretru, x + u)
Example #38
    def test_exp(self):
        # Check that exp lies on the manifold and that exp of a small vector u
        # is close to x + u.
        s = self.man
        x = s.rand()
        u = s.randvec(x)

        xexpu = s.exp(x, u)
        np_testing.assert_allclose(multiprod(multitransp(xexpu), xexpu),
                                   multieye(self.k, self.n), atol=1e-10)

        u = u * 1e-6
        xexpu = s.exp(x, u)
        np_testing.assert_allclose(xexpu, x + u)
Example #39
    def test_exp(self):
        # Check that exp lies on the manifold and that exp of a small vector u
        # is close to x + u.
        s = self.man
        x = s.rand()
        u = s.randvec(x)

        xexpu = s.exp(x, u)
        np_testing.assert_allclose(multiprod(multitransp(xexpu), xexpu),
                                   multieye(self.k, self.n), atol=1e-10)

        u = u * 1e-6
        xexpu = s.exp(x, u)
        np_testing.assert_allclose(xexpu, x + u)
Example #40
 def dist(self, X, Y):
     if self._k == 1:
         u, s, v = np.linalg.svd(np.dot(X.T, Y))
         s[s > 1] = 1
         s = np.arccos(s)
         return np.linalg.norm(s)
     else:
         XtY = multiprod(multitransp(X), Y)
         square_d = 0
          for i in range(self._k):
             s = np.linalg.svd(XtY[i], compute_uv=False)
             # Ensure that -1 <= s <= 1
             s = np.fmin(s, [1])
             s = np.fmax(s, [-1])
             square_d = square_d + np.linalg.norm(np.arccos(s))**2
         return np.sqrt(square_d)
Example #41
    def exp(self, X, U):
        u, s, vt = svd(U, full_matrices=False)
        cos_s = np.expand_dims(np.cos(s), -2)
        sin_s = np.expand_dims(np.sin(s), -2)

        Y = (multiprod(multiprod(X, multitransp(vt) * cos_s), vt) +
             multiprod(u * sin_s, vt))

        # From numerical experiments, it seems necessary to
        # re-orthonormalize. This is overall quite expensive.
        if self._k == 1:
            Y, unused = np.linalg.qr(Y)
            return Y
        else:
            for i in range(self._k):
                Y[i], unused = np.linalg.qr(Y[i])
            return Y
Example #42
File: psd.py  Project: j-towns/pymanopt
 def log(self, x, y):
     c = la.cholesky(x)
     c_inv = la.inv(c)
     l = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
                  pos_def=True)
     return multiprod(multiprod(c, l), multitransp(c))
Example #43
 def proj(self, X, U):
     UNew = U - multiprod(
         X, multiprod(multitransp(X), U) + multiprod(multitransp(U), X)) / 2
     return UNew
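A standalone check of this projection (a sketch in plain NumPy for a single matrix): for X with orthonormal columns, the projected vector satisfies sym(X^T U) = 0, which is the defining condition of the Stiefel tangent space.

import numpy as np

m, n = 7, 3
X, _ = np.linalg.qr(np.random.normal(size=(m, n)))  # orthonormal columns
U = np.random.normal(size=(m, n))                   # vector in the ambient space

# Same formula as proj() above, written for a single matrix.
U_proj = U - X @ (X.T @ U + U.T @ X) / 2

sym_part = (X.T @ U_proj + U_proj.T @ X) / 2
np.testing.assert_allclose(sym_part, 0, atol=1e-12)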
Example #44
 def test_multitransp_singlemat(self):
     A = rnd.randn(self.m, self.n)
     np_testing.assert_array_equal(A.T, multitransp(A))
Example #45
 def ehess2rhess(self, X, egrad, ehess, H):
     # Convert Euclidean into Riemannian Hessian.
     XtG = multiprod(multitransp(X), egrad)
     symXtG = multisym(XtG)
     HsymXtG = multiprod(H, symXtG)
     return self.proj(X, ehess - HsymXtG)
Example #46
 def dist(self, X, Y):
     u, s, v = svd(multiprod(multitransp(X), Y))
     s[s > 1] = 1
     s = np.arccos(s)
     return np.linalg.norm(s)
Example #47
 def proj(self, X, U):
     return U - multiprod(X, multiprod(multitransp(X), U))
Example #48
 def ehess2rhess(self, X, egrad, ehess, H):
     # Convert Euclidean into Riemannian Hessian.
     PXehess = self.proj(X, ehess)
     XtG = multiprod(multitransp(X), egrad)
     HXtG = multiprod(H, XtG)
     return PXehess - HXtG