Code example #1
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 128
    subspace_dimension = 3
    matrix = np.random.normal(size=(num_rows, num_rows))
    matrix = 0.5 * (matrix + matrix.T)

    manifold = Grassmann(num_rows, subspace_dimension)
    cost, euclidean_gradient, euclidean_hessian = create_cost_and_derivates(
        manifold, matrix, backend)
    problem = pymanopt.Problem(
        manifold,
        cost,
        euclidean_gradient=euclidean_gradient,
        euclidean_hessian=euclidean_hessian,
    )

    optimizer = TrustRegions(verbosity=2 * int(not quiet))
    estimated_spanning_set = optimizer.run(
        problem, Delta_bar=8 * np.sqrt(subspace_dimension)
    ).point

    if quiet:
        return

    eigenvalues, eigenvectors = np.linalg.eig(matrix)
    column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
    spanning_set = eigenvectors[:, column_indices]
    print(
        "Geodesic distance between true and estimated dominant subspace:",
        manifold.dist(spanning_set, estimated_spanning_set),
    )
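
For reference, here is a minimal sketch of what the `create_cost_and_derivates` helper could return for the autograd backend (the actual helper lives elsewhere in the example file; with pymanopt's decorator backends the Euclidean gradient and Hessian are derived automatically, so both slots are None):

import autograd.numpy as anp
import pymanopt

def create_cost_and_derivates(manifold, matrix, backend):
    # Hypothetical sketch for backend == "autograd" (pymanopt 2.x API).
    if backend == "autograd":
        @pymanopt.function.autograd(manifold)
        def cost(point):
            # Maximize trace(X^T A X) over the Grassmann manifold.
            return -anp.trace(point.T @ matrix @ point)
        return cost, None, None
    raise ValueError(f"Unsupported backend '{backend}'")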
Code example #2
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))
Code example #3
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    """This example generates a random 128 x 128 symmetric matrix and finds the
    dominant invariant 3 dimensional subspace for this matrix, i.e., it finds
    the subspace spanned by the three eigenvectors with the largest
    eigenvalues.
    """
    num_rows = 128
    subspace_dimension = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    cost, egrad, ehess = create_cost_egrad_ehess(
        backend, matrix, subspace_dimension)
    manifold = Grassmann(num_rows, subspace_dimension)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    estimated_spanning_set = solver.solve(
        problem, Delta_bar=8*np.sqrt(subspace_dimension))

    if quiet:
        return

    eigenvalues, eigenvectors = la.eig(matrix)
    column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
    spanning_set = eigenvectors[:, column_indices]
    print("Geodesic distance between true and estimated dominant subspace:",
          manifold.dist(spanning_set, estimated_spanning_set))
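
Likewise, a plausible sketch of the `create_cost_egrad_ehess` helper for this older (pre-2.0) pymanopt API, which shipped decorators such as `pymanopt.function.Autograd` (an assumption; the real helper is defined elsewhere in the file):

import autograd.numpy as anp
import pymanopt

def create_cost_egrad_ehess(backend, matrix, subspace_dimension):
    # Hypothetical sketch for backend == "Autograd": pymanopt
    # differentiates the decorated cost itself, so egrad/ehess stay None.
    if backend == "Autograd":
        @pymanopt.function.Autograd
        def cost(X):
            return -anp.trace(X.T @ matrix @ X)
        return cost, None, None
    raise ValueError(f"Unsupported backend '{backend}'")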
Code example #4
File: test_grassmann.py (project: pymanopt/pymanopt)
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.manifold = Grassmann(m, n, k=k)

        self.projection = lambda x, u: u - x @ x.T @ u

        super().setUp()
Code example #5
def NG_sdr(X, y, m, v_w = 5, v_b = 5, verbosity=0, *args, **kwargs):
    """
    X: array of N points on complex Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n) 
    where X_hat_i = R^T X_i, W \in St(n, m)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued
    if cpx:
        gr = ComplexGrassmann(n, p)
        man = ComplexGrassmann(n, m)
    else:
        gr = Grassmann(n, p)
        man = Grassmann(n, m)
    
    # distance matrix
    dist_m = np.zeros((N, N))

    for i in range(N):
        for j in range(i):
            dist_m[i, j] = gr.dist(X[i], X[j])
            dist_m[j, i] = dist_m[i, j]
    
    # affinity matrix
    affinity = affinity_matrix(dist_m, y, v_w, v_b)

            
    X_ = torch.from_numpy(X)
    affinity_ = torch.from_numpy(affinity)
    
    @pymanopt.function.PyTorch
    def cost(A):
        dm = torch.zeros((N, N))
        for i in range(N):
            for j in range(i):
                dm[i, j] = dist_proj(torch.matmul(A.conj().t(), X_[i]), torch.matmul(A.conj().t(), X_[j]))**2
                #dm[i, j] = gr_low.dist(X_proj[i], X_proj[j])**2
                dm[j, i] = dm[i, j]
    
        d2 = torch.mean(affinity_*dm)   
        return d2

    # solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    A = solver.solve(problem)

    tmp = np.array([A.conj().T for i in range(N)]) # N x m x n
    X_low = multiprod(tmp, X) # N x m x p
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])
    
    return X_low, A
Code example #6
def envelope(X_env, Y_env, u):

    p, r = X_env.shape[1], Y_env.shape[1]
    linear_model = LinearRegression().fit(X_env, Y_env)
    err = Y_env - linear_model.predict(X_env)
    Sigma_res = np.cov(err.transpose())
    Sigma_Y = np.cov(Y_env.transpose())

    def cost(Gamma):
        X = np.matmul(Gamma, Gamma.T)
        out = -np.log(
            np.linalg.det(
                np.matmul(np.matmul(X, Sigma_res), X) +
                np.matmul(np.matmul(np.eye(r) - X, Sigma_Y),
                          np.eye(r) - X)))
        return (np.array(out))

    manifold = Grassmann(r, u)
    # manifold = Stiefel(r, u)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent()
    Gamma = solver.solve(problem)
    PSigma1_hat = np.matmul(Gamma, Gamma.T)
    PSigma2_hat = np.eye(r) - PSigma1_hat

    beta_hat = np.matmul(PSigma1_hat, linear_model.coef_)
    Sigma1_hat = np.matmul(np.matmul(PSigma1_hat, Sigma_res), PSigma1_hat)
    Sigma2_hat = np.matmul(np.matmul(np.eye(r) - PSigma1_hat, Sigma_res),
                           np.eye(r) - PSigma1_hat)
    alpha_hat = np.mean(Y_env - np.matmul(X_env, beta_hat.T), axis=0)

    return (alpha_hat.reshape(1, r), beta_hat.reshape(p, r))
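
A hypothetical call to `envelope` on synthetic data (the demo names below are illustrative; only numpy is needed on top of the imports the function already uses):

import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.randn(200, 3)                           # n=200 samples, p=3 predictors
Y_demo = X_demo @ rng.randn(3, 2) + 0.1 * rng.randn(200, 2)  # r=2 responses
alpha_hat, beta_hat = envelope(X_demo, Y_demo, u=1)  # envelope dimension u=1
print(alpha_hat.shape, beta_hat.shape)               # (1, 2) and (3, 2)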
Code example #7
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))
Code example #8
def solve_manopt(X, d, cost, egrad, Wo=None):

    D = X.shape[1]
    manifold = Grassmann(height=D, width=d)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)

    solver = ConjugateGradient(mingradnorm=1e-3)
    W = solver.solve(problem, x=Wo)

    return W
Code example #9
    def __init__(self, db):
        self.cost_function = db['compute_cost']
        self.gradient_function = db['compute_gradient']

        # (1) Instantiate a manifold
        #self.manifold = Stiefel(db['Dloader'].d, db['q'])
        self.manifold = Grassmann(db['Dloader'].d, db['q'])

        self.x_opt = None
        self.cost_opt = None
        self.db = db
Code example #10
def NG_dr1(X, verbosity = 0):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(n-1, p)) 
    where X_hat_i = R^T X_i, R in St(n, n-1)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, 1), Euclidean(p, 2)])
        
    else:
        man = Product([Grassmann(n, 1), Euclidean(p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(v, b):
        vvT = torch.matmul(v, v.conj().t()) # n x n
        if cpx:
            b_ = b[:,0] + b[:,1]*1j
            b_ = torch.unsqueeze(b_, axis=1)
        else:
            b_ = torch.unsqueeze(b, axis=1)
        vbt = torch.matmul(v, b_.t()) # n x p
        IvvT = torch.eye(n, dtype=X_.dtype) - vvT
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(IvvT, X_[i]) + vbt)**2/N
            #d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]))**2/N
        return d2
    
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    v = theta[0]
    b_ = theta[1]
    
    if cpx:
        b = b_[:,0] + b_[:,1]*1j
        b = np.expand_dims(b, axis=1)
    else:
        b = np.expand_dims(b_, axis=1)
    
    R = ortho_complement(v)
    tmp = np.array([R.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, R, v, b
Code example #11
def sample_grassmann(d, p, n):
    """
	Sample n points from the manifold of p-dimensional subspaces
	of R^d using pymanopt.manifolds.grassmann.Grassmann.rand
	"""
    try:
        with open(
                "data/grassmann__" + str(d) + "_" + str(p) + "_" + str(n) +
                ".pkl", "rb") as f:
            points = pkl.load(f)
    except FileNotFoundError:
        print("Sampling " + str(n) + " points from Grassmann(+" + str(d) +
              "," + str(p) + ")...")
        manifold = Grassmann(d, p)
        points = []
        for _ in tqdm(range(n)):
            points.append(manifold.rand())
        points = np.stack(points)
        with open(
                "data/grassmann__" + str(d) + "_" + str(p) + "_" + str(n) +
                ".pkl", "wb") as f:
            pkl.dump(points, f)
    return points
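
Usage note: the pickle cache path assumes a `data/` directory already exists, so a hypothetical first call could look like this:

import os

os.makedirs("data", exist_ok=True)   # the cache writer does not create it
points = sample_grassmann(d=10, p=2, n=50)
print(points.shape)                  # (50, 10, 2); each slice has orthonormal columns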
Code example #12
def NG_dr(X, m, verbosity=0, *args, **kwargs):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n) 
    where X_hat_i = A^T X_i, A in St(n, m)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, m), Euclidean(n, p, 2)])
        
    else:
        man = Product([Grassmann(n, m), Euclidean(n, p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(A, B):
        AAT = torch.matmul(A, A.conj().t()) # n x n
        if cpx:
            B_ = B[:,:,0] + B[:,:,1]*1j
        else:
            B_ = B
        IAATB = torch.matmul(torch.eye(n, dtype=X_.dtype) - AAT, B_) # n x p
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]) + IAATB)**2/N
            #d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]))**2/N
        return d2

    #solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    A = theta[0]
    B = theta[1]
    
    if cpx:
        B_ = B[:,:,0] + B[:,:,1]*1j
    else:
        B_ = B

    #tmp = np.array([A.T for i in range(N)])
    tmp = np.array([A.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, A, B_
Code example #13
def solve_manopt(X, d, cost, egrad):

    D = X.shape[1]
    manifold = Grassmann(height=D, width=d)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)

    solver = ConjugateGradient(mingradnorm=1e-3)

    M = mean_riemann(X)
    w, v = np.linalg.eig(M)
    idx = w.argsort()[::-1]
    v_ = v[:, idx]
    Wo = v_[:, :d]
    W = solver.solve(problem, x=Wo)
    return W
Code example #14
def dominant_invariant_subspace(A, p):
    """
    Returns an orthonormal basis of the dominant invariant p-subspace of A.

    Arguments:
        - A
            A real, symmetric matrix A of size nxn
        - p
            integer p < n.
    Returns:
        A real, orthonormal matrix X of size nxp such that trace(X'*A*X)
        is maximized. That is, the columns of X form an orthonormal basis
        of a dominant subspace of dimension p of A. These span the same space
        as  the eigenvectors associated with the largest eigenvalues of A.
        Sign is important: 2 is deemed a larger eigenvalue than -5.
    """
    # Make sure the input matrix is square and symmetric
    n = A.shape[0]
    assert isinstance(A, np.ndarray), 'A must be a numpy array.'
    assert np.isreal(A).all(), 'A must be real.'
    assert A.shape[1] == n, 'A must be square.'
    assert np.linalg.norm(A - A.T) < n * np.spacing(1), 'A must be symmetric.'
    assert p <= n, 'p must be no larger than n.'

    # Define the cost on the Grassmann manifold
    Gr = Grassmann(n, p)
    X = T.matrix()
    cost = -T.dot(X.T, T.dot(A, X)).trace()

    # Setup the problem
    problem = Problem(man=Gr, ad_cost=cost, ad_arg=X)

    # Create a solver object
    solver = TrustRegions()

    # Solve
    Xopt = solver.solve(problem, Delta_bar=8*np.sqrt(p))

    return Xopt
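
A quick sanity check one could run against a dense eigendecomposition (hypothetical; equal subspaces have equal orthogonal projectors, whatever basis each solver returns):

import numpy as np

A = np.random.randn(64, 64)
A = 0.5 * (A + A.T)                      # symmetrize
X = dominant_invariant_subspace(A, 3)
w, V = np.linalg.eigh(A)                 # eigenvalues in ascending order
dominant = V[:, -3:]                     # eigenvectors of the 3 largest
print(np.linalg.norm(X @ X.T - dominant @ dominant.T))  # should be ~0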
Code example #15
def nonlinear_eigenspace(L, k, alpha=1):
    """Nonlinear eigenvalue problem: total energy minimization.

    This example is motivated in [1]_ and was adapted from the manopt toolbox
    in Matlab.

    TODO : check this

    Parameters
    ----------
    L : array, shape=(n_channels, n_channels)
        Discrete Laplacian operator: the covariance matrix.
    k : int
        Determines how many eigenvalues are returned.
    alpha : float
        Given constant for optimization problem.

    Returns
    -------
    S0 : array
        Eigenvalues.
    Xsol : array, shape=(n_channels, k)
        Eigenvectors.

    References
    ----------
    .. [1] "A Riemannian Newton Algorithm for Nonlinear Eigenvalue Problems",
       Zhi Zhao, Zheng-Jian Bai, and Xiao-Qing Jin, SIAM Journal on Matrix
       Analysis and Applications, 36(2), 752-774, 2015.

    """
    n = L.shape[0]
    assert L.shape[1] == n, 'L must be square.'

    # Grassmann manifold description
    manifold = Grassmann(n, k)
    manifold._dimension = 1  # hack

    # A solver that involves the hessian (check if correct TODO)
    solver = TrustRegions()

    # Cost function evaluation
    @pymanopt.function.Callable
    def cost(X):
        rhoX = np.sum(X**2, 1, keepdims=True)  # diag(X*X')
        # Matrix product L @ X (elementwise L * X would not broadcast for k < n).
        val = 0.5 * np.trace(X.T @ (L @ X)) + \
            (alpha / 4) * (rhoX.T @ mldivide(L, rhoX))
        return val

    # Euclidean gradient evaluation
    @pymanopt.function.Callable
    def egrad(X):
        rhoX = np.sum(X**2, 1, keepdims=True)  # diag(X*X')
        g = L @ X + alpha * np.diagflat(mldivide(L, rhoX)) @ X
        return g

    # Euclidean Hessian evaluation
    # Note: Manopt automatically converts it to the Riemannian counterpart.
    @pymanopt.function.Callable
    def ehess(X, U):
        rhoX = np.sum(X**2, 1, keepdims=True)  # np.diag(X * X')
        rhoXdot = 2 * np.sum(X * U, 1)  # diag(X U' + U X'): elementwise product
        h = L @ U + alpha * np.diagflat(mldivide(L, rhoXdot)) @ X + \
            alpha * np.diagflat(mldivide(L, rhoX)) @ U
        return h

    # Initialization as suggested in above referenced paper.
    # randomly generate starting point for svd
    x = np.random.randn(n, k)
    [U, S, V] = linalg.svd(x, full_matrices=False)
    x = U.dot(V)  # scipy's svd returns Vh, so U @ Vh is the orthonormal polar factor
    S0, U0 = linalg.eig(L + alpha * np.diagflat(mldivide(L, np.sum(x**2, 1))))

    # Call manoptsolve to automatically call an appropriate solver.
    # Note: it calls the trust regions solver as we have all the required
    # ingredients, namely, gradient and Hessian, information.
    problem = Problem(manifold=manifold,
                      cost=cost,
                      egrad=egrad,
                      ehess=ehess,
                      verbosity=0)
    Xsol = solver.solve(problem, U0[:, :k])  # initial point must be n x k on Gr(n, k)

    return S0, Xsol
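
A hypothetical invocation (assumes the `mldivide` helper used above is in scope; the SPD matrix below is only a stand-in for a discrete Laplacian):

import numpy as np

n = 32
Q = np.random.randn(n, n)
Lap = Q @ Q.T + n * np.eye(n)      # well-conditioned SPD stand-in
S0, Xsol = nonlinear_eigenspace(Lap, k=3)
print(Xsol.shape)                  # (32, 3): orthonormal basis of the solution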
Code example #16
class TestSingleGrassmannManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))

    def test_dist(self):
        x = self.man.rand()
        y = self.man.rand()
        np_testing.assert_almost_equal(self.man.dist(x, y),
                                       self.man.norm(x, self.man.log(x, y)))

    def test_ehess2rhess(self):
        # Test this function at some randomly generated point.
        x = self.man.rand()
        u = self.man.randvec(x)
        egrad = rnd.randn(self.m, self.n)
        ehess = rnd.randn(self.m, self.n)

        np_testing.assert_allclose(
            testing.ehess2rhess(self.proj)(x, egrad, ehess, u),
            self.man.ehess2rhess(x, egrad, ehess, u))

    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)

        xretru = self.man.retr(x, u)

        np_testing.assert_allclose(multiprod(multitransp(xretru), xretru),
                                   np.eye(self.n),
                                   atol=1e-10)

        u = u * 1e-6
        xretru = self.man.retr(x, u)
        np_testing.assert_allclose(xretru, x + u)

    # def test_egrad2rgrad(self):

    # def test_norm(self):

    def test_rand(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.man.rand()
        np_testing.assert_allclose(multiprod(multitransp(X), X),
                                   np.eye(self.n),
                                   atol=1e-10)
        Y = self.man.rand()
        assert la.norm(X - Y) > 1e-6

    # def test_randvec(self):

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        x = s.rand()
        y = s.rand()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.man.dist(y, z), decimal=5)

    def test_log_exp_inverse(self):
        s = self.man
        x = s.rand()
        u = s.randvec(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.man.norm(x, u - v))
Code example #17
class TestMultiGrassmannManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 3
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))

    def test_dim(self):
        assert self.man.dim == self.k * (self.m * self.n - self.n**2)

    def test_typicaldist(self):
        np_testing.assert_almost_equal(self.man.typicaldist,
                                       np.sqrt(self.n * self.k))

    def test_dist(self):
        x = self.man.rand()
        y = self.man.rand()
        np_testing.assert_almost_equal(self.man.dist(x, y),
                                       self.man.norm(x, self.man.log(x, y)))

    def test_inner(self):
        X = self.man.rand()
        A = self.man.randvec(X)
        B = self.man.randvec(X)
        np_testing.assert_allclose(np.sum(A * B), self.man.inner(X, A, B))

    def test_proj(self):
        # Construct a random point X on the manifold.
        X = self.man.rand()

        # Construct a vector H in the ambient space.
        H = rnd.randn(self.k, self.m, self.n)

        # Compare the projections.
        Hproj = H - multiprod(X, multiprod(multitransp(X), H))
        np_testing.assert_allclose(Hproj, self.man.proj(X, H))

    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)

        xretru = self.man.retr(x, u)

        np_testing.assert_allclose(multiprod(multitransp(xretru), xretru),
                                   multieye(self.k, self.n),
                                   atol=1e-10)

        u = u * 1e-6
        xretru = self.man.retr(x, u)
        np_testing.assert_allclose(xretru, x + u)

    # def test_egrad2rgrad(self):

    def test_norm(self):
        x = self.man.rand()
        u = self.man.randvec(x)
        np_testing.assert_almost_equal(self.man.norm(x, u), la.norm(u))

    def test_rand(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.man.rand()
        np_testing.assert_allclose(multiprod(multitransp(X), X),
                                   multieye(self.k, self.n),
                                   atol=1e-10)
        Y = self.man.rand()
        assert la.norm(X - Y) > 1e-6

    def test_randvec(self):
        # Make sure things generated are in tangent space and if you generate
        # two then they are not equal.
        X = self.man.rand()
        U = self.man.randvec(X)
        np_testing.assert_allclose(multisym(multiprod(multitransp(X), U)),
                                   np.zeros((self.k, self.n, self.n)),
                                   atol=1e-10)
        V = self.man.randvec(X)
        assert la.norm(U - V) > 1e-6

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        x = s.rand()
        y = s.rand()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.man.dist(y, z))

    def test_log_exp_inverse(self):
        s = self.man
        x = s.rand()
        u = s.randvec(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.man.norm(x, u - v))
Code example #18
File: test_grassmann.py (project: pymanopt/pymanopt)
class TestSingleGrassmannManifold(ManifoldTestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.manifold = Grassmann(m, n, k=k)

        self.projection = lambda x, u: u - x @ x.T @ u

        super().setUp()

    def test_dist(self):
        x = self.manifold.random_point()
        y = self.manifold.random_point()
        np_testing.assert_almost_equal(
            self.manifold.dist(x, y),
            self.manifold.norm(x, self.manifold.log(x, y)),
        )

    def test_euclidean_to_riemannian_hessian(self):
        # Test this function at some randomly generated point.
        x = self.manifold.random_point()
        u = self.manifold.random_tangent_vector(x)
        egrad = np.random.normal(size=(self.m, self.n))
        ehess = np.random.normal(size=(self.m, self.n))

        np_testing.assert_allclose(
            testing.euclidean_to_riemannian_hessian(self.projection)(x, egrad,
                                                                     ehess, u),
            self.manifold.euclidean_to_riemannian_hessian(x, egrad, ehess, u),
        )

    def test_retraction(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.manifold.random_point()
        u = self.manifold.random_tangent_vector(x)

        xretru = self.manifold.retraction(x, u)

        np_testing.assert_allclose(multitransp(xretru) @ xretru,
                                   np.eye(self.n),
                                   atol=1e-10)

        u = u * 1e-6
        xretru = self.manifold.retraction(x, u)
        np_testing.assert_allclose(xretru, x + u)

    def test_first_order_function_approximation(self):
        self.run_gradient_approximation_test()

    def test_second_order_function_approximation(self):
        self.run_hessian_approximation_test()

    # def test_norm(self):

    def test_random_point(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.manifold.random_point()
        np_testing.assert_allclose(multitransp(X) @ X,
                                   np.eye(self.n),
                                   atol=1e-10)
        Y = self.manifold.random_point()
        assert np.linalg.norm(X - Y) > 1e-6

    # def test_random_tangent_vector(self):

    # def test_transport(self):

    def test_exp_log_inverse(self):
        s = self.manifold
        x = s.random_point()
        y = s.random_point()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.manifold.dist(y, z), decimal=5)

    def test_log_exp_inverse(self):
        s = self.manifold
        x = s.random_point()
        u = s.random_tangent_vector(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.manifold.norm(x, u - v))
Code example #19
File: test_grassmann.py (project: pymanopt/pymanopt)
class TestMultiGrassmannManifold(ManifoldTestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 3
        self.manifold = Grassmann(m, n, k=k)

        self.projection = lambda x, u: u - x @ x.T @ u

        super().setUp()

    def test_dim(self):
        assert self.manifold.dim == self.k * (self.m * self.n - self.n**2)

    def test_typical_dist(self):
        np_testing.assert_almost_equal(self.manifold.typical_dist,
                                       np.sqrt(self.n * self.k))

    def test_dist(self):
        x = self.manifold.random_point()
        y = self.manifold.random_point()
        np_testing.assert_almost_equal(
            self.manifold.dist(x, y),
            self.manifold.norm(x, self.manifold.log(x, y)),
        )

    def test_inner_product(self):
        X = self.manifold.random_point()
        A = self.manifold.random_tangent_vector(X)
        B = self.manifold.random_tangent_vector(X)
        np_testing.assert_allclose(np.sum(A * B),
                                   self.manifold.inner_product(X, A, B))

    def test_projection(self):
        # Construct a random point X on the manifold.
        X = self.manifold.random_point()

        # Construct a vector H in the ambient space.
        H = np.random.normal(size=(self.k, self.m, self.n))

        # Compare the projections.
        Hproj = H - X @ multitransp(X) @ H
        np_testing.assert_allclose(Hproj, self.manifold.projection(X, H))

    def test_retraction(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.manifold.random_point()
        u = self.manifold.random_tangent_vector(x)

        xretru = self.manifold.retraction(x, u)

        np_testing.assert_allclose(
            multitransp(xretru) @ xretru,
            multieye(self.k, self.n),
            atol=1e-10,
        )

        u = u * 1e-6
        xretru = self.manifold.retraction(x, u)
        np_testing.assert_allclose(xretru, x + u)

    def test_first_order_function_approximation(self):
        self.run_gradient_approximation_test()

    def test_second_order_function_approximation(self):
        self.run_hessian_approximation_test()

    def test_norm(self):
        x = self.manifold.random_point()
        u = self.manifold.random_tangent_vector(x)
        np_testing.assert_almost_equal(self.manifold.norm(x, u),
                                       np.linalg.norm(u))

    def test_random_point(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.manifold.random_point()
        np_testing.assert_allclose(multitransp(X) @ X,
                                   multieye(self.k, self.n),
                                   atol=1e-10)
        Y = self.manifold.random_point()
        assert np.linalg.norm(X - Y) > 1e-6

    def test_random_tangent_vector(self):
        # Make sure things generated are in tangent space and if you generate
        # two then they are not equal.
        X = self.manifold.random_point()
        U = self.manifold.random_tangent_vector(X)
        np_testing.assert_allclose(
            multisym(multitransp(X) @ U),
            np.zeros((self.k, self.n, self.n)),
            atol=1e-10,
        )
        V = self.manifold.random_tangent_vector(X)
        assert np.linalg.norm(U - V) > 1e-6

    # def test_transport(self):

    def test_exp_log_inverse(self):
        s = self.manifold
        x = s.random_point()
        y = s.random_point()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.manifold.dist(y, z))

    def test_log_exp_inverse(self):
        s = self.manifold
        x = s.random_point()
        u = s.random_tangent_vector(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.manifold.norm(x, u - v))
Code example #20
def DR_geod_complex(X, m, verbosity=0):
    """ 
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n) 
    where X_hat_i = Q^T X_i, Q in St(n, m)
    minimizing the projection error (using geodesic distance)
    """
    N, n, p = X.shape
    Cgr = ComplexGrassmann(n, p, N)
    Cgr_low = ComplexGrassmann(m, p)
    Cgr_map = ComplexGrassmann(n, m) # n x m
    XXT = multiprod(X, multihconj(X))
    
    @pymanopt.function.Callable
    def cost(Q):
        tmp = np.array([np.matmul(Q, Q.conj().T) for i in range(N)]) # N x n x n
        new_X = multiprod(tmp, X) # N x n x p
        q = np.array([qr(new_X[i])[0] for i in range(N)])
        d2 = Cgr.dist(X, q)**2
        return d2/N
    
    @pymanopt.function.Callable
    def egrad(Q):
        """
        need to be fixed
        """
        QQ = np.matmul(Q, multihconj(Q))
        tmp = np.array([QQ for i in range(N)])
        XQQX = multiprod(multiprod(multihconj(X), tmp), X)
        lam, V = np.linalg.eigh(XQQX)
        theta = np.arccos(np.sqrt(lam))
        d = -2*theta/(np.cos(theta)*np.sin(theta))
        Sig = np.array([np.diag(dd) for dd in d])
        XV = multiprod(X,V)
        eg = multiprod(XV, multiprod(Sig, multitransp(XV.conj())))
        eg = np.mean(eg, axis = 0)
        eg = np.matmul(eg, Q)
        return eg

    def egrad_num(R, eps = 1e-8+1e-8j):
        """
        compute egrad numerically
        """
        g = np.zeros(R.shape, dtype=np.complex128)
        for i in range(n):
            for j in range(m):
                R1 = R.copy()
                R2 = R.copy()
                R1[i,j] += eps
                R2[i,j] -= eps
                g[i,j] = (cost(R1) - cost(R2))/(2*eps)
        return g

    # solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=Cgr_map, cost=cost, egrad=egrad, verbosity=verbosity)
    Q_proj = solver.solve(problem)

    tmp = np.array([multihconj(Q_proj) for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = X_low/np.expand_dims(np.linalg.norm(X_low, axis=1), axis = 2)

    M_hat = compute_centroid(Cgr_low, X_low)
    v_hat = var(Cgr_low, X_low, M_hat)/N
    # 'v' was undefined in the original; compute the variance of the inputs
    # so that var_ratio compares low-dimensional to original variance.
    Cgr_single = ComplexGrassmann(n, p)
    M = compute_centroid(Cgr_single, X)
    v = var(Cgr_single, X, M)/N
    var_ratio = v_hat/v
    return var_ratio, X_low, Q_proj
Code example #21
File: PNG.py (project: cvgmi/NestedGrassmann)
        scores[j, (n-p)*p-1] = v[0] # signed distance

    return scores[:,::-1]
    


if __name__ == '__main__':
    m = 6
    n = 10
    p = 2
    N = 15
    
    print(f'Example 1: {N} points on Gr({p}, {m}) embedded in Gr({p}, {n})\n') 
    
    sig = 0.1
    gr_low = Grassmann(m, p, N)
    gr = Grassmann(n, p, N)
    gr_map = Grassmann(n, m)

    X_low = gr_low.rand() # N x m x p
    A = gr_map.rand() # n x m
    #B = np.random.normal(0, 0.1, (n, p)) # n x p
    B = np.zeros((n,p))
    AAT = np.matmul(A, A.T) 
    IAATB = np.matmul(np.eye(n) - AAT, B)
    X = np.array([np.linalg.qr(np.matmul(A, X_low[i]) + IAATB)[0] for i in range(N)]) # N x n x p
    X = gr.exp(X, sig * gr.randvec(X)) # perturb the embedded X
    
    scores = PNG(X, log = True)
    
    print('\n')
Code example #22
    def test_tangent_vector_multiplication(self):
        # Regression test for https://github.com/pymanopt/pymanopt/issues/49.
        man = Product((Euclidean(12), Grassmann(12, 3)))
        x = man.rand()
        eta = man.randvec(x)
        np.float64(1.0) * eta
Code example #23
class TestSingleGrassmannManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 1
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))

    def test_dist(self):
        x = self.man.rand()
        y = self.man.rand()
        np_testing.assert_almost_equal(self.man.dist(x, y),
                                       self.man.norm(x, self.man.log(x, y)))

    def test_ehess2rhess(self):
        # Test this function at some randomly generated point.
        x = self.man.rand()
        u = self.man.randvec(x)
        egrad = rnd.randn(self.m, self.n)
        ehess = rnd.randn(self.m, self.n)

        np_testing.assert_allclose(testing.ehess2rhess(self.proj)(x, egrad,
                                                                  ehess, u),
                                   self.man.ehess2rhess(x, egrad, ehess, u))

    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)

        xretru = self.man.retr(x, u)

        np_testing.assert_allclose(multiprod(multitransp(xretru), xretru),
                                   np.eye(self.n),
                                   atol=1e-10)

        u = u * 1e-6
        xretru = self.man.retr(x, u)
        np_testing.assert_allclose(xretru, x + u)

    # def test_egrad2rgrad(self):

    # def test_norm(self):

    def test_rand(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.man.rand()
        np_testing.assert_allclose(multiprod(multitransp(X), X),
                                   np.eye(self.n), atol=1e-10)
        Y = self.man.rand()
        assert la.norm(X - Y) > 1e-6

    # def test_randvec(self):

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        x = s.rand()
        y = s.rand()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.man.dist(y, z), decimal=5)

    def test_log_exp_inverse(self):
        s = self.man
        x = s.rand()
        u = s.randvec(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.man.norm(x, u - v))
Code example #24
File: base_lin.py (project: kqh2621990/NDDAV)
def findSingleLP(X, d, k, sigma, embMethod='lpp'):
    D, N = X.shape
    W = np.zeros((N, N))
    B = np.zeros((N, N))

    if embMethod == 'pca':
        for i in range(N - 1):
            for j in range(i + 1, N):
                W[i, j] = 1.0 / N
        W = 0.5 * (W + W.T)
        B = np.eye(N)
        L = B - W
        M1 = X.dot(L).dot(X.T)
        Mc = np.eye(M1.shape[0])
    elif embMethod == 'lpp':
        G = kneighbors_graph(X.T, k, mode='distance',
                             include_self=False).toarray()
        W = 0.5 * (G + G.T)
        W[W != 0] = np.exp(-W[W != 0] / (2 * sigma * sigma))
        B = np.diag(np.sum(W, axis=0))
        L = B - W
        M1 = X.dot(L).dot(X.T)
        Mc = X.dot(B).dot(X.T)
    elif embMethod == 'rand':
        Gnk = Grassmann(D, 2)
        proj = Gnk.rand()
        return [proj]
    elif embMethod == 'syn':
        proj = np.zeros((D, 2))
        card = 2
        #ids = np.arange(D)
        ids = np.array([1, 0, 4, 3])  # For ecoli 2
        #ids = np.array([2,7,3,0]) # For yeast 2
        #ids = np.array([12, 39,  5,  0, 45, 43]) # For seaWater 3
        #ids = np.array([0, 46,  5, 14, 11, 40, 49, 43]) # For seaWater 4
        np.random.shuffle(ids)
        #print ids
        proj[ids[:card], 0] = 1 / np.sqrt(card)
        proj[ids[card:2 * card], 1] = 1 / np.sqrt(card)
        #proj[ids[card-1:2*card-1],1] = 1/np.sqrt(card) # For cities
        return [proj]

    evals, _ = eig(M1)
    if np.min(evals) < 0:
        M1 = M1 - np.min(evals) * np.eye(M1.shape[0])

    evals, _ = eig(Mc)
    if np.min(evals) < 0:
        Mc = Mc - np.min(evals) * np.eye(Mc.shape[0])

    eigvals, eigvecs = eig(M1, Mc)

    eigvecs = np.dot(sp.linalg.sqrtm(Mc), eigvecs)

    if embMethod == 'pca':
        ind = np.argsort(-eigvals)
        proj = eigvecs[:, ind[0:d]]
    elif embMethod == 'lpp':
        ind = np.argsort(eigvals)
        proj = eigvecs[:, ind[0:d]]

    return [proj]
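
A hypothetical call for the 'lpp' branch (note that X carries samples in its columns, matching the `D, N = X.shape` unpacking above):

import numpy as np

X_demo = np.random.randn(20, 200)   # D=20 features, N=200 samples
[proj] = findSingleLP(X_demo, d=2, k=10, sigma=1.0, embMethod='lpp')
Y_demo = proj.T @ X_demo            # 2 x 200 low-dimensional embedding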
Code example #25
def RidgeAlternating(X,
                     f,
                     U0,
                     degree=1,
                     maxiter=100,
                     tol=1e-10,
                     history=False,
                     disp=False,
                     gtol=1e-6,
                     inner_iter=20):
    if len(f.shape) == 1:
        f = f.reshape(-1, 1)

    # Instantiate the polynomial approximation
    rs = PolynomialApproximation(N=degree)

    # Instantiate the Grassmann manifold
    m, n = U0.shape
    manifold = Grassmann(m, n)

    if history:
        hist = {}
        hist['U'] = []
        hist['residual'] = []
        hist['inner_steps'] = []

    # Alternating minimization
    i = 0
    res = 1e9
    while i < maxiter and res > tol:

        # Train the polynomial approximation with projected points
        Y = np.dot(X, U0)
        rs.train(Y, f)

        # Minimize residual with polynomial over Grassmann
        func = lambda y: _res(y, X, f, rs)
        grad = lambda y: _dres(y, X, f, rs)

        problem = Problem(manifold=manifold,
                          cost=func,
                          egrad=grad,
                          verbosity=0)
        if history:
            solver = SteepestDescent(logverbosity=1,
                                     mingradnorm=gtol,
                                     maxiter=inner_iter,
                                     minstepsize=tol)
            U1, log = solver.solve(problem, x=U0)
        else:
            solver = SteepestDescent(logverbosity=0,
                                     mingradnorm=gtol,
                                     maxiter=inner_iter,
                                     minstepsize=tol)
            U1 = solver.solve(problem, x=U0)

        # Evaluate and store the residual
        res = func(U1)  # This is the squared mismatch
        if history:
            hist['U'].append(U1)
            # To match the rest of code, we define the residual as the mismatch
            r = (f - rs.predict(Y)[0]).flatten()
            hist['residual'].append(r)
            hist['inner_steps'].append(log['final_values']['iterations'])
        if disp:
            print "iter %3d\t |r| : %10.10e" % (i, np.linalg.norm(res))
        # Update iterators
        U0 = U1
        i += 1

    # Store data
    if i == maxiter:
        exitflag = 1
    else:
        exitflag = 0

    if history:
        return U0, hist
    else:
        return U0
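
A hypothetical driver for `RidgeAlternating` (assumes `PolynomialApproximation`, `_res`, and `_dres` from this project are importable; the synthetic response depends on a single direction by construction):

import numpy as np

X_demo = np.random.randn(500, 5)
f_demo = (X_demo @ np.ones(5)) ** 2          # exact one-dimensional ridge
U0 = np.linalg.qr(np.random.randn(5, 1))[0]  # random orthonormal start
U = RidgeAlternating(X_demo, f_demo, U0, degree=2)
print(U.shape)                               # (5, 1)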
Code example #26
def spatial_envelope(X_env, Y_env, si, theta, u, thershold):
    # constants: n, p, r; H, G; beta_MLE
    # si: matrix of locations
    # note: 'thershold' (sic, name kept) is the convergence tolerance parameter
    n, p, r = X_env.shape[0], X_env.shape[1], Y_env.shape[1]
    H = Y_env - np.kron(
        np.mean(Y_env, axis=0).reshape(1, r),
        np.repeat(1, n).reshape(n, 1))
    G = X_env - np.kron(
        np.mean(X_env, axis=0).reshape(1, p),
        np.repeat(1, n).reshape(n, 1))

    linear_model = LinearRegression().fit(X_env, Y_env)
    err = Y_env - linear_model.predict(X_env)
    beta_MLE = linear_model.coef_

    # changable
    # judge, iter_count, err_count, theta
    # V0_hat, V1_hat, PV0_hat, PV1_hat
    # Sigma_Y, Sigma_res
    # rho_h_theta
    Sigma_res = np.cov(err.T)
    Sigma_Y = np.cov(Y_env.T)

    judge = True
    iter_count = 0
    cal_thershold, min_thershold = thershold, thershold
    prot = 1
    err_count = 0
    while judge:
        try:
            if err_count > 10:
                print("Start new")
                theta = np.random.uniform(0, 0.5, 2)
                err_count = 0
            # Step1: Optimize on Gamma to get V0,V1,PV0,PV1
            def cost(Gamma):
                X = np.matmul(Gamma, Gamma.T)
                out = -np.log(
                    np.linalg.det(
                        np.matmul(np.matmul(X, Sigma_res), X) +
                        np.matmul(np.matmul(np.eye(r) - X, Sigma_Y),
                                  np.eye(r) - X)))
                return (np.array(out))

            manifold = Grassmann(r, u)
            #manifold = Stiefel(r,u)
            problem = Problem(manifold=manifold, cost=cost, verbosity=0)
            solver = SteepestDescent()
            Gamma = solver.solve(problem)
            PV1_hat = np.matmul(Gamma, Gamma.T)
            PV0_hat = np.eye(r) - PV1_hat

            V1_hat = np.matmul(np.matmul(PV1_hat, Sigma_Y), PV1_hat)
            V0_hat = np.matmul(np.matmul(PV1_hat, Sigma_res), PV1_hat)

            # Step2: Optimize on theta
            def theta_fun(theta):
                rho_h_theta = np.array(rho(si, theta))
                item1 = np.matmul(
                    sqrtm(np.linalg.inv(rho_h_theta).real).real, G)
                project = lambda x: np.eye(n) - np.matmul(
                    np.matmul(x,
                              np.linalg.inv(np.matmul(x.T, x)).real), x.T)
                item2 = np.matmul(
                    np.matmul(project(item1),
                              sqrtm(np.linalg.inv(rho_h_theta).real).real), H)
                item3 = np.matmul(
                    np.matmul(item2,
                              np.linalg.pinv(V1_hat).real), item2.T)
                item4 = np.matmul(
                    sqrtm(np.linalg.inv(rho_h_theta).real).real, H)
                item5 = np.matmul(
                    np.matmul(item4,
                              np.linalg.pinv(V0_hat).real), item4.T)
                loss = r * np.linalg.det(rho_h_theta) + 0.5 * np.trace(item3 +
                                                                       item5)
                return (loss)
#             print("Theta: {}".format(theta))

            opt_res = minimize(theta_fun, theta, method="BFGS")
            #             print("Pass")
            weight = max(min(1, 1 / cal_thershold),
                         (thershold / prot)**(1 - 1 / (iter_count + 1)))
            theta_opt = np.abs(np.array(opt_res.x))
            theta_new = (1 - weight) * theta + weight * theta_opt
            theta = theta_new
            #             theta = np.array(opt_res.x)
            # Step3 update Sigma_Y, Sigma_Res based on theta
            rho_h_theta = np.array(rho(si, theta))
            term1 = np.matmul(np.matmul(H.T,
                                        np.linalg.inv(rho_h_theta).real), H)
            term2 = np.matmul(np.matmul(G.T,
                                        np.linalg.inv(rho_h_theta).real), H)
            term3 = np.matmul(np.matmul(G.T,
                                        np.linalg.inv(rho_h_theta).real), G)

            Sigma_Y = term1
            Sigma_res = term1 - np.matmul(
                np.matmul(term2.T,
                          np.linalg.inv(term3).real), term2)

            if iter_count == 0:
                iter_count += 1
                oldV0_hat, oldV1_hat, old_theta = V0_hat, V1_hat, theta
                continue
#             print("Before thershold")
            cal_thershold = np.sum((oldV1_hat - V1_hat)**2) + np.sum(
                (oldV0_hat - V0_hat)**2) + np.sum((old_theta - theta)**2)
            print("Gap: {}, Theta: {}, weight: {}".format(
                cal_thershold, theta, weight))
            if cal_thershold < thershold:
                judge = False
                min_thershold = min(min_thershold, cal_thershold)
                prot = cal_thershold / min_thershold

            oldV0_hat, oldV1_hat, old_theta = V0_hat, V1_hat, theta
            iter_count += 1
        except Exception:
            err_count += 1
            theta = theta + np.array([
                randint(-10, 10) * thershold * prot,
                randint(-10, 10) * thershold * prot
            ])
            X_env = X_env + np.random.normal(0, 1e-6, n * p).reshape(n, p)
            Y_env = Y_env + np.random.normal(0, 1e-6, n * r).reshape(n, r)
            continue

    beta_final = np.matmul(PV1_hat, beta_MLE)
    Y_bar = np.mean(Y_env, axis=0)
    X_bar = np.mean(X_env, axis=0)
    alpha_final = Y_bar - np.matmul(X_bar, beta_final.T)
    output = (alpha_final.reshape(1, r), beta_final.reshape(p, r))
    #     print("stop, iter = {}".format(iter_count+err_count))

    return (output)
Code example #27
class TestMultiGrassmannManifold(unittest.TestCase):
    def setUp(self):
        self.m = m = 5
        self.n = n = 2
        self.k = k = 3
        self.man = Grassmann(m, n, k=k)

        self.proj = lambda x, u: u - npa.dot(x, npa.dot(x.T, u))

    def test_dim(self):
        assert self.man.dim == self.k * (self.m * self.n - self.n ** 2)

    def test_typicaldist(self):
        np_testing.assert_almost_equal(self.man.typicaldist,
                                       np.sqrt(self.n * self.k))

    def test_dist(self):
        x = self.man.rand()
        y = self.man.rand()
        np_testing.assert_almost_equal(self.man.dist(x, y),
                                       self.man.norm(x, self.man.log(x, y)))

    def test_inner(self):
        X = self.man.rand()
        A = self.man.randvec(X)
        B = self.man.randvec(X)
        np_testing.assert_allclose(np.sum(A * B), self.man.inner(X, A, B))

    def test_proj(self):
        # Construct a random point X on the manifold.
        X = self.man.rand()

        # Construct a vector H in the ambient space.
        H = rnd.randn(self.k, self.m, self.n)

        # Compare the projections.
        Hproj = H - multiprod(X, multiprod(multitransp(X), H))
        np_testing.assert_allclose(Hproj, self.man.proj(X, H))

    def test_retr(self):
        # Test that the result is on the manifold and that for small
        # tangent vectors it has little effect.
        x = self.man.rand()
        u = self.man.randvec(x)

        xretru = self.man.retr(x, u)

        np_testing.assert_allclose(multiprod(multitransp(xretru), xretru),
                                   multieye(self.k, self.n),
                                   atol=1e-10)

        u = u * 1e-6
        xretru = self.man.retr(x, u)
        np_testing.assert_allclose(xretru, x + u)

    # def test_egrad2rgrad(self):

    def test_norm(self):
        x = self.man.rand()
        u = self.man.randvec(x)
        np_testing.assert_almost_equal(self.man.norm(x, u), la.norm(u))

    def test_rand(self):
        # Just make sure that things generated are on the manifold and that
        # if you generate two they are not equal.
        X = self.man.rand()
        np_testing.assert_allclose(multiprod(multitransp(X), X),
                                   multieye(self.k, self.n), atol=1e-10)
        Y = self.man.rand()
        assert la.norm(X - Y) > 1e-6

    def test_randvec(self):
        # Make sure things generated are in tangent space and if you generate
        # two then they are not equal.
        X = self.man.rand()
        U = self.man.randvec(X)
        np_testing.assert_allclose(multisym(multiprod(multitransp(X), U)),
                                   np.zeros((self.k, self.n, self.n)),
                                   atol=1e-10)
        V = self.man.randvec(X)
        assert la.norm(U - V) > 1e-6

    # def test_transp(self):

    def test_exp_log_inverse(self):
        s = self.man
        x = s.rand()
        y = s.rand()
        u = s.log(x, y)
        z = s.exp(x, u)
        np_testing.assert_almost_equal(0, self.man.dist(y, z))

    def test_log_exp_inverse(self):
        s = self.man
        x = s.rand()
        u = s.randvec(x)
        y = s.exp(x, u)
        v = s.log(x, y)
        # Check that the manifold difference between the tangent vectors u and
        # v is 0
        np_testing.assert_almost_equal(0, self.man.norm(x, u - v))
Code example #28
File: PNG.py (project: cvgmi/NestedGrassmann)
def PNG(X, log=True, verbosity = 1):
    # Assuming X consists of N points on Gr(p, n), p < n
    # options:
    #     log: print projection info
    # return an N x p(n-p) score array
    
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued
    scores = np.zeros((N,int(p*(n-p))), dtype = X.dtype)
    scores[:] = np.NaN
    X_old = X.copy()
    
    # Gr(p, n) -> Gr(p, n-1) -> ... -> Gr(p, p+1)
    for i in range(n-1, p, -1):
        if log:
            print(f'Gr({p}, {i+1}) -> Gr({p}, {i})')
        
        #X_new, A, B = NG_dr(X_old, i, verbosity) 
        X_new, A, A_perp, b = NG_dr1(X_old, verbosity) 
        #A_perp = ortho_complement(A)[:,0]
        AAT = np.matmul(A, A.conj().T) 
        #IAATB = np.matmul(np.eye(X_old.shape[1]) - AAT, B)
        A_perpBT = np.matmul(A_perp, b.T)
        #X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + IAATB)[0] for i in range(N)])
        X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + A_perpBT)[0] for i in range(N)])
            
        # compute scores
        if cpx:
            gr = ComplexGrassmann(X_old.shape[1], X_old.shape[2])
        else:    
            gr = Grassmann(X_old.shape[1], X_old.shape[2])
        
        for j in range(N):
            scores[j,((n-i-1)*p):(n-i)*p] = gr.dist(X_old[j], X_new_embedded[j]) * \
            np.matmul(X_old[j].conj().transpose(), A_perp)[:,0]

        X_old = X_new
        
    if p > 1:
        
        # Gr(p, p+1) -> Gr(1, p+1)
        X_new = np.zeros((N, p+1, 1), dtype=X_old.dtype)
        if log:
            print(f'Gr({p}, {p+1}) -> Gr(1, {p+1})')
        for i in range(N):
            X_new[i] = ortho_complement(X_old[i])

        X_old = X_new

        # Gr(1, p+1) -> ... -> Gr(1,2)
        for i in range(p, 1, -1):
            if log:
                print(f'Gr(1, {i+1}) -> Gr(1, {i})')   

            #X_new, A, B = NG_dr(X_old, i, verbosity)
            X_new, A, A_perp, b = NG_dr1(X_old, verbosity)
            #A_perp = ortho_complement(A)[:,0]
            AAT = np.matmul(A, A.conj().T) 
            #IAATB = np.matmul(np.eye(X_old.shape[1]) - AAT, B)
            A_perpBT = np.matmul(A_perp, b.T)
            #X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + IAATB)[0] for i in range(N)])
            X_new_embedded = np.array([np.linalg.qr(np.matmul(A, X_new[i]) + A_perpBT)[0] for i in range(N)])

            # compute scores
            if cpx:              
                gr = ComplexGrassmann(X_old.shape[1], X_old.shape[2])
            else:
                gr = Grassmann(X_old.shape[1], X_old.shape[2])
            
            for j in range(N):
                scores[j,(n-p)*p-i] = gr.dist(X_old[j], X_new_embedded[j]) * \
                np.matmul(X_old[j].conj().transpose(), A_perp)

            X_old = X_new
    
    # Gr(1,2) -> NGM
    if log:
        print('Gr(1, 2) -> NGM')
    
    if cpx:
        gr = ComplexGrassmann(2,1)
    else:
        gr = Grassmann(2,1)
        
    NGM = compute_centroid(gr, X_new)
    v_0 = gr.log(NGM, X_new[0])
    v_0 = v_0/np.linalg.norm(v_0)
    for j in range(N):
        v = gr.log(NGM, X_new[j])/v_0
        scores[j, (n-p)*p-1] = v[0] # signed distance

    return scores[:,::-1]