Example #1
File: pca.py Project: agoel00/MBAweb
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    dimension = 3
    num_samples = 200
    num_components = 2
    samples = np.random.randn(num_samples, dimension) @ np.diag([3, 2, 1])
    samples -= samples.mean(axis=0)

    cost, egrad, ehess = create_cost_egrad_ehess(backend, samples,
                                                 num_components)
    manifold = Stiefel(dimension, num_components)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    # from pymanopt.solvers import ConjugateGradient
    # solver = ConjugateGradient()
    estimated_span_matrix = solver.solve(problem)

    if quiet:
        return

    estimated_projector = estimated_span_matrix @ estimated_span_matrix.T

    eigenvalues, eigenvectors = np.linalg.eig(samples.T @ samples)
    indices = np.argsort(eigenvalues)[::-1][:num_components]
    span_matrix = eigenvectors[:, indices]
    projector = span_matrix @ span_matrix.T

    print(
        "Frobenius norm error between estimated and closed-form projection "
        "matrix:", np.linalg.norm(projector - estimated_projector))
Example #2
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m = 5
    n = 8
    matrix = np.random.normal(size=(m, n))

    manifold = Oblique(m, n)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend)
    problem = pymanopt.Problem(manifold,
                               cost,
                               euclidean_gradient=euclidean_gradient)

    optimizer = ConjugateGradient(verbosity=2 * int(not quiet),
                                  beta_rule="FletcherReeves")
    Xopt = optimizer.run(problem).point

    if quiet:
        return

    # Calculate the actual solution by normalizing the columns of matrix.
    X = matrix / np.linalg.norm(matrix, axis=0)[np.newaxis, :]

    # Print information about the solution.
    print("Solution found:", np.allclose(X, Xopt, rtol=1e-3))
    print("Frobenius-error:", np.linalg.norm(X - Xopt))
Example #3
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 128
    subspace_dimension = 3
    matrix = np.random.normal(size=(
        num_rows, num_rows)) + 1j * np.random.normal(size=(num_rows, num_rows))
    matrix = 0.5 * (matrix + matrix.T.conj())

    manifold = ComplexGrassmann(num_rows, subspace_dimension)
    cost, euclidean_gradient, euclidean_hessian = create_cost_and_derivates(
        manifold, matrix, backend)
    problem = pymanopt.Problem(
        manifold,
        cost,
        euclidean_gradient=euclidean_gradient,
        euclidean_hessian=euclidean_hessian,
    )

    optimizer = TrustRegions(verbosity=2 * int(not quiet))
    estimated_spanning_set = optimizer.run(problem,
                                           Delta_bar=8 *
                                           np.sqrt(subspace_dimension)).point

    if quiet:
        return

    eigenvalues, eigenvectors = np.linalg.eig(matrix)
    column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
    spanning_set = eigenvectors[:, column_indices]
    print(
        "Geodesic distance between true and estimated dominant complex "
        "subspace:",
        manifold.dist(spanning_set, estimated_spanning_set),
    )
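create_cost_and_derivates is not shown in this excerpt. A minimal sketch for the "autograd" backend, reusing the cost from the test in Example #9:

import autograd.numpy as anp
import pymanopt


def create_cost_and_derivates(manifold, matrix, backend):
    # Hypothetical sketch: "autograd" backend only; gradient and Hessian are
    # left to automatic differentiation.
    if backend != "autograd":
        raise ValueError(f"Unsupported backend: {backend}")

    @pymanopt.function.autograd(manifold)
    def cost(X):
        # Maximize the (real) trace of X^H A X, as in Example #9.
        return -anp.real(anp.trace(anp.conj(X.T) @ matrix @ X))

    return cost, None, None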
Example #4
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 1000
    rank = 5
    low_rank_factor = np.random.normal(size=(num_rows, rank))
    matrix = low_rank_factor @ low_rank_factor.T

    manifold = PSDFixedRank(num_rows, rank)
    cost, euclidean_gradient, euclidean_hessian = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold,
        cost,
        euclidean_gradient=euclidean_gradient,
        euclidean_hessian=euclidean_hessian,
    )

    optimizer = TrustRegions(
        max_iterations=500, min_step_size=1e-6, verbosity=2 * int(not quiet)
    )
    low_rank_factor_estimate = optimizer.run(problem).point

    if quiet:
        return

    print("Rank of target matrix:", np.linalg.matrix_rank(matrix))
    matrix_estimate = low_rank_factor_estimate @ low_rank_factor_estimate.T
    print(
        "Frobenius norm error of low-rank estimate:",
        np.linalg.norm(matrix - matrix_estimate),
    )
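create_cost_and_derivates is not shown here. A minimal sketch for the "autograd" backend, assuming the cost is the squared Frobenius error of the factorization Y @ Y.T:

import autograd.numpy as anp
import pymanopt


def create_cost_and_derivates(manifold, matrix, backend):
    # Hypothetical sketch: "autograd" backend only.
    if backend != "autograd":
        raise ValueError(f"Unsupported backend: {backend}")

    @pymanopt.function.autograd(manifold)
    def cost(Y):
        # A point on PSDFixedRank is a factor Y with matrix ~ Y @ Y.T.
        return anp.linalg.norm(Y @ Y.T - matrix) ** 2

    return cost, None, None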
Example #5
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_samples, num_weights = 200, 3

    solver = TrustRegions()
    manifold = Euclidean(3)

    for k in range(5):
        samples = rnd.randn(num_samples, num_weights)
        targets = rnd.randn(num_samples)

        cost, egrad, ehess = create_cost_egrad_ehess(backend, samples, targets)
        problem = pymanopt.Problem(manifold,
                                   cost,
                                   egrad=egrad,
                                   ehess=ehess,
                                   verbosity=0)

        estimated_weights = solver.solve(problem)
        if not quiet:
            print("Run {}".format(k + 1))
            print("Weights found by pymanopt (top) / "
                  "closed form solution (bottom)")
            print(estimated_weights)
            print(la.pinv(samples) @ targets)
            print("")
Example #6
def compute_centroid(manifold, points):
    """Compute the centroid of `points` on the `manifold` as Karcher mean."""
    num_points = len(points)

    @pymanopt.function.Callable
    def objective(y):
        accumulator = 0
        for i in range(num_points):
            accumulator += manifold.dist(y, points[i]) ** 2
        return accumulator / 2

    @pymanopt.function.Callable
    def gradient(y):
        g = manifold.zerovec(y)
        g = g.astype(points.dtype)
        for i in range(num_points):
            g -= manifold.log(y, points[i])
        return g

    # XXX: Manopt runs a few TR iterations here. For us to do this, we would
    #      either need to work out the Hessian of the Karcher mean by hand or
    #      implement approximations of the Hessian for the TR solver, as
    #      Manopt does. The reason is that we cannot implement the Karcher
    #      mean in Theano, say, and compute the Hessian automatically, because
    #      of the dependence on the manifold's distance function, which is
    #      written in numpy.
    solver = SteepestDescent(maxiter=15)
    problem = pymanopt.Problem(manifold, objective, grad=gradient, verbosity=0)
    return solver.solve(problem)
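A hypothetical usage example (old-style pymanopt API, where manifolds expose rand() for sampling random points):

import numpy as np
from pymanopt.manifolds import Sphere

sphere = Sphere(3)
points = np.array([sphere.rand() for _ in range(5)])  # random points on S^2
centroid = compute_centroid(sphere, points)
print(np.linalg.norm(centroid))  # the centroid lies on the sphere, so ~1.0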
Example #7
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 128
    matrix = rnd.randn(n, n)
    matrix = 0.5 * (matrix + matrix.T)

    cost, egrad = create_cost_egrad(backend, matrix)
    manifold = Sphere(n)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = SteepestDescent()
    estimated_dominant_eigenvector = solver.solve(problem)

    if quiet:
        return

    # Calculate the actual solution by a conventional eigenvalue decomposition.
    eigenvalues, eigenvectors = la.eig(matrix)
    dominant_eigenvector = eigenvectors[:, np.argmax(eigenvalues)]

    # Make sure both vectors have the same direction. Both are valid
    # eigenvectors, but for comparison we need to get rid of the sign
    # ambiguity.
    if (np.sign(dominant_eigenvector[0]) != np.sign(
            estimated_dominant_eigenvector[0])):
        estimated_dominant_eigenvector = -estimated_dominant_eigenvector

    # Print information about the solution.
    print("l2-norm of x: %f" % la.norm(dominant_eigenvector))
    print("l2-norm of xopt: %f" % la.norm(estimated_dominant_eigenvector))
    print("Solution found: %s" % np.allclose(
        dominant_eigenvector, estimated_dominant_eigenvector, rtol=1e-3))
    error_norm = la.norm(dominant_eigenvector - estimated_dominant_eigenvector)
    print("l2-error: %f" % error_norm)
Example #8
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m, n, rank = 5, 4, 2
    matrix = rnd.randn(m, n)

    cost, egrad = create_cost_egrad(backend, matrix, rank)
    manifold = FixedRankEmbedded(m, n, rank)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient()
    left_singular_vectors, singular_values, right_singular_vectors = \
        solver.solve(problem)
    low_rank_approximation = (left_singular_vectors @ np.diag(singular_values)
                              @ right_singular_vectors)

    if not quiet:
        u, s, vt = la.svd(matrix, full_matrices=False)
        indices = np.argsort(s)[-rank:]
        low_rank_solution = (
            u[:, indices] @ np.diag(s[indices]) @ vt[indices, :])
        print("Analytic low-rank solution:")
        print()
        print(low_rank_solution)
        print()
        print("Rank-{} approximation:".format(rank))
        print()
        print(low_rank_approximation)
        print()
        print("Frobenius norm error:",
              la.norm(low_rank_approximation - low_rank_solution))
        print()
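create_cost_egrad is not shown. A minimal sketch for the "autograd" backend, following the same unpacking of the (u, s, vt) point used in Example #20:

import autograd.numpy as anp
import pymanopt


def create_cost_egrad(backend, matrix, rank):
    # Hypothetical sketch: "autograd" backend only.
    if backend != "autograd":
        raise ValueError(f"Unsupported backend: {backend}")

    @pymanopt.function.Autograd
    def cost(u, s, vt):
        # A point on FixedRankEmbedded is the triple (u, s, vt) of a thin SVD,
        # so the candidate low-rank matrix is (u * s) @ vt.
        return anp.linalg.norm((u * s) @ vt - matrix) ** 2

    return cost, None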
Example #9
    def test_complex_cost_problem(self):
        # Solve the dominant invariant complex subspace problem.
        num_rows = 32
        subspace_dimension = 3
        matrix = np.random.normal(
            size=(num_rows,
                  num_rows)) + 1j * np.random.normal(size=(num_rows, num_rows))
        matrix = 0.5 * (matrix + matrix.T.conj())

        manifold = pymanopt.manifolds.ComplexGrassmann(num_rows,
                                                       subspace_dimension)

        @pymanopt.function.autograd(manifold)
        def cost(X):
            return -np.real(np.trace(np.conj(X.T) @ matrix @ X))

        problem = pymanopt.Problem(manifold, cost)
        optimizer = ConjugateGradient(verbosity=0)
        estimated_spanning_set = optimizer.run(problem).point

        # True solution.
        eigenvalues, eigenvectors = np.linalg.eig(matrix)
        column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
        spanning_set = eigenvectors[:, column_indices]
        np_testing.assert_allclose(manifold.dist(spanning_set,
                                                 estimated_spanning_set),
                                   0,
                                   atol=1e-6)
Example #10
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 10
    rank = 3
    matrix = np.random.normal(size=(num_rows, num_rows))
    matrix = 0.5 * (matrix + matrix.T)

    # Solve the problem with pymanopt.
    manifold = Oblique(rank, num_rows)
    cost, euclidean_gradient, euclidean_hessian = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold,
        cost,
        euclidean_gradient=euclidean_gradient,
        euclidean_hessian=euclidean_hessian,
    )

    optimizer = TrustRegions(verbosity=2 * int(not quiet))
    X = optimizer.run(problem).point

    if quiet:
        return

    C = X.T @ X
    print("Diagonal elements:", np.diag(C))
    print("Eigenvalues:", np.sort(np.linalg.eig(C)[0].real)[::-1])
Example #11
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    dimension = 3  # Dimension of the embedding space, i.e. R^k
    num_points = 24  # Points on the sphere
    # This value should be as close to 0 as affordable. If it is too close to
    # zero, optimization first becomes much slower, then simply stops working
    # because of floating point overflow errors (NaNs and Infs start to
    # appear). If it is too large, then log-sum-exp is a poor approximation
    # of the max function, and the spread will be less uniform. A reasonable
    # value seems to be 0.01 or 0.001. Note that a better strategy than using
    # a small epsilon straight away is to reduce epsilon bit by bit and to
    # warm-start each subsequent optimization from the previous solution.
    # Trust-region methods are more appropriate for such fine-tuning.
    epsilon = 0.0015

    cost = create_cost(backend, dimension, num_points, epsilon)
    manifold = Elliptope(num_points, dimension)
    problem = pymanopt.Problem(manifold, cost)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient(mingradnorm=1e-8, maxiter=1e5)
    Yopt = solver.solve(problem)

    if quiet:
        return

    Xopt = Yopt @ Yopt.T
    maxdot = np.triu(Xopt, 1).max()
    print("Maximum angle between any two points:", maxdot)
Example #12
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_samples, num_weights = 200, 3

    optimizer = TrustRegions(verbosity=0)
    manifold = Euclidean(3)

    for k in range(5):
        samples = np.random.normal(size=(num_samples, num_weights))
        targets = np.random.normal(size=num_samples)

        (
            cost,
            euclidean_gradient,
            euclidean_hessian,
        ) = create_cost_and_derivates(manifold, samples, targets, backend)
        problem = pymanopt.Problem(
            manifold,
            cost,
            euclidean_gradient=euclidean_gradient,
            euclidean_hessian=euclidean_hessian,
        )

        estimated_weights = optimizer.run(problem).point
        if not quiet:
            print(f"Run {k + 1}")
            print(
                "Weights found by pymanopt (top) / "
                "closed form solution (bottom)"
            )
            print(estimated_weights)
            print(np.linalg.pinv(samples) @ targets)
            print("")
Example #13
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    """This example generates a random 128 x 128 symmetric matrix and finds the
    dominant invariant 3 dimensional subspace for this matrix, i.e., it finds
    the subspace spanned by the three eigenvectors with the largest
    eigenvalues.
    """
    num_rows = 128
    subspace_dimension = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    cost, egrad, ehess = create_cost_egrad_ehess(
        backend, matrix, subspace_dimension)
    manifold = Grassmann(num_rows, subspace_dimension)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    estimated_spanning_set = solver.solve(
        problem, Delta_bar=8*np.sqrt(subspace_dimension))

    if quiet:
        return

    eigenvalues, eigenvectors = la.eig(matrix)
    column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
    spanning_set = eigenvectors[:, column_indices]
    print("Geodesic distance between true and estimated dominant subspace:",
          manifold.dist(spanning_set, estimated_spanning_set))
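create_cost_egrad_ehess is not shown; a minimal sketch for the "autograd" backend, using the real-valued analogue of the cost from Example #9:

import autograd.numpy as anp
import pymanopt


def create_cost_egrad_ehess(backend, matrix, subspace_dimension):
    # Hypothetical sketch: "autograd" backend only, derivatives via autodiff.
    if backend != "autograd":
        raise ValueError(f"Unsupported backend: {backend}")

    @pymanopt.function.Autograd
    def cost(X):
        # Real-valued analogue of the complex subspace cost in Example #9.
        return -anp.trace(X.T @ matrix @ X)

    return cost, None, None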
Example #14
def run(quiet=True):
    dimension = 3
    num_samples = 200
    num_components = 2
    samples = np.random.randn(num_samples, dimension) @ np.diag([3, 2, 1])
    samples -= samples.mean(axis=0)
    samples_ = torch.from_numpy(samples)

    @pymanopt.function.PyTorch
    def cost(w):
        projector = torch.matmul(w, torch.transpose(w, 1, 0))
        return torch.norm(samples_ - torch.matmul(samples_, projector)) ** 2

    manifold = Stiefel(dimension, num_components)
    problem = pymanopt.Problem(manifold, cost, egrad=None, ehess=None)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    # from pymanopt.solvers import ConjugateGradient
    # solver = ConjugateGradient()
    estimated_span_matrix = solver.solve(problem)

    if quiet:
        return

    estimated_projector = estimated_span_matrix @ estimated_span_matrix.T

    eigenvalues, eigenvectors = np.linalg.eig(samples.T @ samples)
    indices = np.argsort(eigenvalues)[::-1][:num_components]
    span_matrix = eigenvectors[:, indices]
    projector = span_matrix @ span_matrix.T

    print("Frobenius norm error between estimated and closed-form projection "
          "matrix:", np.linalg.norm(projector - estimated_projector))
Example #15
 def test_attribute_override(self):
     problem = pymanopt.Problem(self.man, self.cost)
     with self.assertRaises(ValueError):
         problem.verbosity = "0"
     with self.assertRaises(ValueError):
         problem.verbosity = -1
     problem.verbosity = 2
     with self.assertRaises(AttributeError):
         problem.manifold = None
Example #16
    def setUp(self):
        n = 32
        matrix = np.random.normal(size=(n, n))
        self.manifold = manifold = Sphere(n)
        self.max_iterations = 50

        @pymanopt.function.autograd(manifold)
        def cost(point):
            return -point.T @ matrix @ point

        self.problem = pymanopt.Problem(manifold, cost)
Example #17
    def test_vararg_cost_on_product(self):
        shape = (3, 3)
        manifold = Product([Stiefel(*shape)] * 2)

        @pymanopt.function.tensorflow(manifold)
        def cost(*args):
            X, Y = args
            return tf.reduce_sum(X) + tf.reduce_sum(Y)

        problem = pymanopt.Problem(manifold, cost)
        optimizer = TrustRegions(max_iterations=1)
        Xopt, Yopt = optimizer.run(problem).point
        self.assertEqual(Xopt.shape, (3, 3))
        self.assertEqual(Yopt.shape, (3, 3))
Example #18
def cp_mds_reg(X, D, lam=1.0, v=1, maxiter=1000):
    """Version of MDS in which "signs" are also an optimization parameter.

    Rather than performing a full optimization and then resetting the
    sign matrix, here we treat the signs as a parameter `A = [a_ij]` and
    minimize the cost function
        F(X,A) = ||W*(X^H(A*X) - cos(D))||^2 + lambda*||A - X^HX/|X^HX| ||^2
    Lambda is a regularization parameter we can experiment with. The
    collection of data, `X`, is treated as a point on the `Oblique`
    manifold, consisting of `k*n` matrices with unit-norm columns. Since
    we are working on a sphere in complex space we require `k` to be
    even. The first `k/2` entries of each column are the real components
    and the last `k/2` entries are the imaginary parts.

    Parameters
    ----------
    X : ndarray (k, n)
        Initial guess for data.
    D : ndarray (n, n)
        Goal distance matrix.
    lam : float, optional
        Weight to give the regularization term.
    v : int, optional
        Verbosity.
    maxiter : int, optional
        Maximum number of iterations for the solver.

    Returns
    -------
    X_opt : ndarray (k, n)
        Collection of points optimizing cost.

    """

    dim = X.shape[0]
    num_points = X.shape[1]
    W = distance_to_weights(D)
    Sreal, Simag = norm_rotations(X)
    A = np.vstack((
        np.reshape(Sreal, (1, num_points**2)),
        np.reshape(Simag, (1, num_points**2)),
    ))
    cp_manifold = Oblique(dim, num_points)
    a_manifold = Oblique(2, num_points**2)
    manifold = Product((cp_manifold, a_manifold))
    solver = ConjugateGradient(maxiter=maxiter, maxtime=float('inf'))
    cost = setup_reg_autograd_cost(D, int(dim / 2), num_points, lam=lam)
    problem = pymanopt.Problem(cost=cost, manifold=manifold)
    Xopt, Aopt = solver.solve(problem, x=(X, A))
    Areal = np.reshape(Aopt[0, :], (num_points, num_points))
    Aimag = np.reshape(Aopt[1, :], (num_points, num_points))
    return Xopt, Areal, Aimag
Example #19
    def setUp(self):
        n = 32
        matrix = np.random.normal(size=(n, n))
        matrix = 0.5 * (matrix + matrix.T)

        eigenvalues, eigenvectors = np.linalg.eig(matrix)
        self.dominant_eigenvector = eigenvectors[:, np.argmax(eigenvalues)]

        self.manifold = manifold = pymanopt.manifolds.Sphere(n)

        @pymanopt.function.autograd(manifold)
        def cost(point):
            return -point.T @ matrix @ point

        self.problem = pymanopt.Problem(manifold, cost)
Example #20
    def setUp(self):
        self.m = m = 20
        self.n = n = 10
        self.rank = rank = 3

        A = np.random.randn(m, n)

        @pymanopt.function.Autograd
        def cost(u, s, vt, x):
            return np.linalg.norm(((u * s) @ vt - A) @ x)**2

        self.cost = cost
        self.gradient = self.cost.compute_gradient()
        self.hvp = self.cost.compute_hessian_vector_product()

        self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])
        self.problem = pymanopt.Problem(self.manifold, self.cost)
Example #21
    def setUp(self):
        self.m = m = 20
        self.n = n = 10
        self.rank = rank = 3

        A = np.random.normal(size=(m, n))
        self.manifold = Product([FixedRankEmbedded(m, n, rank), Euclidean(n)])

        @pymanopt.function.autograd(self.manifold)
        def cost(u, s, vt, x):
            return np.linalg.norm(((u * s) @ vt - A) @ x) ** 2

        self.cost = cost
        self.gradient = self.cost.get_gradient_operator()
        self.hessian = self.cost.get_hessian_operator()

        self.problem = pymanopt.Problem(self.manifold, self.cost)
Example #22
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m, n, rank = 5, 4, 2
    matrix = np.random.normal(size=(m, n))

    manifold = FixedRankEmbedded(m, n, rank)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold, cost, euclidean_gradient=euclidean_gradient
    )

    optimizer = ConjugateGradient(
        verbosity=2 * int(not quiet), beta_rule="PolakRibiere"
    )
    (
        left_singular_vectors,
        singular_values,
        right_singular_vectors,
    ) = optimizer.run(problem).point
    low_rank_approximation = (
        left_singular_vectors
        @ np.diag(singular_values)
        @ right_singular_vectors
    )

    if not quiet:
        u, s, vt = np.linalg.svd(matrix, full_matrices=False)
        indices = np.argsort(s)[-rank:]
        low_rank_solution = (
            u[:, indices] @ np.diag(s[indices]) @ vt[indices, :]
        )
        print("Analytic low-rank solution:")
        print()
        print(low_rank_solution)
        print()
        print(f"Rank-{rank} approximation:")
        print()
        print(low_rank_approximation)
        print()
        print(
            "Frobenius norm error:",
            np.linalg.norm(low_rank_approximation - low_rank_solution),
        )
        print()
Example #23
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 128
    matrix = np.random.normal(size=(n, n))
    matrix = 0.5 * (matrix + matrix.T)

    manifold = Sphere(n)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold, cost, euclidean_gradient=euclidean_gradient
    )

    optimizer = SteepestDescent(verbosity=2 * int(not quiet))
    estimated_dominant_eigenvector = optimizer.run(problem).point

    if quiet:
        return

    # Calculate the actual solution by a conventional eigenvalue decomposition.
    eigenvalues, eigenvectors = np.linalg.eig(matrix)
    dominant_eigenvector = eigenvectors[:, np.argmax(eigenvalues)]

    # Make sure both vectors have the same direction. Both are valid
    # eigenvectors, but for comparison we need to get rid of the sign
    # ambiguity.
    if np.sign(dominant_eigenvector[0]) != np.sign(
        estimated_dominant_eigenvector[0]
    ):
        estimated_dominant_eigenvector = -estimated_dominant_eigenvector

    # Print information about the solution.
    print("l2-norm of x:", np.linalg.norm(dominant_eigenvector))
    print("l2-norm of xopt:", np.linalg.norm(estimated_dominant_eigenvector))
    print(
        "Solution found:",
        np.allclose(
            dominant_eigenvector, estimated_dominant_eigenvector, atol=1e-6
        ),
    )
    error_norm = np.linalg.norm(
        dominant_eigenvector - estimated_dominant_eigenvector
    )
    print("l2-error:", error_norm)
Example #24
 def run_minimization(self, a, b, Y0=None, testing=True):
     """
     Optimizes the problem a*tr(Y) + b*neg(Y).

     If Y0 is given, gradient descent starts from Y0; otherwise it starts
     from a random point. Returns the Y value together with the log
     information from the solver.

     If testing=True, the number of iterations needed for convergence is
     also printed, which is useful for watching the algorithm run and
     understanding its difficulties.
     """
     cost = self.fn_weighted(a, b)
     egrad = self.gr_weighted(a, b)
     pblm = mo.Problem(manifold=self.M, cost=cost, egrad=egrad, verbosity=0)
     Y, log = self.solver.solve(pblm, x=Y0)
     if testing:
         print("Number of Iterations: " + str(log['final_values']['iterations']))
     return Y, log
Example #25
def main_mds(D, dim, X=None, space='real'):
    """MDS via gradient descent with the chordal metric.

    Parameters
    ----------
    D : ndarray (n, n)
        Goal distance matrix.
    dim : int
        Goal dimension (of ambient Euclidean space).
    X : ndarray (dim, n), optional
        Initial value for gradient descent. `n` points in dimension `dim`. If
        both a dimension and an initial condition are specified, the initial
        condition overrides the dimension.
    space : str
        Choice of real or complex version. Options 'real', 'complex'. If
        'complex', `dim` must be even.

    """

    n = D.shape[0]
    max_d = np.max(D)
    if max_d > np.pi / 2:
        print('WARNING: maximum value in distance matrix exceeds diameter of '\
            'projective space. Max value in distance matrix = %2.4f.' %max_d)
    manifold = pymanopt.manifolds.Oblique(dim, n)
    solver = pymanopt.solvers.ConjugateGradient()
    if space == 'real':
        # Set return_grad=False to use auto gradient. Testing shows they are
        # identical, but analytic grad significantly faster.
        cost, egrad = setup_RPn_cost(D, return_grad=True)
    elif space == 'complex':
        cost, egrad = setup_CPn_cost(D, int(dim / 2), return_grad=False)
    else:
        raise ValueError("space must be 'real' or 'complex'.")
    problem = pymanopt.Problem(manifold=manifold, cost=cost, egrad=egrad)
    if X is None:
        X_out = solver.solve(problem)
    else:
        if X.shape[0] != dim:
            print('WARNING: initial condition does not match specified goal '\
                'dimension. Finding optimum in dimension %d' %X.shape[0])
        X_out = solver.solve(problem, x=X)
    return X_out
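A hypothetical usage example with a small synthetic distance matrix (values kept below pi/2, the diameter of projective space mentioned in the warning; a random toy matrix like this is not necessarily a true metric):

import numpy as np

n = 10
D = np.random.uniform(0.0, np.pi / 2, size=(n, n))
D = 0.5 * (D + D.T)            # symmetrize
np.fill_diagonal(D, 0.0)       # zero self-distances
X_out = main_mds(D, dim=3, space='real')
print(X_out.shape)             # (3, 10): columns are unit vectors in R^3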
Example #26
    def minneg_in_SOk(self, Y0):
        r"""
        Minimizes the negative part of $Y_0 Q$ over $Q \in SO(k)$.
        """
        def cost(Q):
            Y = Y0.dot(Q)
            return self.neg(Y)

        def cost_grad(Q):
            Y = Y0.dot(Q)
            return Y0.transpose().dot(Y * (Y < 0))

        k = Y0.shape[1]
        SOk = Stiefel(k, k)
        pblm = mo.Problem(manifold=SOk,
                          cost=cost,
                          egrad=cost_grad,
                          verbosity=0)

        Q, log = self.solver.solve(pblm)
        return Y0.dot(Q)
Example #27
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 3
    m = 10
    k = 10

    A = np.random.randn(k, n, m)
    B = np.random.randn(k, n, m)
    ABt = np.array([Ak @ Bk.T for Ak, Bk in zip(A, B)])

    cost, egrad = create_cost_egrad(backend, ABt)
    manifold = SpecialOrthogonalGroup(n, k)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = SteepestDescent()
    X = solver.solve(problem)

    if not quiet:
        Xopt = np.array([compute_optimal_solution(ABtk) for ABtk in ABt])
        print("Frobenius norm error:", np.linalg.norm(Xopt - X))
Example #28
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 10
    rank = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    # Solve the problem with pymanopt.
    cost, egrad, ehess = create_cost_egrad_ehess(backend, matrix, rank)
    manifold = Oblique(rank, num_rows)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    X = solver.solve(problem)

    if quiet:
        return

    C = X.T @ X
    print("Diagonal elements:", np.diag(C))
    print("Eigenvalues:", np.sort(la.eig(C)[0].real)[::-1])
Example #29
def estimate_dominant_eigenvector(matrix):
    """Returns the dominant eigenvector of the symmetric matrix A by minimizing
    the Rayleigh quotient -x' * A * x / (x' * x).
    """
    num_rows, num_columns = gs.shape(matrix)
    if num_rows != num_columns:
        raise ValueError('Matrix must be square.')
    if not gs.allclose(matrix, gs.transpose(matrix)):
        raise ValueError('Matrix must be symmetric.')

    @pymanopt.function.Callable
    def cost(vector):
        return -gs.dot(vector, gs.dot(matrix, vector))

    @pymanopt.function.Callable
    def egrad(vector):
        return -2 * gs.dot(matrix, vector)

    sphere = GeomstatsSphere(num_columns)
    problem = pymanopt.Problem(manifold=sphere, cost=cost, egrad=egrad)
    solver = SteepestDescent()
    return solver.solve(problem)
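A hypothetical usage example with a random symmetric matrix:

import numpy as np

matrix = np.random.normal(size=(16, 16))
matrix = 0.5 * (matrix + matrix.T)              # make the matrix symmetric
vector = estimate_dominant_eigenvector(matrix)
print(np.linalg.norm(vector))                   # unit-norm vector, ~1.0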
Example #30
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 1000
    rank = 5
    low_rank_factor = rnd.randn(num_rows, rank)
    matrix = low_rank_factor @ low_rank_factor.T

    cost, egrad, ehess = create_cost_egrad_ehess(backend, matrix, rank)
    manifold = PSDFixedRank(num_rows, rank)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions(maxiter=500, minstepsize=1e-6)
    low_rank_factor_estimate = solver.solve(problem)

    if quiet:
        return

    print("Rank of target matrix:", la.matrix_rank(matrix))
    matrix_estimate = low_rank_factor_estimate @ low_rank_factor_estimate.T
    print("Frobenius norm error of low-rank estimate:",
          la.norm(matrix - matrix_estimate))