Example #1
    num_rows = 128
    subspace_dimension = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    cost, egrad, ehess = create_cost_egrad_ehess(
        backend, matrix, subspace_dimension)
    manifold = Grassmann(num_rows, subspace_dimension)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    estimated_spanning_set = solver.solve(
        problem, Delta_bar=8*np.sqrt(subspace_dimension))

    if quiet:
        return

    eigenvalues, eigenvectors = la.eig(matrix)
    column_indices = np.argsort(eigenvalues)[-subspace_dimension:]
    spanning_set = eigenvectors[:, column_indices]
    print("Geodesic distance between true and estimated dominant subspace:",
          manifold.dist(spanning_set, estimated_spanning_set))


if __name__ == "__main__":
    runner = ExampleRunner(run, "Dominant invariant subspace",
                           SUPPORTED_BACKENDS)
    runner.run()
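
The excerpt relies on a create_cost_egrad_ehess factory defined earlier in the example file. A minimal sketch of what it might compute for the plain numpy backend (negated trace of the projected matrix; in the actual example the callables would additionally be wrapped with pymanopt's backend decorator, which varies by version):

import numpy as np


def create_cost_egrad_ehess(backend, matrix, subspace_dimension):
    # Sketch for the "numpy" backend only: maximize trace(X.T @ matrix @ X)
    # over matrices X with orthonormal columns (points on the Grassmannian).
    def cost(X):
        return -np.trace(X.T @ matrix @ X)

    def egrad(X):
        # Euclidean gradient; since matrix is symmetric this is -2 * matrix @ X.
        return -(matrix + matrix.T) @ X

    def ehess(X, H):
        # Euclidean Hessian applied to a tangent direction H.
        return -(matrix + matrix.T) @ H

    return cost, egrad, ehess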
Example #2
    cost, egrad, ehess = create_cost_egrad_ehess(backend, samples,
                                                 num_components)
    manifold = Stiefel(dimension, num_components)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    # from pymanopt.solvers import ConjugateGradient
    # solver = ConjugateGradient()
    estimated_span_matrix = solver.solve(problem)

    if quiet:
        return

    estimated_projector = estimated_span_matrix @ estimated_span_matrix.T

    eigenvalues, eigenvectors = np.linalg.eig(samples.T @ samples)
    indices = np.argsort(eigenvalues)[::-1][:num_components]
    span_matrix = eigenvectors[:, indices]
    projector = span_matrix @ span_matrix.T

    print(
        "Frobenius norm error between estimated and closed-form projection "
        "matrix:", np.linalg.norm(projector - estimated_projector))


if __name__ == "__main__":
    runner = ExampleRunner(run, "PCA", SUPPORTED_BACKENDS)
    runner.run()
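
The PCA excerpt likewise assumes a create_cost_egrad_ehess factory. A hedged sketch for the numpy backend, maximizing the variance captured by the orthonormal basis w, which yields the dominant eigenspace of samples.T @ samples that the script compares against:

import numpy as np


def create_cost_egrad_ehess(backend, samples, num_components):
    # Sketch for the "numpy" backend: minimize -trace(w.T @ S @ w) with
    # S = samples.T @ samples over w on the Stiefel manifold.
    scatter = samples.T @ samples

    def cost(w):
        return -np.trace(w.T @ scatter @ w)

    def egrad(w):
        return -2 * scatter @ w

    def ehess(w, h):
        return -2 * scatter @ h

    return cost, egrad, ehess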
Example #3
        problem.verbosity = 0

    solver = ConjugateGradient()
    left_singular_vectors, singular_values, right_singular_vectors = \
        solver.solve(problem)
    low_rank_approximation = (left_singular_vectors @ np.diag(singular_values)
                              @ right_singular_vectors)

    if not quiet:
        u, s, vt = la.svd(matrix, full_matrices=False)
        indices = np.argsort(s)[-rank:]
        low_rank_solution = (
            u[:, indices] @ np.diag(s[indices]) @ vt[indices, :])
        print("Analytic low-rank solution:")
        print()
        print(low_rank_solution)
        print()
        print("Rank-{} approximation:".format(rank))
        print()
        print(low_rank_approximation)
        print()
        print("Frobenius norm error:",
              la.norm(low_rank_approximation - low_rank_solution))
        print()


if __name__ == "__main__":
    runner = ExampleRunner(run, "Low-rank matrix approximation",
                           SUPPORTED_BACKENDS)
    runner.run()
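
The cost and gradient behind this low-rank approximation excerpt are not shown. On the fixed-rank manifold a point is a triple (U, S, Vt); a plausible sketch (hypothetical factory name; whether the point arrives as three arguments or a single tuple depends on the pymanopt version) is:

import numpy as np


def create_cost_egrad(backend, matrix, rank):
    # Hypothetical sketch: squared Frobenius distance between the low-rank
    # factorization u @ diag(s) @ vt and the target matrix.
    def cost(u, s, vt):
        return np.linalg.norm(u @ np.diag(s) @ vt - matrix) ** 2

    def egrad(u, s, vt):
        S = np.diag(s)
        residual = u @ S @ vt - matrix
        grad_u = 2 * residual @ vt.T @ S
        grad_s = 2 * np.diag(u.T @ residual @ vt.T)
        grad_vt = 2 * S @ u.T @ residual
        return grad_u, grad_s, grad_vt

    return cost, egrad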
Example #4
        (
            cost,
            euclidean_gradient,
            euclidean_hessian,
        ) = create_cost_and_derivates(manifold, samples, targets, backend)
        problem = pymanopt.Problem(
            manifold,
            cost,
            euclidean_gradient=euclidean_gradient,
            euclidean_hessian=euclidean_hessian,
        )

        estimated_weights = optimizer.run(problem).point
        if not quiet:
            print(f"Run {k + 1}")
            print(
                "Weights found by pymanopt (top) / "
                "closed form solution (bottom)"
            )
            print(estimated_weights)
            print(np.linalg.pinv(samples) @ targets)
            print("")


if __name__ == "__main__":
    runner = ExampleRunner(
        run, "Multiple linear regression", SUPPORTED_BACKENDS
    )
    runner.run()
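
The regression excerpt compares the optimizer's result with np.linalg.pinv(samples) @ targets, i.e. ordinary least squares. A sketch of what create_cost_and_derivates might compute, assuming the manifold is the Euclidean space of weight vectors (in the actual example the callables are wrapped with the appropriate pymanopt backend decorator):

import numpy as np


def create_cost_and_derivates(manifold, samples, targets, backend):
    # Sketch for the numpy backend: ordinary least squares, whose minimizer
    # is the pseudoinverse solution printed by the script.
    def cost(weights):
        return np.linalg.norm(samples @ weights - targets) ** 2

    def euclidean_gradient(weights):
        return 2 * samples.T @ (samples @ weights - targets)

    def euclidean_hessian(weights, direction):
        # The cost is quadratic, so the Hessian-vector product does not depend
        # on the current weights.
        return 2 * samples.T @ (samples @ direction)

    return cost, euclidean_gradient, euclidean_hessian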
Example #5
    # If the smoothing parameter epsilon of the log-sum-exp approximation is
    # too close to zero, optimization first becomes much slower, then simply
    # doesn't work anymore because of floating point overflow errors (NaN's
    # and Inf's start to appear). If it is too large, then log-sum-exp is a
    # poor approximation of the max function, and the spread will be less
    # uniform. An okay value seems to be 0.01 or 0.001, for example. Note that
    # a better strategy than using a small epsilon straightaway is to reduce
    # epsilon bit by bit and to warm-start each subsequent optimization in
    # that way. The TrustRegions solver is more appropriate for this fine
    # tuning.
    epsilon = 0.0015

    cost = create_cost(backend, dimension, num_points, epsilon)
    manifold = Elliptope(num_points, dimension)
    problem = pymanopt.Problem(manifold, cost)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient(mingradnorm=1e-8, maxiter=1e5)
    Yopt = solver.solve(problem)

    if quiet:
        return

    Xopt = Yopt @ Yopt.T
    # The largest off-diagonal entry of the Gram matrix Xopt is the maximum
    # pairwise inner product, i.e. the cosine of the smallest angle between
    # any two of the points.
    maxdot = np.triu(Xopt, 1).max()
    print("Maximum inner product between any two points:", maxdot)


if __name__ == "__main__":
    runner = ExampleRunner(run, "Packing on the sphere", SUPPORTED_BACKENDS)
    runner.run()
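
The comment above describes a log-sum-exp smoothing of the maximum pairwise inner product. A sketch of a create_cost along those lines (plain numpy shown for clarity; the actual example presumably builds the cost with an automatic-differentiation backend, since no gradient is passed to the problem):

import numpy as np


def create_cost(backend, dimension, num_points, epsilon):
    # Sketch: smooth the maximum off-diagonal entry of the Gram matrix
    # X = Y @ Y.T (the largest pairwise inner product) with a log-sum-exp
    # scaled by epsilon.
    def cost(Y):
        X = Y @ Y.T
        pairs = X[np.triu_indices(num_points, k=1)]
        # Shift by the maximum for numerical stability of the log-sum-exp.
        shift = pairs.max()
        return shift + epsilon * np.log(np.sum(np.exp((pairs - shift) / epsilon)))

    return cost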
Example #6
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m = 5
    n = 8
    matrix = rnd.randn(m, n)

    cost, egrad = create_cost_egrad(backend, matrix)
    manifold = Oblique(m, n)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient()
    Xopt = solver.solve(problem)

    if quiet:
        return

    # Calculate the actual solution by normalizing the columns of `matrix`.
    X = matrix / la.norm(matrix, axis=0)[np.newaxis, :]

    # Print information about the solution.
    print("Solution found: %s" % np.allclose(X, Xopt, rtol=1e-3))
    print("Frobenius-error: %f" % la.norm(X - Xopt))


if __name__ == "__main__":
    runner = ExampleRunner(run, "Closest unit Frobenius norm approximation",
                           SUPPORTED_BACKENDS)
    runner.run()
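
The closest-unit-norm excerpt checks the result against the column-normalized matrix, which is the analytic minimizer of the squared Frobenius distance over the Oblique manifold. A minimal sketch of the assumed create_cost_egrad for the numpy backend:

import numpy as np


def create_cost_egrad(backend, matrix):
    # Sketch: minimize the squared Frobenius distance to `matrix` over
    # matrices with unit-norm columns (the Oblique manifold).
    def cost(X):
        return np.linalg.norm(X - matrix) ** 2

    def egrad(X):
        return 2 * (X - matrix)

    return cost, egrad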

Example #7
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 10
    rank = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    # Solve the problem with pymanopt.
    cost, egrad, ehess = create_cost_egrad_ehess(backend, matrix, rank)
    manifold = Oblique(rank, num_rows)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    X = solver.solve(problem)

    if quiet:
        return

    C = X.T @ X
    print("Diagonal elements:", np.diag(C))
    print("Eigenvalues:", np.sort(la.eig(C)[0].real)[::-1])


if __name__ == "__main__":
    runner = ExampleRunner(run, "Nearest low-rank correlation matrix",
                           SUPPORTED_BACKENDS)
    runner.run()
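
For the nearest low-rank correlation matrix, the factorization C = X.T @ X with unit-norm columns of X guarantees a unit diagonal, and the rank is bounded by the number of rows of X. A hedged sketch of the assumed create_cost_egrad_ehess for the numpy backend:

import numpy as np


def create_cost_egrad_ehess(backend, matrix, rank):
    # Sketch: minimize the squared Frobenius distance between Y.T @ Y and the
    # (symmetric) target matrix; Y has `rank` rows and unit-norm columns.
    def cost(Y):
        return 0.25 * np.linalg.norm(Y.T @ Y - matrix) ** 2

    def egrad(Y):
        return Y @ (Y.T @ Y - matrix)

    def ehess(Y, H):
        return H @ (Y.T @ Y - matrix) + Y @ (H.T @ Y + Y.T @ H)

    return cost, egrad, ehess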
Example #8
    solver = SteepestDescent()
    estimated_dominant_eigenvector = solver.solve(problem)

    if quiet:
        return

    # Calculate the actual solution by a conventional eigenvalue decomposition.
    eigenvalues, eigenvectors = la.eig(matrix)
    dominant_eigenvector = eigenvectors[:, np.argmax(eigenvalues)]

    # Make sure both vectors have the same direction. Both are valid
    # eigenvectors, but for comparison we need to get rid of the sign
    # ambiguity.
    if (np.sign(dominant_eigenvector[0]) != np.sign(
            estimated_dominant_eigenvector[0])):
        estimated_dominant_eigenvector = -estimated_dominant_eigenvector

    # Print information about the solution.
    print("l2-norm of x: %f" % la.norm(dominant_eigenvector))
    print("l2-norm of xopt: %f" % la.norm(estimated_dominant_eigenvector))
    print("Solution found: %s" % np.allclose(
        dominant_eigenvector, estimated_dominant_eigenvector, rtol=1e-3))
    error_norm = la.norm(dominant_eigenvector - estimated_dominant_eigenvector)
    print("l2-error: %f" % error_norm)


if __name__ == "__main__":
    runner = ExampleRunner(run, "Dominant eigenvector of a PSD matrix",
                           SUPPORTED_BACKENDS)
    runner.run()
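
The dominant-eigenvector excerpt starts after the cost has been built. A sketch of a suitable cost and gradient (hypothetical factory name, numpy backend, symmetric matrix as in the example title): the dominant eigenvector maximizes the Rayleigh quotient over the unit sphere.

import numpy as np


def create_cost_egrad(backend, matrix):
    # Sketch: maximize x.T @ matrix @ x over unit vectors x, i.e. minimize its
    # negation; the minimizer is a dominant eigenvector of the symmetric matrix.
    def cost(x):
        return -x @ matrix @ x

    def egrad(x):
        return -2 * matrix @ x

    return cost, egrad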
Example #9
    return (U * J) @ Vt


def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 3
    m = 10
    k = 10

    A = np.random.randn(k, n, m)
    B = np.random.randn(k, n, m)
    ABt = np.array([Ak @ Bk.T for Ak, Bk in zip(A, B)])

    cost, egrad = create_cost_egrad(backend, ABt)
    manifold = SpecialOrthogonalGroup(n, k)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = SteepestDescent()
    X = solver.solve(problem)

    if not quiet:
        Xopt = np.array([compute_optimal_solution(ABtk) for ABtk in ABt])
        print("Frobenius norm error:", np.linalg.norm(Xopt - X))


if __name__ == "__main__":
    runner = ExampleRunner(run, "Optimal rotations example",
                           SUPPORTED_BACKENDS)
    runner.run()
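
Minimizing sum_k ||A_k - X_k @ B_k||_F^2 over rotations X_k is equivalent to maximizing sum_k trace(X_k.T @ A_k @ B_k.T), which is why only the stacked products ABt enter the cost. A hedged sketch of the assumed create_cost_egrad for the numpy backend:

import numpy as np


def create_cost_egrad(backend, ABt):
    # Sketch: the inner product of each rotation X_k with the corresponding
    # slice ABt_k, summed over k and negated.
    def cost(X):
        return -np.sum(X * ABt)

    def egrad(X):
        # The cost is linear in X, so the Euclidean gradient is constant.
        return -ABt

    return cost, egrad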
Example #10
from examples._tools import ExampleRunner
from pymanopt.manifolds import Positive
from pymanopt.tools.diagnostics import check_retraction

SUPPORTED_BACKENDS = ("numpy", )


def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m = 128
    n = 64
    manifold = Positive(m, n, k=2)
    check_retraction(manifold)


if __name__ == "__main__":
    runner = ExampleRunner(run, "Check retraction of positive manifold",
                           SUPPORTED_BACKENDS)
    runner.run()
Example #11
        @pymanopt.function.Theano(x)
        def cost(x):
            return -x.T.dot(T.dot(A, x))
    else:
        raise ValueError("Unsupported backend '{:s}'".format(backend))

    return cost, egrad


def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    # Generate random problem data.
    n = 128
    A = np.random.randn(n, n)
    A = 0.5 * (A + A.T)
    cost, egrad = create_cost_egrad(backend, A)

    # Create the problem structure.
    manifold = Sphere(n)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    # Numerically check gradient consistency (optional).
    check_gradient(problem)


if __name__ == "__main__":
    runner = ExampleRunner(run, "Check gradient for sphere manifold",
                           SUPPORTED_BACKENDS)
    runner.run()
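
check_gradient numerically compares the supplied gradient against finite differences of the cost. As a rough illustration of the idea (not pymanopt's implementation, and ignoring the manifold structure), one can compare the directional derivative predicted by egrad with a central difference in the ambient space:

import numpy as np


def finite_difference_gradient_check(cost, egrad, x, num_trials=5, step=1e-6):
    # Compare <egrad(x), d> with a central finite difference of the cost along
    # random unit directions d; close agreement indicates a consistent gradient.
    for _ in range(num_trials):
        d = np.random.randn(*x.shape)
        d /= np.linalg.norm(d)
        predicted = np.sum(egrad(x) * d)
        measured = (cost(x + step * d) - cost(x - step * d)) / (2 * step)
        print("predicted:", predicted, "measured:", measured)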