Code example #1
def cp_mds_reg(X, D, lam=1.0, v=1, maxiter=1000):
    """Version of MDS in which "signs" are also an optimization parameter.

    Rather than performing a full optimization and then resetting the
    sign matrix, here we treat the signs as a parameter `A = [a_ij]` and
    minimize the cost function
        F(X,A) = ||W*(X^H(A*X) - cos(D))||^2 + lambda*||A - X^HX/|X^HX| ||^2
    Lambda is a regularization parameter we can experiment with. The
    collection of data, `X`, is treated as a point on the `Oblique`
    manifold, consisting of `k*n` matrices with unit-norm columns. Since
    we are working on a sphere in complex space we require `k` to be
    even. The first `k/2` entries of each column are the real components
    and the last `k/2` entries are the imaginary parts.

    Parameters
    ----------
    X : ndarray (k, n)
        Initial guess for data.
    D : ndarray (n, n)
        Goal distance matrix for the `n` points.
    lam : float, optional
        Weight to give regularization term.
    v : int, optional
        Verbosity

    Returns
    -------
    X_opt : ndarray (k, n)
        Collection of points optimizing cost.
    A_real, A_imag : ndarray (n, n)
        Real and imaginary parts of the optimized sign matrix.

    """

    dim = X.shape[0]
    num_points = X.shape[1]
    W = distance_to_weights(D)
    Sreal, Simag = norm_rotations(X)
    A = np.vstack((np.reshape(Sreal, (1, num_points**2)),
                   np.reshape(Simag, (1, num_points**2))))
    cp_manifold = Oblique(dim, num_points)
    a_manifold = Oblique(2, num_points**2)
    manifold = Product((cp_manifold, a_manifold))
    solver = ConjugateGradient(maxiter=maxiter, maxtime=float('inf'))
    cost = setup_reg_autograd_cost(D, int(dim / 2), num_points, lam=lam)
    problem = pymanopt.Problem(cost=cost, manifold=manifold)
    Xopt, Aopt = solver.solve(problem, x=(X, A))
    Areal = np.reshape(Aopt[0, :], (num_points, num_points))
    Aimag = np.reshape(Aopt[1, :], (num_points, num_points))
    return Xopt, Areal, Aimag
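
The helpers `distance_to_weights`, `norm_rotations`, and `setup_reg_autograd_cost` are imported, not shown. As a rough sketch of the stacking convention the docstring describes (hypothetical code, not from the source), the complex Gram matrix and its phases could be computed as:

import numpy as np

def stacked_to_complex(X):
    # Hypothetical helper: the first k/2 rows hold real parts and the
    # last k/2 rows imaginary parts, per the docstring's convention.
    k = X.shape[0] // 2
    return X[:k, :] + 1j * X[k:, :]

def norm_rotations(X):
    # Hypothetical reconstruction of the helper used above: the phases
    # s_ij = <z_i, z_j> / |<z_i, z_j>| of the complex Gram matrix,
    # returned as separate real and imaginary parts.
    Z = stacked_to_complex(X)
    G = Z.conj().T @ Z
    mod = np.abs(G)
    mod[mod == 0] = 1.0  # guard: orthogonal pairs get phase 0
    return G.real / mod, G.imag / mod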
Code example #2
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 10
    rank = 3
    matrix = np.random.normal(size=(num_rows, num_rows))
    matrix = 0.5 * (matrix + matrix.T)

    # Solve the problem with pymanopt.
    manifold = Oblique(rank, num_rows)
    cost, euclidean_gradient, euclidean_hessian = create_cost_and_derivates(
        manifold, matrix, backend
    )
    problem = pymanopt.Problem(
        manifold,
        cost,
        euclidean_gradient=euclidean_gradient,
        euclidean_hessian=euclidean_hessian,
    )

    optimizer = TrustRegions(verbosity=2 * int(not quiet))
    X = optimizer.run(problem).point

    if quiet:
        return

    C = X.T @ X
    print("Diagonal elements:", np.diag(C))
    print("Eigenvalues:", np.sort(np.linalg.eig(C)[0].real)[::-1])
Code example #3
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m = 5
    n = 8
    matrix = np.random.normal(size=(m, n))

    manifold = Oblique(m, n)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend)
    problem = pymanopt.Problem(manifold,
                               cost,
                               euclidean_gradient=euclidean_gradient)

    optimizer = ConjugateGradient(verbosity=2 * int(not quiet),
                                  beta_rule="FletcherReeves")
    Xopt = optimizer.run(problem).point

    if quiet:
        return

    # Calculate the actual solution by normalizing the columns of matrix.
    X = matrix / np.linalg.norm(matrix, axis=0)[np.newaxis, :]

    # Print information about the solution.
    print("Solution found:", np.allclose(X, Xopt, rtol=1e-3))
    print("Frobenius-error:", np.linalg.norm(X - Xopt))
Code example #4
def closest_unit_norm_column_approximation(A):
    """
    Returns the matrix with unit-norm columns that is closest to A w.r.t. the
    Frobenius norm.
    """
    m, n = A.shape

    manifold = Oblique(m, n)
    solver = ConjugateGradient()
    X = T.matrix()
    cost = 0.5 * T.sum((X - A)**2)

    problem = Problem(manifold=manifold, cost=cost, arg=X)
    return solver.solve(problem)
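
Hypothetical usage: since the optimum is plain column normalization, the result can be checked in closed form.

import numpy as np

A = np.random.normal(size=(5, 8))
X = closest_unit_norm_column_approximation(A)
# The minimizer is A with each column rescaled to unit norm.
print(np.allclose(X, A / np.linalg.norm(A, axis=0), rtol=1e-3))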
Code example #5
def rank_k_correlation_matrix_approximation(A, k):
    """
    Returns the rank-k correlation matrix (unit diagonal, rank at most k)
    that is closest to A w.r.t. the Frobenius norm.
    """
    m, n = A.shape
    assert m == n, "matrix must be square"
    assert np.allclose(A, A.T), "matrix must be symmetric"

    manifold = Oblique(k, n)
    solver = TrustRegions()
    X = T.matrix()
    cost = 0.25 * T.sum((T.dot(X.T, X) - A)**2)

    problem = Problem(manifold=manifold, cost=cost, arg=X)
    return solver.solve(problem)
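
For reference, the Euclidean gradient of this cost is X(X^T X - A) when A is symmetric; a quick finite-difference check (new code, not from the source):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(6, 6))
A = 0.5 * (A + A.T)                       # symmetric target
X = rng.normal(size=(3, 6))

f = lambda Z: 0.25 * np.sum((Z.T @ Z - A) ** 2)
G = X @ (X.T @ X - A)                     # claimed gradient

eps = 1e-6
G_fd = np.zeros_like(X)
for idx in np.ndindex(*X.shape):
    Xp, Xm = X.copy(), X.copy()
    Xp[idx] += eps
    Xm[idx] -= eps
    G_fd[idx] = (f(Xp) - f(Xm)) / (2 * eps)

print(np.allclose(G, G_fd, atol=1e-5))    # True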
Code example #6
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    num_rows = 10
    rank = 3
    matrix = rnd.randn(num_rows, num_rows)
    matrix = 0.5 * (matrix + matrix.T)

    # Solve the problem with pymanopt.
    cost, egrad, ehess = create_cost_egrad_ehess(backend, matrix, rank)
    manifold = Oblique(rank, num_rows)
    problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions()
    X = solver.solve(problem)

    if quiet:
        return

    C = X.T @ X
    print("Diagonal elements:", np.diag(C))
    print("Eigenvalues:", np.sort(la.eig(C)[0].real)[::-1])
Code example #7
def main_mds(D, dim=3, X=None, space='real'):
    """MDS via gradient descent with the chordal metric.

    Parameters
    ----------
    D : ndarray (n, n)
        Goal distance matrix.
    dim : int, optional
        Goal dimension (of ambient Euclidean space). Default is `dim = 3`.
    X : ndarray (dim, n), optional
        Initial value for gradient descent. `n` points in dimension `dim`. If
        both a dimension and an initial condition are specified, the initial
        condition overrides the dimension.
    space : str, optional
        Choice of real or complex version. Options 'real', 'complex'. If
        'complex', dim must be even.

    """

    n = D.shape[0]
    max_d = np.max(D)
    if max_d > 1:
        print('WARNING: maximum value in distance matrix exceeds diameter of '
              'projective space. Max distance = %2.4f.' % max_d)
    manifold = Oblique(dim, n)
    solver = ConjugateGradient()
    if space == 'real':
        cost = setup_cost(D)
    elif space == 'complex':
        cost = setup_CPn_cost(D, int(dim/2))
    else:
        raise ValueError("space must be 'real' or 'complex'")
    problem = pymanopt.Problem(manifold=manifold, cost=cost)
    if X is None:
        X_out = solver.solve(problem)
    else:
        if X.shape[0] != dim:
            print('WARNING: initial condition does not match specified goal '
                  'dimension. Finding optimum in dimension %d' % X.shape[0])
        X_out = solver.solve(problem, x=X)
    return X_out
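
`setup_cost` (and its sign-matrix variants used in later examples) is assumed rather than shown. A hypothetical reconstruction, written with autograd-compatible numpy so pymanopt can differentiate it (the `return_derivatives` option seen elsewhere is omitted):

import autograd.numpy as anp

def setup_cost(D, S=None):
    W = distance_to_weights(D)  # assumed helper from the same module
    C = anp.cos(D) if S is None else S * anp.cos(D)

    def cost(Y):
        # Weighted squared error between the Gram matrix of the
        # unit-norm columns of Y and the (sign-corrected) cosines.
        return 0.5 * anp.linalg.norm(W * (Y.T @ Y - C)) ** 2

    return cost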
Code example #8
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    m = 5
    n = 8
    matrix = rnd.randn(m, n)

    cost, egrad = create_cost_egrad(backend, matrix)
    manifold = Oblique(m, n)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    solver = ConjugateGradient()
    Xopt = solver.solve(problem)

    if quiet:
        return

    # Calculate the actual solution by normalizing the columns of A.
    X = matrix / la.norm(matrix, axis=0)[np.newaxis, :]

    # Print information about the solution.
    print("Solution found: %s" % np.allclose(X, Xopt, rtol=1e-3))
    print("Frobenius-error: %f" % la.norm(X - Xopt))
Code example #9
    if not os.path.isdir('result'):
        os.makedirs('result')
    path = os.path.join('result', experiment_name + '.csv')

    N = 5
    n = 10
    p = 5

    for i in range(n_exp):

        matrices = []
        for k in range(N):
            B = rnd.randn(n, n)
            C = (B + B.T) / 2
            matrices.append(C)

        cost = create_cost(matrices)
        manifold = Oblique(n, p)
        problem = pymanopt.Problem(manifold, cost=cost, egrad=None)

        res_list = []

        for beta_type in BetaTypes:
            solver = ConjugateGradient(beta_type=beta_type, maxiter=10000)
            res = solver.solve(problem)
            res_list.append(res[1])
            res_list.append(res[2])

        with open(path, 'a') as f:
            writer = csv.writer(f)
            writer.writerow(res_list)
Code example #10
        dim = LA.matrix_rank(X)
    else:
        # TODO: implement a simple way of getting points on sphere.
        pass
    if verbosity > 0:
        print('Finding projection onto RP^%i.' % (dim - 1))
    W = distance_to_weights(D)
    S = np.sign(X @ X.T)
    C = S * np.cos(D)
    if np.sum(S == 0) > 0:
        print('Warning: Some initial guess vectors are orthogonal, this may ' +
              'cause issues with convergence.')
    cost = setup_cost(D, S)
    cost_list = [cost(X.T)]
    true_cost = setup_cost(projective_distance_matrix(X), S)
    true_cost_list = [true_cost(X.T)]
    manifold = Oblique(dim, num_points)  # Short, wide matrices.
    if pmo_solve == 'nm':
        solver = NelderMead()
    elif pmo_solve == 'ps':
        solver = ParticleSwarm()
    elif pmo_solve == 'tr':
        solver = TrustRegions()
    elif pmo_solve == 'sd':
        solver = SteepestDescent()
    else:
        solver = ConjugateGradient()
    for i in range(0, max_iter):
        if autograd:
            cost = setup_cost(D, S)
            problem = pymanopt.Problem(manifold, cost, verbosity=verbosity)
        else:
Code example #11
def RELMM(data, A_init, S0, lambda_S, lambda_S0):

    [L, N] = data.shape

    [L, P] = S0.shape

    V = P * np.eye(P) - np.outer(np.ones(P), np.ones(P))

    def cost(X):

        data_fit = np.zeros(N)

        for n in np.arange(N):
            data_fit[n] = np.linalg.norm(
                S[:, :, n] - np.dot(X, np.diag(psi[:, n])), 'fro')**2

        cost = lambda_S / 2 * np.sum(data_fit,
                                     axis=0) + lambda_S0 / 2 * np.trace(
                                         np.dot(np.dot(X, V), np.transpose(X)))

        return cost

    def egrad(X):

        partial_grad = np.zeros([L, P, N])

        for n in np.arange(N):
            partial_grad[:, :, n] = np.dot(X, np.diag(psi[:, n])) - np.dot(
                S[:, :, n], np.diag(psi[:, n]))

        egrad = lambda_S * np.sum(partial_grad, axis=2) + lambda_S0 * np.dot(
            X, V)

        return egrad

    A = A_init
    S = np.zeros([L, P, N])
    psi = np.ones([P, N])

    for n in np.arange(N):
        S[:, :, n] = S0

    maxiter = 200

    U = A  # split variable
    D = np.zeros(A.shape)  # Lagrange multipliers

    rho = 1

    maxiter_ADMM = 100
    tol_A_ADMM = 10**-3
    tol_A = 10**-3
    tol_S = 10**-3
    tol_psi = 10**-3
    tol_S0 = 10**-3

    I = np.identity(P)

    for i in np.arange(maxiter):

        A_old = np.copy(A)
        psi_old = np.copy(psi)
        S_old = np.copy(S)
        S0_old = np.copy(S0)

        # A update

        for j in np.arange(maxiter_ADMM):

            A_old_ADMM = np.copy(A)

            for n in np.arange(N):
                A[:, n] = np.dot(
                    np.linalg.inv(
                        np.dot(np.transpose(S[:, :, n]), S[:, :, n]) +
                        rho * I),
                    np.dot(np.transpose(S[:, :, n]), data[:, n]) + rho *
                    (U[:, n] - D[:, n]))

            U = proj_simplex(A + D)

            D = D + A - U

            if j > 0:
                rel_A_ADMM = np.abs((np.linalg.norm(A, 'fro') - np.linalg.norm(
                    A_old_ADMM, 'fro'))) / np.linalg.norm(A_old_ADMM, 'fro')

                print("iteration ", j, " of ", maxiter_ADMM, ", rel_A_ADMM =",
                      rel_A_ADMM)

                if rel_A_ADMM < tol_A_ADMM:
                    break

        # psi update

        for n in np.arange(N):
            for p in np.arange(P):
                psi[p,
                    n] = np.dot(np.transpose(S0[:, p]), S[:, p, n]) / np.dot(
                        np.transpose(S0[:, p]), S0[:, p])

        # S update

        for n in np.arange(N):
            S[:, :, n] = np.dot(
                np.outer(data[:, n], np.transpose(A[:, n])) +
                lambda_S * np.dot(S0, np.diag(psi[:, n])),
                np.linalg.inv(
                    np.outer(A[:, n], np.transpose(A[:, n])) + lambda_S * I))

        # S0 update

        manifold = Oblique(L, P)
        solver = ConjugateGradient()
        problem = Problem(manifold=manifold, cost=cost, egrad=egrad)
        S0 = solver.solve(problem)

        # termination checks

        if i > 0:

            S_vec = np.hstack(S)

            rel_A = np.abs(
                np.linalg.norm(A, 'fro') -
                np.linalg.norm(A_old, 'fro')) / np.linalg.norm(A_old, 'fro')
            rel_psi = np.abs(
                np.linalg.norm(psi, 'fro') -
                np.linalg.norm(psi_old, 'fro')) / np.linalg.norm(
                    psi_old, 'fro')
            rel_S = np.abs(
                np.linalg.norm(S_vec) -
                np.linalg.norm(np.hstack(S_old))) / np.linalg.norm(S_old)
            rel_S0 = np.abs(
                np.linalg.norm(S0, 'fro') -
                np.linalg.norm(S0_old, 'fro')) / np.linalg.norm(S0_old, 'fro')

            print("iteration ", i, " of ", maxiter, ", rel_A =", rel_A,
                  ", rel_psi =", rel_psi, "rel_S =", rel_S, "rel_S0 =", rel_S0)

            if rel_A < tol_A and rel_psi < tol_psi and rel_S < tol_S and rel_S0 < tol_S0 and i > 1:
                break

    return A, psi, S, S0
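
`proj_simplex` is assumed rather than shown. A plausible stand-in is the sort-based Euclidean projection of each column onto the probability simplex (Duchi et al., 2008); this is a sketch, not necessarily the author's implementation:

import numpy as np

def proj_simplex(Y):
    P, N = Y.shape
    X = np.zeros_like(Y)
    for n in range(N):
        y = Y[:, n]
        u = np.sort(y)[::-1]              # sort descending
        css = np.cumsum(u)
        # Largest index k with u_k * (k+1) > css_k - 1 (0-indexed).
        rho = np.nonzero(u * np.arange(1, P + 1) > css - 1)[0][-1]
        theta = (css[rho] - 1.0) / (rho + 1.0)
        X[:, n] = np.maximum(y - theta, 0.0)
    return X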
Code example #12
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.man = Oblique(m, n)
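
A natural test to pair with this fixture (new code; assumes the manifold exposes a `dim` property, as pymanopt manifolds do):

    def test_dim(self):
        # Oblique(m, n) is a product of n copies of the sphere S^{m-1},
        # so its manifold dimension is (m - 1) * n.
        assert self.man.dim == (self.m - 1) * self.n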
Code example #13
def FS_mds(Y,
           D,
           p,
           max_iter=20,
           verbosity=1,
           pmo_solve='cg',
           autograd=False,
           appx=False,
           minstepsize=1e-10,
           mingradnorm=1e-6):
    """VERY EXPERIMENTAL.

    Attempts to align a collection of data points in the lens space
    :math:`L_p^n` so that the collection of distances between each pair
    of points matches a given input distance matrix as closely as
    possible.

    Parameters
    ----------
    Y : ndarray (m, d)
        Initial guess of data points. Each row corresponds to a point on
        the (2n-1)-sphere, so must have norm one and d must be even.
    D : ndarray (square)
        Distance matrix to optimize toward.
    p : int
        Cyclic group with which to act.
    max_iter: int, optional
        Maximum number of times to iterate the loop.
    verbosity: int, optional
        Amount of output to display at each iteration.
    pmo_solve: string, {'cg','sd','tr','nm','ps'}
        Solver to use with pymanopt. Default is conjugate gradient.

    Returns
    -------
    Y : ndarray
        Optimal configuration of points on the lens space.
    C : list (float)
        Computed cost at each loop of the iteration.

    Notes
    -----
    The optimization can only be carried out w.r.t. an approximation of
    the true cost function (which is not differentiable). The computed
    cost C need not match T, but should decrease when T does.

    """

    m = Y.shape[0]
    d = Y.shape[1] - 1
    W = distance_to_weights(D)
    omega = g_action_matrix(p, d)
    if appx:
        M = get_blurred_masks(Y.T, omega, p, D)
    else:
        S = optimal_rotation(Y.T, omega, p)
        M = get_masks(S, p)
    C = np.cos(D)
    # TODO: verify that input is valid.
    cost = setup_sum_cost(omega, M, D, W, p)
    cost_list = [cost(Y.T)]
    manifold = Oblique(d + 1, m)  # Short, wide matrices.
    if pmo_solve == 'cg':
        solver = ConjugateGradient(minstepsize=minstepsize,
                                   mingradnorm=mingradnorm)
    elif pmo_solve == 'nm':
        solver = NelderMead()
    else:
        solver = ConjugateGradient()
    for i in range(0, max_iter):
        if autograd:
            cost = setup_sum_cost(omega, M, D, W, p)
            problem = pymanopt.Problem(manifold, cost, verbosity=verbosity)
        else:
            cost, egrad = setup_sum_cost(omega,
                                         M,
                                         D,
                                         W,
                                         p,
                                         return_derivatives=True)
            problem = pymanopt.Problem(manifold,
                                       cost,
                                       egrad=egrad,
                                       verbosity=verbosity)
        if pmo_solve == 'cg' or pmo_solve == 'sd' or pmo_solve == 'tr':
            Y_new = solver.solve(problem, x=Y.T)
        else:
            Y_new = solver.solve(problem)
        Y_new = Y_new.T  # Y should be tall-skinny
        cost_oldM = cost(Y_new.T)
        # cost_list.append(cost_oldM)
        S_new = optimal_rotation(Y_new.T, omega, p)
        M_new = get_masks(S_new, p)
        cost_new = setup_sum_cost(omega, M_new, D, W, p)
        cost_newM = cost_new(Y_new.T)
        cost_list.append(cost_newM)
        percent_cost_diff = 100 * (cost_list[i] - cost_oldM) / cost_list[i]
        # NOW DO AN UPDATE RUN WITH FS-METRIC!
        print('Entering FS run.')
        fs_cost = setup_fubini_study_cost(D, W)
        problem = pymanopt.Problem(manifold, fs_cost, verbosity=verbosity)
        Ycplx = complexify(Y_new.T)
        Y_FS = solver.solve(problem, x=Ycplx)
        Y_FS = realify(Y_FS)
        cost_newFS = cost_new(Y_FS)
        if verbosity > 0:
            print('Through %i iterations:' % (i + 1))
            #           print('\tTrue cost: %2.2f' %true_cost(Y_new.T))
            print('\tComputed cost: %2.2f' % cost_oldM)
            print('\tPercent cost difference: % 2.2f' % percent_cost_diff)
            #           print('\tPercent Difference in S: % 2.2f' %percent_S_diff)
            print('\tComputed cost with new M: %2.2f' % cost_newM)
            print('\tComputed cost after FS update: %2.2f' % cost_newFS)
            if np.isnan(cost_newM):
                stuff = {
                    'Y_new': Y_new,
                    'Y_old': Y,
                    'S_new': S_new,
                    'S_old': S,
                    'M_new': M_new,
                    'M_old': M,
                    'cost_fn_new': cost_new,
                    'cost_fn_old': cost,
                    'grad': egrad
                }
                return Y_new, stuff
#           print('\tDifference in cost matrix: %2.2f' %(LA.norm(C-C_new)))
#       if S_diff < 1:
#           print('No change in S matrix. Stopping iterations')
#           break
        if percent_cost_diff < .0001:
            print('No significant cost improvement. Stopping iterations.')
            break
        if i == max_iter - 1:
            print('Maximum iterations reached.')
        # Update variables:
        Y = Y_FS
        #       C = C_new
        #       S = S_new
        M = M_new
    return Y, cost_list
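
`complexify` and `realify` are assumed helpers. A sketch reusing the stacking convention from code example #1 (the source's actual convention, e.g. interleaved components, may differ):

import numpy as np

def complexify(Y):
    # First half of the rows as real parts, second half as imaginary.
    k = Y.shape[0] // 2
    return Y[:k, :] + 1j * Y[k:, :]

def realify(Y):
    # Inverse of complexify: restack a complex matrix as a real one.
    return np.vstack((Y.real, Y.imag))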
Code example #14
if __name__ == "__main__":
    experiment_name = 'closest-unit'
    n_exp = 10

    if not os.path.isdir('result'):
        os.makedirs('result')
    path = os.path.join('result', experiment_name + '.csv')

    m = 10
    n = 1000

    for i in range(n_exp):
        matrix = rnd.randn(m, n)

        cost = create_cost(matrix)
        manifold = Oblique(m, n)
        problem = pymanopt.Problem(manifold, cost=cost, egrad=None)

        res_list = []

        for beta_type in BetaTypes:
            solver = ConjugateGradient(beta_type=beta_type, maxiter=10000)
            res = solver.solve(problem)
            res_list.append(res[1])
            res_list.append(res[2])

        with open(path, 'a') as f:
            writer = csv.writer(f)
            writer.writerow(res_list)
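
`create_cost` is assumed; given the experiment name 'closest-unit', a plausible sketch is the closest-unit-norm-column cost from the earlier examples, written with autograd-compatible numpy so pymanopt can differentiate it:

import autograd.numpy as anp

def create_cost(matrix):
    # Hypothetical helper: squared Frobenius distance to `matrix`.
    def cost(X):
        return 0.5 * anp.sum((X - matrix) ** 2)
    return cost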
Code example #15
def pmds(Y, D, max_iter=20, verbosity=1, autograd=False, pmo_solve='cg'):
    """Projective multi-dimensional scaling algorithm.

    Detailed description in career grant, pages 6-7 (method 1).

    Parameters
    ----------
    Y : ndarray
        Initial guess of points in RP^k. Result will lie on RP^k for
        same k as the initial guess.
    D : ndarray
        Square distance matrix determining cost.
    max_iter : int, optional
        Number of times to iterate the loop. Will eventually be updated
        to a better convergence criterion. Default is 20.
    verbosity : int, optional
        If positive, print output relating to convergence conditions at each
        iteration.
    autograd : bool, optional
        If True, compute derivatives with automatic differentiation
        instead of the analytic gradient and Hessian.
    pmo_solve : string, optional
        Solver to use with pymanopt. Default is conjugate gradient ('cg').

    Returns
    -------
    Y : ndarray
        Optimal configuration of points in RP^k.
    C : list
        List of computed costs at each iteration.
    T : list
        List of true costs at each iteration.

    """

    num_points = Y.shape[0]
    rank = LA.matrix_rank(Y)
    if verbosity > 0:
        print('Finding projection onto RP^%i.' %(rank-1))
    W = distance_to_weights(D)
    S = np.sign(Y@Y.T)
    C = S*np.cos(D)
    if np.sum(S == 0) > 0:
        print('Warning: Some initial guess vectors are orthogonal, this may ' +
            'cause issues with convergence.')
    cost = setup_cost(D,S)
    cost_list = [cost(Y.T)]
    true_cost = setup_cost(projective_distance_matrix(Y),S)
    true_cost_list = [true_cost(Y.T)]
    manifold = Oblique(rank,num_points) # Short, wide matrices.
    solver = ConjugateGradient()
# TODO: play with alternate solve methods and manifolds.
#   if pmo_solve == 'nm':
#       solver = NelderMead()
#   if pmo_solve == 'ps':
#       solver = ParticleSwarm()
#   if pmo_solve == 'tr':
#       solver = TrustRegions()
#   if pmo_solve == 'sd':
#       solver = SteepestDescent()
#   else:
#       solver = ConjugateGradient()
#       if solve_prog == 'matlab':
# TODO: this may generate errors based on changes to other methods.
#           cost, egrad, ehess = setup_cost(C,W)
#           workspace = lrcm_wrapper(C,W,Y)
#           Y_new = workspace['optimal_matrix']
    for i in range(0,max_iter):
        if autograd:
            cost = setup_cost(D,S)
            problem = pymanopt.Problem(manifold, cost, verbosity=verbosity)
        else:
            cost, egrad, ehess = setup_cost(D,S,return_derivatives=True)
            problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess, verbosity=verbosity)
        if pmo_solve == 'cg' or pmo_solve == 'sd' or pmo_solve == 'tr':
            # Use initial condition with gradient-based solvers.
            Y_new = solver.solve(problem,x=Y.T)
        else:
            Y_new = solver.solve(problem)
        Y_new = Y_new.T     # Y should be tall-skinny
        cost_oldS = cost(Y_new.T)
        cost_list.append(cost_oldS)
        S_new = np.sign(Y_new@Y_new.T)
        C_new = S_new*np.cos(D)
        cost_new = setup_cost(D,S_new)
        cost_newS = cost_new(Y_new.T)
        S_diff = ((LA.norm(S_new - S))**2)/4
        percent_S_diff = 100*S_diff/S_new.size
        percent_cost_diff = 100*(cost_list[i] - cost_list[i+1])/cost_list[i]
        true_cost = setup_cost(projective_distance_matrix(Y),S)
        true_cost_list.append(true_cost(Y_new.T))
        # Do an SVD to get the correlation matrix on the sphere.
        # Y,s,vh = LA.svd(out_matrix,full_matrices=False)
        if verbosity > 0:
            print('Through %i iterations:' %(i+1))
            print('\tTrue cost: %2.2f' %true_cost(Y_new.T))
            print('\tComputed cost: %2.2f' %cost_list[i+1])
            print('\tPercent cost difference: % 2.2f' %percent_cost_diff)
            print('\tPercent Difference in S: % 2.2f' %percent_S_diff)
            print('\tComputed cost with new S: %2.2f' %cost_newS)
            print('\tDifference in cost matrix: %2.2f' %(LA.norm(C-C_new)))
        if S_diff < 1:
            print('No change in S matrix. Stopping iterations')
            break
        if percent_cost_diff < .0001:
            print('No significant cost improvement. Stopping iterations.')
            break
        if i == max_iter - 1:
            print('Maximum iterations reached.')
        # Update variables:
        Y = Y_new
        C = C_new
        S = S_new
    return Y, cost_list, true_cost_list
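
`projective_distance_matrix` is another assumed helper. A plausible sketch: geodesic distances in RP^k between the rows of Y, identifying antipodal representatives via the absolute value.

import numpy as np

def projective_distance_matrix(Y):
    # Assumes the rows of Y are unit vectors.
    G = np.clip(np.abs(Y @ Y.T), 0.0, 1.0)
    return np.arccos(G)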
Code example #16
def cp_mds(D, X, max_iter=20, v=1):
    """Projective multi-dimensional scaling algorithm.

    Detailed description in career grant, pages 6-7 (method 1).

    Parameters
    ----------
    D : ndarray (k, k)
        Square distance matrix determining cost.
    X : ndarray (2n+2, k)
        Initial guess of `k` points in CP^n. Result will lie on CP^n for
        same `n` as the initial guess. (Each column is a data point.)
    max_iter : int, optional
        Number of times to iterate the loop. Will eventually be updated
        to a better convergence criterion. Default is 20.
    v : int, optional
        Verbosity. If positive, print output relating to convergence
        conditions at each iteration.

    Returns
    -------
    X : ndarray (2n+2, k)
        Optimal configuration of points in CP^n.
    C : list
        List of costs at each iteration.

    """

    dim = X.shape[0]
    num_points = X.shape[1]
    start_cost_list = []
    end_cost_list = []
    loop_diff = np.inf
    percent_cost_diff = 100
    # rank = LA.matrix_rank(X)
    vprint('Finding optimal configuration in CP^%i.' % ((dim - 2) // 2), 1, v)
    W = distance_to_weights(D)
    Sreal, Simag = norm_rotations(X)
    manifold = Oblique(dim, num_points)
    # Oblique manifold is dim*num_points matrices with unit-norm columns.
    solver = ConjugateGradient()
    for i in range(0, max_iter):
        # AUTOGRAD VERSION
        cost = setup_CPn_autograd_cost(D, Sreal, Simag, int(dim / 2))
        # ANALYTIC VERSION:
        #cost, egrad, ehess = setup_CPn_cost(D, Sreal, Simag)
        start_cost_list.append(cost(X))
        # AUTOGRAD VERSION:
        problem = pymanopt.Problem(manifold, cost, verbosity=v)
        # ANALYTIC VERSION:
        #problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess,
        #   verbosity=v)
        X_new = solver.solve(problem, x=X)
        end_cost_list.append(cost(X_new))
        Sreal_new, Simag_new = norm_rotations(X_new)
        S_diff = LA.norm(Sreal_new - Sreal)**2 + LA.norm(Simag_new - Simag)**2
        iter_diff = start_cost_list[i] - end_cost_list[i]
        if i > 0:
            loop_diff = end_cost_list[i - 1] - end_cost_list[i]
            percent_cost_diff = 100 * loop_diff / end_cost_list[i - 1]
        vprint('Through %i iterations:' % (i + 1), 1, v)
        vprint('\tCost at start: %2.4f' % start_cost_list[i], 1, v)
        vprint('\tCost at end: %2.4f' % end_cost_list[i], 1, v)
        vprint('\tCost reduction from optimization: %2.4f' % iter_diff, 1, v)
        vprint('\tCost reduction over previous loop: %2.4f' % loop_diff, 1, v)
        vprint('\tPercent cost difference: % 2.4f' % percent_cost_diff, 1, v)
        vprint('\tDifference in S: % 2.2f' % S_diff, 1, v)
        if S_diff < .0001:
            vprint('No change in S matrix. Stopping iterations', 0, v)
            break
        if percent_cost_diff < .0001:
            vprint('No significant cost improvement. Stopping iterations.', 0,
                   v)
            break
        if i == max_iter - 1:
            vprint('Maximum iterations reached.', 0, v)
        # Update variables:
        X = X_new
        Sreal = Sreal_new
        Simag = Simag_new
    return X
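
`vprint` is an assumed logging helper; judging from the call sites, a minimal sketch is:

def vprint(msg, level, verbosity):
    # Print `msg` only when the verbosity setting reaches the
    # message's level (level-0 messages are effectively always shown).
    if verbosity >= level:
        print(msg)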
Code example #17
def rp_mds(D, X, max_iter=20, verbosity=1):
    """Projective multi-dimensional scaling algorithm.

    Detailed description in career grant, pages 6-7 (method 1).

    Parameters
    ----------
    D : ndarray
        Square distance matrix determining cost.
    X : ndarray
        Initial guess of points in RP^k. Result will lie on RP^k for
        same k as the initial guess.
    max_iter : int, optional
        Number of times to iterate the loop. Will eventually be updated
        to a better convergence criterion. Default is 20.
    verbosity : int, optional
        If positive, print output relating to convergence conditions at each
        iteration.

    Returns
    -------
    X : ndarray
        Optimal configuration of points in RP^k.
    C : list
        List of costs at each iteration.

    """

    num_points = X.shape[0]
    start_cost_list = []
    end_cost_list = []
    loop_cost_diff = np.inf
    percent_cost_diff = 100
    rank = LA.matrix_rank(X)
    vprint('Finding projection onto RP^%i.' % (rank - 1), 1, verbosity)
    W = distance_to_weights(D)
    S = np.sign(X @ X.T)
    C = S * np.cos(D)
    if np.sum(S == 0) > 0:
        print('Warning: Some initial guess vectors are orthogonal, this may ' +
              'cause issues with convergence.')
    manifold = Oblique(rank, num_points)  # Short, wide matrices.
    solver = ConjugateGradient()
    for i in range(0, max_iter):
        # cost, egrad, ehess = setup_RPn_cost(D, S)
        cost = setup_square_cost(D)
        start_cost_list.append(cost(X.T))
        # problem = pymanopt.Problem(manifold, cost, egrad=egrad, ehess=ehess,
        # verbosity=verbosity)
        problem = pymanopt.Problem(manifold, cost, verbosity=verbosity)
        X_new = solver.solve(problem, x=X.T)
        X_new = X_new.T  # X should be tall-skinny
        end_cost_list.append(cost(X_new.T))
        S_new = np.sign(X_new @ X_new.T)
        C_new = S_new * np.cos(D)
        S_diff = ((LA.norm(S_new - S))**2) / 4
        percent_S_diff = 100 * S_diff / S_new.size
        iteration_cost_diff = start_cost_list[i] - end_cost_list[i]
        if i > 0:
            loop_cost_diff = end_cost_list[i - 1] - end_cost_list[i]
            percent_cost_diff = 100 * loop_cost_diff / end_cost_list[i - 1]
        vprint('Through %i iterations:' % (i + 1), 1, verbosity)
        vprint('\tCost at start: %2.4f' % start_cost_list[i], 1, verbosity)
        vprint('\tCost at end: %2.4f' % end_cost_list[i], 1, verbosity)
        vprint(
            '\tCost reduction from optimization: %2.4f' % iteration_cost_diff,
            1, verbosity)
        vprint('\tCost reduction over previous loop: %2.4f' % loop_cost_diff,
               1, verbosity)
        vprint('\tPercent cost difference: % 2.4f' % percent_cost_diff, 1,
               verbosity)
        vprint('\tPercent Difference in S: % 2.2f' % percent_S_diff, 1,
               verbosity)
        vprint('\tDifference in cost matrix: %2.2f' % (LA.norm(C - C_new)), 1,
               verbosity)
        if S_diff < 1:
            vprint('No change in S matrix. Stopping iterations', 0, verbosity)
            break
        if percent_cost_diff < .0001:
            vprint('No significant cost improvement. Stopping iterations.', 0,
                   verbosity)
            break
        if i == max_iter - 1:
            vprint('Maximum iterations reached.', 0, verbosity)
        # Update variables:
        X = X_new
        C = C_new
        S = S_new
    return X
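
Hypothetical usage of `rp_mds` (new code, under the assumption that `D` holds projective distances arccos|<x_i, x_j>|):

import numpy as np

rng = np.random.default_rng(1)
X0 = rng.normal(size=(20, 3))
X0 /= np.linalg.norm(X0, axis=1, keepdims=True)   # rows on the sphere S^2
D = np.arccos(np.clip(np.abs(X0 @ X0.T), 0, 1))   # distances in RP^2
X_opt = rp_mds(D, X0)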