Example No. 1
def get_rotation_matrix(M, Mtilde, weights=None, dist=None):
    
    if dist is None:
        dist = 'euc'
    
    n = M[0].shape[0]
        
    # (1) Instantiate a manifold
    manifold = Rotations(n)
    
    # (2) Define cost function and a problem
    if dist == 'euc':
        cost = partial(cost_function_full, M=M, Mtilde=Mtilde, weights=weights, dist=dist)    
        problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    elif dist == 'rie':
        cost = partial(cost_function_full, M=M, Mtilde=Mtilde, weights=weights, dist=dist)    
        egrad = partial(egrad_function_full_rie, M=M, Mtilde=Mtilde, weights=weights) 
        problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)
        
    # (3) Instantiate a Pymanopt solver
    solver = SteepestDescent(mingradnorm=1e-3)   
    
    # let Pymanopt do the rest
    Q_opt = solver.solve(problem)    
    
    return Q_opt
Example No. 2
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    # Generate random problem data.
    n = 128
    A = np.random.randn(n, n)
    A = 0.5 * (A + A.T)
    cost, egrad = create_cost_egrad(backend, A)

    # Create the problem structure.
    manifold = Sphere(n)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad)
    if quiet:
        problem.verbosity = 0

    # Numerically check gradient consistency (optional).
    check_gradient(problem)
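The helper create_cost_egrad is not shown in this example. A minimal sketch of what it could return for a plain-numpy backend, assuming the classic dominant-eigenvector setup this snippet implies (the helper body below is an assumption, not pymanopt API):

def create_cost_egrad(backend, A):
    # Minimizing -x.T A x over the unit sphere yields the dominant
    # eigenvector of the symmetric matrix A.
    def cost(x):
        return -x.T @ A @ x

    def egrad(x):
        # Euclidean gradient of -x.T A x for symmetric A.
        return -2 * A @ x

    return cost, egrad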
Example No. 3
def envelope(X_env, Y_env, u):

    p, r = X_env.shape[1], Y_env.shape[1]
    linear_model = LinearRegression().fit(X_env, Y_env)
    err = Y_env - linear_model.predict(X_env)
    Sigma_res = np.cov(err.transpose())
    Sigma_Y = np.cov(Y_env.transpose())

    def cost(Gamma):
        X = np.matmul(Gamma, Gamma.T)
        out = -np.log(
            np.linalg.det(
                np.matmul(np.matmul(X, Sigma_res), X) +
                np.matmul(np.matmul(np.eye(r) - X, Sigma_Y),
                          np.eye(r) - X)))
        return (np.array(out))

    manifold = Grassmann(r, u)
    # manifold = Stiefel(r, u)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent()
    Gamma = solver.solve(problem)
    PSigma1_hat = np.matmul(Gamma, Gamma.T)
    PSigma2_hat = np.eye(r) - PSigma1_hat

    beta_hat = np.matmul(PSigma1_hat, linear_model.coef_)
    Sigma1_hat = np.matmul(np.matmul(PSigma1_hat, Sigma_res), PSigma1_hat)
    Sigma2_hat = np.matmul(np.matmul(np.eye(r) - PSigma1_hat, Sigma_res),
                           np.eye(r) - PSigma1_hat)
    alpha_hat = np.mean(Y_env - np.matmul(X_env, beta_hat.T), axis=0)

    return (alpha_hat.reshape(1, r), beta_hat.reshape(p, r))
Example No. 4
def optimize_AB(Cor11, Cor21, n, V11, V21, D11, D21, k):

    # init_x0 below reads these module-level names, including D1.
    global D1
    global D2
    global V1
    global V2
    global Cor1
    global Cor2
    global k_

    D1 = D11
    D2 = D21
    V1 = V11
    V2 = V21
    Cor1 = Cor11
    Cor2 = Cor21
    k_ = k

    manifold = Stiefel(k, k)
    x0 = init_x0(Cor1, Cor2, n, V1, V2, D1, D2, k)
    # x0=np.load('zwischenspeicher/B.npy')
    problem = Problem(manifold=manifold, cost=cost)

    # (3) Instantiate a Pymanopt solver

    #solver = pymanopt.solvers.conjugate_gradient.ConjugateGradient(maxtime=10000, maxiter=10000)
    solver = pymanopt.solvers.trust_regions.TrustRegions(
    )  # maxtime=float('inf'))

    # let Pymanopt do the rest
    B = solver.solve(problem, x=x0)
    # print(B)
    # print(np.reshape(res.x[0:k*k_], (k_, k)).T @ np.reshape(res.x[0:k*k_], (k_, k)))

    return B
Example No. 5
def fixedrank(A, YT, r):
    """ Solves the AX=YT problem on the manifold of r-rank matrices with  
    """

    # Instantiate a manifold
    manifold = FixedRankEmbedded(N, r, r)

    # Define the cost function (here using autograd.numpy)
    def cost(X):
        U = X[0]
        cst = 0
        for n in range(N):
            cst = cst + huber(U[n, :])
        Mat = np.matmul(np.matmul(X[0], np.diag(X[1])), X[2])
        fidelity = LA.norm(np.subtract(np.matmul(A, Mat), YT))
        return cst + lambd * fidelity**2

    problem = Problem(manifold=manifold, cost=cost)
    solver = ConjugateGradient(maxiter=maxiter)

    # Let Pymanopt do the rest
    Xopt = solver.solve(problem)

    #Solve
    Sol = np.dot(np.dot(Xopt[0], np.diag(Xopt[1])), Xopt[2])

    return Sol
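This snippet depends on module-level names (N, lambd, maxiter, LA and a huber helper) that are not shown. A sketch of plausible definitions, purely as assumptions to make the example self-contained:

import autograd.numpy as np
from autograd.numpy import linalg as LA

N = 100        # number of rows of the unknown matrix (assumed)
lambd = 1.0    # fidelity weight (assumed)
maxiter = 500  # solver iteration cap (assumed)

def huber(u, delta=1.0):
    # One common smooth Huber variant on a row vector: quadratic near
    # zero, linear in the tail. The original helper is not shown.
    r = np.sqrt(np.sum(u ** 2))
    if r <= delta:
        return 0.5 * r ** 2
    return delta * (r - 0.5 * delta)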
Example No. 6
def max_GtRq_brute(A, B, feedback=0, optimizer='ParticleSwarm', **kwargs):
    """
    Brute force maximization of the Generalized Tensor Rayleigh quotient
    on the sphere. Optimization is performed with Pymanopt.

    :param A: the input tensor
    :param B: the second input tensor
    :param feedback: the feedback level for pymanopt
    :param optimizer: the name of the pymanopt minimizer
    :param kwargs: keyword arguments to pass to the pymanopt solver
    """
    # get dimension:
    d = A.shape[0]
    # initialize:
    manifold = Sphere(d)
    problem = Problem(manifold=manifold,
                      cost=lambda x: -_GtRq_brute_autograd(x, A, B),
                      verbosity=feedback)
    # optimization:
    if optimizer == 'ParticleSwarm':
        solver = pymanopt.solvers.ParticleSwarm(logverbosity=0, **kwargs)
        Xopt = solver.solve(problem)
    elif optimizer == 'TrustRegions':
        solver = pymanopt.solvers.TrustRegions(logverbosity=0, **kwargs)
        Xopt = solver.solve(problem)
    else:
        raise ValueError('unknown optimizer: %s' % optimizer)
    # finalize:
    return _GtRq_brute_autograd(Xopt, A, B), Xopt
Example No. 7
def CGmanopt(X, objective_function, A, **kwargs):
    '''
    Minimizes the objective function subject to the constraint that X.T * X = I_k using the
    conjugate gradient method

    Args:
        X: Initial 2D array of shape (n, k) such that X.T * X = I_k
        objective_function: Objective function F(X, A) to minimize.
        A: Additional parameters for the objective function F(X, A)

    Keyword Args:
        None

    Returns:
        Xopt: Value of X that minimizes the objective subject to the constraint.
    '''

    manifold = Stiefel(X.shape[0], X.shape[1])

    def cost(X):
        c, _ = objective_function(X, A)
        return c

    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = ConjugateGradient(logverbosity=0)
    Xopt = solver.solve(problem)
    return Xopt, None
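A hypothetical call, with a toy objective that returns the (cost, auxiliary) pair the wrapper expects; all names below are illustrative:

import numpy as np

def objective_function(X, A):
    # Quadratic trace objective; the second return value is unused here.
    return np.trace(X.T @ A @ X), None

A = np.random.randn(10, 10)
X0 = np.linalg.qr(np.random.randn(10, 3))[0]  # orthonormal (n, k) start
Xopt, _ = CGmanopt(X0, objective_function, A)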
Example No. 8
def solve_dist_with_man(man, A, X0, maxiter):
    from pymanopt import Problem
    from pymanopt.solvers import TrustRegions
    from pymanopt.function import Callable

    @Callable
    def cost(S):
        # if not(S.P.dtype == np.float):
        #    raise(ValueError("Non real"))
        diff = (A - S.U @ S.P @ S.V.T.conj())
        val = rtrace(diff @ diff.T.conj())
        # print('val=%f' % val)
        return val

    @Callable
    def egrad(S):
        return fr_ambient(-2*A @ S.V @ S.P,
                          -2*A.T.conj() @ S.U @S.P,
                          2*(S.P-S.U.T.conj() @ A @ S.V))
    
    @Callable
    def ehess(S, xi):
        return fr_ambient(-2*A @ (xi.tV @ S.P + S.V @ xi.tP),
                          -2*A.T.conj() @ (xi.tU @ S.P + S.U @ xi.tP),
                          2*(xi.tP - xi.tU.T.conj() @ A @ S.V -
                             S.U.T.conj() @ A @ xi.tV))

    prob = Problem(
        man, cost, egrad=egrad, ehess=ehess)

    solver = TrustRegions(maxtime=100000, maxiter=maxiter, use_rand=False)
    opt = solver.solve(prob, x=X0, Delta_bar=250)
    return opt
Example No. 9
def compute_centroid(man, x):
    """
    Compute the centroid as Karcher mean of points x belonging to the manifold
    man.
    """
    n = len(x)

    def objective(y):  # weighted Frechet variance
        acc = 0
        for i in range(n):
            acc += man.dist(y, x[i])**2
        return acc / 2

    def gradient(y):
        g = man.zerovec(y)
        for i in range(n):
            g -= man.log(y, x[i])
        return g

    # TODO: manopt runs a few TR iterations here. For us to do this, we either
    #       need to work out the Hessian of the Frechet variance by hand or
    #       implement approximations for the Hessian to use in the TR solver.
    #       This is because we cannot implement the Frechet variance with
    #       theano and compute the Hessian automatically due to dependency on
    #       the manifold-dependent distance function.
    solver = SteepestDescent(maxiter=15)
    problem = Problem(man, cost=objective, grad=gradient, verbosity=0)
    return solver.solve(problem)
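A usage sketch, computing the Karcher mean of a handful of random points on the unit sphere (same old-style pymanopt namespace the surrounding examples use):

import numpy as np
from pymanopt.manifolds import Sphere

sphere = Sphere(3)
points = [sphere.rand() for _ in range(5)]  # random unit vectors
centroid = compute_centroid(sphere, points)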
Example No. 10
def rotation_matrix(mean_source, mean_target_train):
    manifold = Rotations(mean_source[0].shape[0])
    cost = partial(cost_function_full, mean_source, mean_target_train)
    problem = Problem(manifold, cost)
    solver = SteepestDescent(mingradnorm=1e-3)
    U = solver.solve(problem)
    return U
Example No. 11
    def train_model(self, x0=None):
        self.U = T.matrix('U')
        self.S = T.matrix('S')
        self.V = T.matrix('V')
        problem = Problem(man=self.manifold,
                          theano_cost=self.log_likelihood(),
                          theano_arg=[self.U, self.S, self.V])

        if x0 is None:
            user_vectors = np.random.normal(size=(self.num_users,
                                                  self.num_factors + 1))
            item_vectors = np.random.normal(size=(self.num_items,
                                                  self.num_factors + 1))
            s = rnd.random(self.num_factors + 1)
            s[:-1] = np.sort(s[:-1])[::-1]

            x0 = (user_vectors, np.diag(s), item_vectors.T)
        (left, middle, right), self.loss_history = self.solver.solve(problem,
                                                                     x=x0)
        right = right.T

        s_mid = np.diag(np.sqrt(np.diag(middle)[:-1]))
        self.middle = s_mid

        print('U norm: {}'.format(la.norm(left[:, :-1])))
        print('V norm: {}'.format(la.norm(right[:, :-1])))
        self.user_vectors = left[:, :-1].dot(s_mid)
        self.item_vectors = right[:, :-1].dot(s_mid)
        self.user_biases = left[:, -1] * np.sqrt(middle[-1, -1])
        self.item_biases = right[:, -1] * np.sqrt(middle[-1, -1])
        print('U norm: {}'.format(la.norm(self.user_vectors)))
        print('V norm: {}'.format(la.norm(self.item_vectors)))
        print('LL: {}'.format(self._log_likelihood()))
Example No. 12
    def fit(self):
        v_matrix_shape = (self.w_matrix.shape[0], self.w_matrix.shape[1])
        w_matrix = tf.convert_to_tensor(self.w_matrix, dtype=tf.float64)
        z_matrix = tf.convert_to_tensor(self.z_matrix, dtype=tf.float64)
        x_matrix = tf.convert_to_tensor(self.x_matrix, dtype=tf.float64)
        lambda_matrix = tf.convert_to_tensor(self.lambda_matrix,
                                             dtype=tf.float64)
        x = tf.Variable(
            initial_value=tf.ones(v_matrix_shape, dtype=tf.dtypes.float64))

        cost = tf.norm(x_matrix - tf.linalg.matmul(
            tf.linalg.matmul(x, lambda_matrix), tf.transpose(x))
                       ) + self.rho / 2 * tf.norm(x - w_matrix + z_matrix)

        manifold = Stiefel(v_matrix_shape[0], v_matrix_shape[1])
        problem = Problem(manifold=manifold, cost=cost, arg=x)
        solver = SteepestDescent(logverbosity=self.verbosity)
        if self.verbosity > 2:
            v_optimal, _ = solver.solve(problem)
        else:
            v_optimal = solver.solve(problem)

        if self.verbosity > 2:
            print("==> WSubproblem ==> Showing v_optimal:")
            print(v_optimal)

        return v_optimal
Example No. 13
    def train_model(self, x0=None):
        self.L = T.matrix('L')
        self.R = T.matrix('R')
        problem = Problem(man=self.manifold,
                          theano_cost=self.log_likelihood(),
                          theano_arg=[self.L, self.R])

        if x0 is None:
            user_vectors = np.random.normal(size=(self.num_users,
                                                  self.num_factors))
            item_vectors = np.random.normal(size=(self.num_items,
                                                  self.num_factors))
            user_biases = np.random.normal(size=(self.num_users, 1)) / SCONST
            item_biases = np.random.normal(size=(self.num_items, 1)) / SCONST
            x0 = (np.hstack((user_vectors, user_biases)),
                  np.hstack((item_vectors, item_biases)))
        (left, right), self.loss_history = self.solver.solve(problem, x=x0)

        self.user_vectors = left[:, :-1]
        self.item_vectors = right[:, :-1]
        self.user_biases = left[:, -1].reshape((self.num_users, 1))
        self.item_biases = right[:, -1].reshape((self.num_items, 1))
        print('U norm: {}'.format(la.norm(self.user_vectors)))
        print('V norm: {}'.format(la.norm(self.item_vectors)))
Example No. 14
    def _align_H_stiefel(self, Q, G):
        """Tangent vector field alignment via optimization on orthogonal group."""

        N, D, d = Q.shape

        indptr = G.indptr
        indices = G.indices

        K = G.data

        def cost(V):
            F = 0
            for i in range(N):
                for j, K_ij in zip(indices[indptr[i]:indptr[i + 1]],
                                   K[indptr[i]:indptr[i + 1]]):
                    f_i = K_ij * np.trace(
                        np.dot(np.dot(V[i].T, np.dot(Q[i].T, Q[j])), V[j]))
                    F += f_i

            return F

        # cost treats V as a stack of N rotation matrices, so optimize over
        # N copies of SO(d), starting from identity matrices (the zero
        # matrix is not a point on the manifold).
        manifold = Rotations(d, k=N)
        problem = Problem(manifold=manifold, cost=cost)
        solver = SteepestDescent()

        V = solver.solve(problem, x=np.tile(np.eye(d), (N, 1, 1)))

        return V
Example No. 15
    def fit(self):
        f = self.f
        X = self.X
        tol = self.tol
        d = self.d
        n = self.n

        current_best_residual = np.inf
        for r in range(self.restarts):
            print('restart %d' % r)
            M0 = np.linalg.qr(np.random.randn(self.d, self.n))[0]
            my_params = [Parameter(order=self.order, distribution='uniform', lower=-5, upper=5) for _ in range(n)]
            my_basis = Basis('total-order')
            my_poly_init = Poly(parameters=my_params, basis=my_basis, method='least-squares',
                                sampling_args={'mesh': 'user-defined',
                                               'sample-points': X @ M0,
                                               'sample-outputs': f})
            my_poly_init.set_model()
            c0 = my_poly_init.coefficients.copy()

            residual = self.cost(f, X, M0, c0)

            cauchy_length = self.cauchy_length
            residual_history = []
            iter_ind = 0
            M = M0.copy()
            c = c0.copy()
            while residual > tol:
                if self.verbosity == 2:
                    print(residual)
                residual_history.append(residual)
                # Minimize over M
                func_M = lambda M_var: self.cost(f, X, M_var, c)
                grad_M = lambda M_var: self.dcostdM(f, X, M_var, c)

                manifold = Stiefel(d, n)
                solver = ConjugateGradient(maxiter=self.max_M_iters)

                problem = Problem(manifold=manifold, cost=func_M, egrad=grad_M, verbosity=0)

                M = solver.solve(problem, x=M)

                # Minimize over c
                func_c = lambda c_var: self.cost(f, X, M, c_var)
                grad_c = lambda c_var: self.dcostdc(f, X, M, c_var)

                res = minimize(func_c, x0=c, method='CG', jac=grad_c)
                c = res.x
                residual = self.cost(f, X, M, c)
                if iter_ind < cauchy_length:
                    iter_ind += 1
                elif np.abs(np.mean(residual_history[-cauchy_length:]) - residual)/residual < self.cauchy_tol:
                    break

            if self.verbosity > 0:
                print('final residual on training data: %f' % self.cost(f, X, M, c))
            if residual < current_best_residual:
                self.M = M
                self.c = c
                current_best_residual = residual
Example No. 16
    def test_prepare(self):
        problem = Problem(self.man, self.cost)
        with self.assertRaises(ValueError):
            # Asking for the gradient of a Theano cost function without
            # specifying an argument for differentiation should raise an
            # exception.
            problem.grad
Example No. 17
def low_rank_matrix_approximation_theano(A, k):
    manifold, solver = _bootstrap_problem(A, k)

    Y = T.matrix()
    cost = T.sum((T.dot(Y, Y.T) - A)**2)

    problem = Problem(man=manifold, theano_cost=cost, theano_arg=Y)
    return solver.solve(problem)
Example No. 18
def solve_manopt(X, d, cost, egrad, Wo=None):

    D = X.shape[1]
    manifold = Grassmann(height=D, width=d)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)

    solver = ConjugateGradient(mingradnorm=1e-3)
    W = solver.solve(problem, x=Wo)

    return W
Example No. 19
def get_rotation_matrix(X, C):
    def cost(R):
        Z = npy.dot(X, R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M)**2)

    manifold = Stiefel(C, C)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent(logverbosity=0)
    opt = solver.solve(problem=problem, x=npy.eye(C))
    return cost(opt), opt
Example No. 20
def estimateR_weighted(S, W, D, R0):
    '''
    estimates the update of the rotation matrix for the second part of the iterations
    :param S : shape
    :param W : heatmap
    :param D : weight of the heatmap
    :param R0 : rotation matrix
    :return: R the new rotation matrix
    '''

    A = np.transpose(S)
    B = np.transpose(W)
    X0 = R0[0:2, :]
    store_E = Store()

    [m, n] = A.shape
    p = B.shape[1]

    At = np.zeros([n, m])
    At = np.transpose(A)

    # we use the optimization on a Stiefel manifold because R is constrained to be orthogonal
    manifold = Stiefel(n, p, 1)

    ####################################################################################################################
    def cost(X):
        '''
        cost function of the manifold, the cost is trace(E'*D*E)/2 with E = A*X - B or store_E
        :param X : vector
        :return f : the cost
        '''

        # Recompute the residual at the current point; a cached value would
        # go stale as soon as the solver moves X.
        store_E.stored = np.dot(A, np.transpose(X)) - B

        E = store_E.stored
        f = np.trace(np.dot(np.transpose(E), np.dot(D, E))) / 2

        return f

    ####################################################################################################################

    # setup the problem structure with manifold M and cost
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)

    # setup the trust region algorithm to solve the problem
    TR = TrustRegions(maxiter=10)

    # solve the problem
    X = TR.solve(problem, X0)

    #print('X : ',X)
    return np.transpose(X)  # return R = X'
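No gradient is supplied, so pymanopt has to approximate one. For a symmetric weight matrix D, the Euclidean gradient of trace(E'*D*E)/2 with E = A*X' - B has the closed form E'*D*A; a sketch of an egrad that could be added inside estimateR_weighted under that symmetry assumption:

    def egrad(X):
        # Gradient of trace(E.T D E)/2 with E = A X.T - B, for symmetric D.
        E = np.dot(A, np.transpose(X)) - B
        return np.dot(np.dot(np.transpose(E), D), A)

    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)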
Example No. 21
def NG_sdr(X, y, m, v_w=5, v_b=5, verbosity=0, *args, **kwargs):
    """
    X: array of N points on complex Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n)
    where X_hat_i = A^H X_i, A in St(n, m)
    minimizing the projection error (using the projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued
    if cpx:
        gr = ComplexGrassmann(n, p)
        man = ComplexGrassmann(n, m)
    else:
        gr = Grassmann(n, p)
        man = Grassmann(n, m)
    
    # distance matrix
    dist_m = np.zeros((N, N))

    for i in range(N):
        for j in range(i):
            dist_m[i, j] = gr.dist(X[i], X[j])
            dist_m[j, i] = dist_m[i, j]
    
    # affinity matrix
    affinity = affinity_matrix(dist_m, y, v_w, v_b)

            
    X_ = torch.from_numpy(X)
    affinity_ = torch.from_numpy(affinity)
    
    @pymanopt.function.PyTorch
    def cost(A):
        dm = torch.zeros((N, N))
        for i in range(N):
            for j in range(i):
                dm[i, j] = dist_proj(torch.matmul(A.conj().t(), X_[i]), torch.matmul(A.conj().t(), X_[j]))**2
                #dm[i, j] = gr_low.dist(X_proj[i], X_proj[j])**2
                dm[j, i] = dm[i, j]
    
        d2 = torch.mean(affinity_*dm)   
        return d2

    # solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    A = solver.solve(problem)

    tmp = np.array([A.conj().T for i in range(N)]) # N x m x n
    X_low = multiprod(tmp, X) # N x m x p
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])
    
    return X_low, A
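dist_proj is the projection-F-norm distance between the subspaces spanned by two (possibly complex) bases; the helper itself is not shown. A sketch consistent with how it is called here, using one common normalization (an assumption):

import torch

def dist_proj(X, Y):
    # Orthonormalize the bases, then compare the projection matrices.
    Qx, _ = torch.qr(X)
    Qy, _ = torch.qr(Y)
    Px = torch.matmul(Qx, Qx.conj().t())
    Py = torch.matmul(Qy, Qy.conj().t())
    return torch.norm(Px - Py) / (2 ** 0.5)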
Example No. 22
def NG_dr1(X, verbosity = 0):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(n-1, p)) 
    where X_hat_i = A^T X_i, A \in St(n, n-1)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, 1), Euclidean(p, 2)])
        
    else:
        man = Product([Grassmann(n, 1), Euclidean(p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(v, b):
        vvT = torch.matmul(v, v.conj().t()) # n x n
        if cpx:
            b_ = b[:,0] + b[:,1]*1j
            b_ = torch.unsqueeze(b_, axis=1)
        else:
            b_ = torch.unsqueeze(b, axis=1)
        vbt = torch.matmul(v, b_.t()) # n x p
        IvvT = torch.eye(n, dtype=X_.dtype) - vvT
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(IvvT, X_[i]) + vbt)**2/N
            #d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]))**2/N
        return d2
    
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    v = theta[0]
    b_ = theta[1]
    
    if cpx:
        b = b_[:,0] + b_[:,1]*1j
        b = np.expand_dims(b, axis=1)
    else:
        b = np.expand_dims(b_, axis=1)
    
    R = ortho_complement(v)
    tmp = np.array([R.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, R, v, b
Example No. 23
    def estimate_orth_subspaces(self, DataStruct):
        '''
        main optimization function
        '''

        # Grassmann point?
        if LA.norm(np.dot(self.Q.T, self.Q) - np.eye(self.Q.shape[-1]),
                   ord='fro') > 1e-4:
            self._project_stiefel()

        # ----------------------------------------------------------------------- #

        # eGrad = grad(cost)
        # eHess = hessian(cost)

        # Perform optimization
        # ----------------------------------------------------------------------- #
        # ----------------------------------------------------------------------- #

        d, r = np.shape(self.Q)  # problem size
        print(d)

        manif = Stiefel(d, r)  # initialize manifold

        # instantiate problem
        problem = Problem(manifold=manif, cost=self._cost, verbosity=2)

        # initialize solver
        solver = TrustRegions(mingradnorm=1e-8,
                              minstepsize=1e-16,
                              logverbosity=1)

        # solve
        Xopt, optlog = solver.solve(problem)

        opt_subspaces = self._objfn(Xopt)

        # Align the axes within a subspace by variance high to low
        for j in range(self.numSubspaces):
            Aj = DataStruct.A[j]
            Qj = opt_subspaces[2].Q[j]
            # data projected onto subspace
            Aj_proj = np.dot((Aj - np.mean(Aj, 0)), Qj)
            if np.size(np.cov(Aj_proj.T)) < 2:
                V = 1
            else:
                V = LA.svd(np.cov(Aj_proj.T))[0]
            Qj = np.dot(Qj, V)
            opt_subspaces[2].Q[j] = Qj  # ranked top to low variance

        return opt_subspaces[2]
Example No. 24
def get_rotation_matrix(Mt, Ms, metric='euc'):
    Mt = Mt.reshape(-1, *Mt.shape[-2:])
    Ms = Ms.reshape(-1, *Ms.shape[-2:])

    n = Mt[0].shape[0]
    manifolds = Rotations(n)

    if metric == 'euc':
        cost = partial(_procruster_cost_function_euc, Mt=Mt, Ms=Ms)
        problem = Problem(manifold=manifolds, cost=cost, verbosity=0)
    elif metric == 'rie':
        cost = partial(_procruster_cost_function_rie, Mt=Mt, Ms=Ms)
        egrad = partial(_procruster_egrad_function_rie, Mt=Mt, Ms=Ms)
        problem = Problem(manifold=manifolds,
                          cost=cost,
                          egrad=egrad,
                          verbosity=0)

    solver = SteepestDescent(mingradnorm=1e-3)

    Ropt = solver.solve(problem)

    return Ropt
Example No. 25
def NG_dr(X, m, verbosity=0, *args, **kwargs):
    """
    X: array of N points on Gr(n, p); N x n x p array
    aim to represent X by X_hat (N points on Gr(m, p), m < n) 
    where X_hat_i = A^H X_i, A in St(n, m)
    minimizing the projection error (using projection F-norm)
    """
    N, n, p = X.shape
    cpx = np.iscomplex(X).any() # true if X is complex-valued

    if cpx:
        man = Product([ComplexGrassmann(n, m), Euclidean(n, p, 2)])
        
    else:
        man = Product([Grassmann(n, m), Euclidean(n, p)])
    
    X_ = torch.from_numpy(X)
    
    @pymanopt.function.PyTorch
    def cost(A, B):
        AAT = torch.matmul(A, A.conj().t()) # n x n
        if cpx:
            B_ = B[:,:,0] + B[:,:,1]*1j
        else:
            B_ = B
        IAATB = torch.matmul(torch.eye(n, dtype=X_.dtype) - AAT, B_) # n x p
        d2 = 0
        for i in range(N):
            d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]) + IAATB)**2/N
            #d2 = d2 + dist_proj(X_[i], torch.matmul(AAT, X_[i]))**2/N
        return d2

    #solver = ConjugateGradient()
    solver = SteepestDescent()
    problem = Problem(manifold=man, cost=cost, verbosity=verbosity)
    theta = solver.solve(problem)
    A = theta[0]
    B = theta[1]
    
    if cpx:
        B_ = B[:,:,0] + B[:,:,1]*1j
    else:
        B_ = B

    #tmp = np.array([A.T for i in range(N)])
    tmp = np.array([A.conj().T for i in range(N)])
    X_low = multiprod(tmp, X)
    X_low = np.array([qr(X_low[i])[0] for i in range(N)])

    return X_low, A, B_
Example No. 26
def ManoptOptimization(A, m):
    n = A.shape[0]
    T = A.shape[2]
    manifold = Stiefel(n, m, k=1)

    mycost = lambda x: cost(A, x)
    myegrad = lambda x: egrad(A, x)
    problem = Problem(manifold=manifold, cost=mycost, egrad=myegrad)

    solver = TrustRegions()
    print('# Start optimization using solver: trustregion')
    Xopt = solver.solve(problem)

    return Xopt
Example No. 27
def closest_unit_norm_column_approximation(A):
    """
    Returns the matrix with unit-norm columns that is closest to A w.r.t. the
    Frobenius norm.
    """
    m, n = A.shape

    manifold = Oblique(m, n)
    solver = ConjugateGradient()
    X = T.matrix()
    cost = 0.5 * T.sum((X - A)**2)

    problem = Problem(manifold=manifold, cost=cost, arg=X)
    return solver.solve(problem)
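For this cost the minimizer is known in closed form (normalize each column of A), which makes a convenient end-to-end check of the solver output; a sketch:

import numpy as np

A = np.random.randn(5, 8)
X = closest_unit_norm_column_approximation(A)
print(np.allclose(X, A / np.linalg.norm(A, axis=0), atol=1e-4))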
Example No. 28
def low_rank_matrix_approximation(A, k):
    manifold, solver = _bootstrap_problem(A, k)

    def cost(Y):
        return la.norm(Y.dot(Y.T) - A, "fro")**2

    def egrad(Y):
        return 4 * (Y.dot(Y.T) - A).dot(Y)

    def ehess(Y, U):
        return 4 * ((Y.dot(U.T) + U.dot(Y.T)).dot(Y) + (Y.dot(Y.T) - A).dot(U))

    problem = Problem(man=manifold, cost=cost, egrad=egrad, ehess=ehess)
    return solver.solve(problem)
Example No. 29
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    n = 128
    manifold = Sphere(n)

    # Generate random problem data.
    matrix = np.random.normal(size=(n, n))
    matrix = 0.5 * (matrix + matrix.T)
    cost, euclidean_gradient = create_cost_and_derivates(
        manifold, matrix, backend)

    # Create the problem structure.
    problem = Problem(manifold, cost, euclidean_gradient=euclidean_gradient)

    # Numerically check gradient consistency (optional).
    check_gradient(problem)
Example No. 30
def solve_manopt(X, d, cost, egrad):

    D = X.shape[1]
    manifold = Grassmann(height=D, width=d)
    problem = Problem(manifold=manifold, cost=cost, egrad=egrad, verbosity=0)

    solver = ConjugateGradient(mingradnorm=1e-3)

    M = mean_riemann(X)
    w, v = np.linalg.eig(M)
    idx = w.argsort()[::-1]
    v_ = v[:, idx]
    Wo = v_[:, :d]
    W = solver.solve(problem, x=Wo)
    return W
Example No. 31
#https://github.com/pymanopt/pymanopt/blob/master/pymanopt/core/problem.py

import autograd.numpy as np
from pymanopt import Problem


def cost(theta):
    return np.square(theta)
    
problem = Problem(manifold=None, cost=cost, verbosity=1)

print(problem.cost(5))

print(problem.egrad(5.0))
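With the autograd backend, problem.egrad is derived from the cost automatically, so the two prints should show the value of theta**2 and its derivative 2*theta:

# Expected output:
# 25
# 10.0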