Example #1
def maxvol(A):
    """ Find the rxr submatrix of maximal volume in A(nxr), n>=r

            We want to decompose matrix A as
                    A = A[:,J] * (A[I,J])^-1 * A[I,:]
            This algorithm helps us find this submatrix A[I,J] from A, which has the largest determinant.
            We greedily find vector of max norm, and subtract its projection from the rest of rows.

    Parameters
    ----------

    A: matrix
            The matrix in which to search for the submatrix of maximal volume

    Returns
    -------
    row_idx: list of int
            the list of rows of A forming the submatrix of maximal volume
    A_inv: matrix
            the inverse of the submatrix of maximal volume

    References
    ----------
    S. A. Goreinov, I. V. Oseledets, D. V. Savostyanov, E. E. Tyrtyshnikov, N. L. Zamarashkin.
    How to find a good submatrix.
    Matrix Methods: Theory, Algorithms and Applications: Dedicated to the Memory of Gene Golub. 2010. 247-256.

    Ali Çivril, Malik Magdon-Ismail.
    On selecting a maximum volume sub-matrix of a matrix and related problems.
    Theoretical Computer Science, Volume 410, Issues 47–49, 6 November 2009, Pages 4801-4811.
    """

    (n, r) = tl.shape(A)

    # Indices of the rows forming the submatrix
    row_idx = tl.zeros(r)

    # Indices of the rows not yet selected
    rest_of_rows = tl.tensor(list(range(n)), dtype=tl.int64)

    # Find r rows iteratively
    i = 0
    A_new = A
    while i < r:
        mask = list(range(tl.shape(A_new)[0]))
        # Compute the square of norm of each row
        rows_norms = tl.sum(A_new ** 2, axis=1)

        # If only one row of A is left, select it directly; the MXNet backend
        # does not handle this 0-d case robustly
        if tl.shape(rows_norms) == ():
            row_idx[i] = rest_of_rows
            break

        # If a row has zero norm, remove it
        if any(rows_norms == 0):
            zero_idx = tl.argmin(rows_norms, axis=0)
            mask.pop(zero_idx)
            rest_of_rows = rest_of_rows[mask]
            A_new = A_new[mask, :]
            continue

        # Find the row of max norm
        max_row_idx = tl.argmax(rows_norms, axis=0)
        max_row = A[rest_of_rows[max_row_idx], :]

        # Compute the normalized inner product (cosine similarity) of max_row
        # with every remaining row: <a,b> / (|a| * |b|)
        projection = tl.dot(A_new, tl.transpose(max_row))
        normalization = tl.sqrt(rows_norms[max_row_idx] * rows_norms)
        # Make sure normalization has the same shape as projection
        # (a shape mismatch causes bugs with the MXNet backend)
        normalization = tl.reshape(normalization, tl.shape(projection))
        projection = projection / normalization

        # Shrink each remaining row by its cosine similarity with max_row:
        # b <- b - b * cos(a, b)
        A_new = A_new - A_new * tl.reshape(projection, (tl.shape(A_new)[0], 1))

        # Remove the selected row
        mask.pop(max_row_idx)
        A_new = A_new[mask, :]

        # update the row_idx and rest_of_rows
        row_idx[i] = rest_of_rows[max_row_idx]
        rest_of_rows = rest_of_rows[mask]
        i = i + 1

    row_idx = tl.tensor(row_idx, dtype=tl.int64)
    inverse = tl.solve(A[row_idx, :],
                       tl.eye(tl.shape(A[row_idx, :])[0], **tl.context(A)))
    row_idx = tl.to_numpy(row_idx)

    return row_idx, inverse
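
A minimal usage sketch (an addition, not part of the original source; it assumes the TensorLy NumPy backend and the maxvol function defined above): build a random tall matrix, run maxvol, and inspect the coefficient matrix A @ A_inv, whose entries stay close to or below 1 in modulus when the selected submatrix has (near-)maximal volume.

import numpy as np
import tensorly as tl

A = tl.tensor(np.random.rand(10, 3))   # tall matrix, n = 10 >= r = 3
row_idx, A_inv = maxvol(A)

coeffs = tl.dot(A, A_inv)              # n x r coefficient matrix
# For the exact maximal-volume submatrix, |coeffs| <= 1 entrywise;
# the greedy selection above approximates this bound.
print(row_idx, tl.max(tl.abs(coeffs)))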
Example #2
def active_set_nnls(Utm, UtU, x=None, n_iter_max=100, tol=10e-8):
    """
     Active set algorithm for non-negative least square solution.

     Computes an approximate non-negative solution for Ux=m linear system.

    Parameters
    ----------
    Utm : vectorized ndarray
        Pre-computed product of the transpose of U with m
    UtU : ndarray
        Pre-computed Kronecker product of the transpose of U with U
    x : ndarray, optional
        Initial solution. Default: None
    n_iter_max : int
        Maximum number of iterations. Default: 100
    tol : float
        Early stopping criterion

    Returns
    -------
    x : ndarray
        Non-negative solution of the system

    Notes
    -----
    This function solves the following problem:

    .. math::
       \\begin{equation}
            \\min_{x \\geq 0} ||Ux - m||^2
       \\end{equation}

    Following [1], the algorithm is driven by the gradient of the unconstrained
    objective (``x_gradient`` in the code below), computed from the
    pre-computed products as:

    .. math::
       \\begin{equation}
            w = U^T m - (U^T U) x
       \\end{equation}

    References
    ----------
    [1] : Bro, R., & De Jong, S. (1997). A fast non-negativity-constrained
          least squares algorithm. Journal of Chemometrics: A Journal of
          the Chemometrics Society, 11(5), 393-401.
    """
    if tl.get_backend() == 'tensorflow':
        raise ValueError(
            "Active set is not supported with the tensorflow backend. Consider using fista method with tensorflow."
        )

    if x is None:
        x_vec = tl.zeros(tl.shape(UtU)[1], **tl.context(UtU))
    else:
        x_vec = tl.base.tensor_to_vec(x)

    x_gradient = Utm - tl.dot(UtU, x_vec)
    passive_set = x_vec > 0
    active_set = x_vec <= 0
    support_vec = tl.zeros(tl.shape(x_vec), **tl.context(x_vec))

    for iteration in range(n_iter_max):

        if iteration > 0 or tl.all(x_vec == 0):
            indice = tl.argmax(x_gradient)
            passive_set = tl.index_update(passive_set, tl.index[indice], True)
            active_set = tl.index_update(active_set, tl.index[indice], False)
        # Solve the unconstrained system restricted to the passive set;
        # guard against a singular system when an initial x was supplied
        try:
            passive_solution = tl.solve(UtU[passive_set, :][:, passive_set],
                                        Utm[passive_set])
            indice_list = []
            # Scatter the passive-set solution into the full-length support
            # vector, zeroing the entries belonging to the active set
            for i in range(tl.shape(support_vec)[0]):
                if passive_set[i]:
                    indice_list.append(i)
                    support_vec = tl.index_update(
                        support_vec, tl.index[int(i)],
                        passive_solution[len(indice_list) - 1])
                else:
                    support_vec = tl.index_update(support_vec,
                                                  tl.index[int(i)], 0)
        # Restart from zeros if the solve fails
        except Exception:
            x_vec = tl.zeros(tl.shape(UtU)[1], **tl.context(UtU))
            support_vec = tl.zeros(tl.shape(x_vec), **tl.context(x_vec))
            passive_set = x_vec > 0
            active_set = x_vec <= 0
            if tl.any(active_set):
                indice = tl.argmax(x_gradient)
                passive_set = tl.index_update(passive_set, tl.index[indice],
                                              True)
                active_set = tl.index_update(active_set, tl.index[indice],
                                             False)
            passive_solution = tl.solve(UtU[passive_set, :][:, passive_set],
                                        Utm[passive_set])
            indice_list = []
            for i in range(tl.shape(support_vec)[0]):
                if passive_set[i]:
                    indice_list.append(i)
                    support_vec = tl.index_update(
                        support_vec, tl.index[int(i)],
                        passive_solution[len(indice_list) - 1])
                else:
                    support_vec = tl.index_update(support_vec,
                                                  tl.index[int(i)], 0)

        # If the passive-set solution has non-positive entries, step back
        # towards the previous feasible x until feasibility is restored
        if tl.min(support_vec[passive_set]) <= 0:
            for _ in range(len(passive_set)):
                # Step length: the largest alpha keeping
                # x + alpha * (s - x) non-negative (the step rule of [1])
                alpha = tl.min(
                    x_vec[passive_set][support_vec[passive_set] <= 0] /
                    (x_vec[passive_set][support_vec[passive_set] <= 0] -
                     support_vec[passive_set][support_vec[passive_set] <= 0]))
                update = alpha * (support_vec - x_vec)
                x_vec = x_vec + update
                passive_set = x_vec > 0
                active_set = x_vec <= 0
                passive_solution = tl.solve(
                    UtU[passive_set, :][:, passive_set], Utm[passive_set])
                indice_list = []
                for i in range(tl.shape(support_vec)[0]):
                    if passive_set[i]:
                        indice_list.append(i)
                        support_vec = tl.index_update(
                            support_vec, tl.index[int(i)],
                            passive_solution[len(indice_list) - 1])
                    else:
                        support_vec = tl.index_update(support_vec,
                                                      tl.index[int(i)], 0)

                if not tl.any(passive_set) or tl.min(
                        support_vec[passive_set]) > 0:
                    break
        # Set x to the support vector s, clipped to be non-negative
        x_vec = tl.clip(support_vec, 0, tl.max(support_vec))

        # gradient update
        x_gradient = Utm - tl.dot(UtU, x_vec)

        # Stop when the active set is empty or its gradient entries are below tol
        if not tl.any(active_set) or tl.max(x_gradient[active_set]) <= tol:
            break

    return x_vec
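
A minimal usage sketch (an addition, not part of the original source; the names U and m are illustrative): it forms the pre-computed products for a plain least-squares system and checks that the returned solution is non-negative. In the CP setting, UtU would instead be the Kronecker-structured product the docstring describes.

import numpy as np
import tensorly as tl

U = tl.tensor(np.random.rand(20, 5))   # system matrix
m = tl.tensor(np.random.rand(20))      # right-hand side

Utm = tl.dot(tl.transpose(U), m)       # pre-computed U^T m
UtU = tl.dot(tl.transpose(U), U)       # pre-computed U^T U

x = active_set_nnls(Utm, UtU)
assert tl.all(x >= 0)                  # non-negative by construction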