import tensorly as tl


def maxvol(A):
    """Find the r x r submatrix of maximal volume in A (n x r, n >= r).

    We want to decompose the matrix A as

        A = A[:, J] * (A[I, J])^-1 * A[I, :]

    This algorithm finds the submatrix A[I, J] of A with the largest
    determinant in absolute value. It greedily selects the row of maximal
    norm, then shrinks the remaining rows by their projection onto it.

    Parameters
    ----------
    A : matrix
        The matrix in which to find the submatrix of maximal volume.

    Returns
    -------
    row_idx : list of int
        The list of rows of A forming the submatrix with maximal volume.
    A_inv : matrix
        The inverse of the submatrix with maximal volume.

    References
    ----------
    .. [1] S. A. Goreinov, I. V. Oseledets, D. V. Savostyanov,
       E. E. Tyrtyshnikov, N. L. Zamarashkin. "How to find a good
       submatrix." Matrix Methods: Theory, Algorithms and Applications:
       Dedicated to the Memory of Gene Golub, 2010, pp. 247-256.

    .. [2] Ali Çivril, Malik Magdon-Ismail. "On selecting a maximum volume
       sub-matrix of a matrix and related problems." Theoretical Computer
       Science, Volume 410, Issues 47-49, 6 November 2009, pp. 4801-4811.
    """
    (n, r) = tl.shape(A)

    # Indices of the rows of the selected submatrix
    row_idx = tl.zeros(r)

    # Indices of the rows not yet selected
    rest_of_rows = tl.tensor(list(range(n)), dtype=tl.int64)

    # Select r rows iteratively
    i = 0
    A_new = A
    while i < r:
        mask = list(range(tl.shape(A_new)[0]))
        # Squared norm of each remaining row
        rows_norms = tl.sum(A_new ** 2, axis=1)

        # If there is only one row of A left, just return it.
        # (MXNet is not robust about this case.)
        if tl.shape(rows_norms) == ():
            row_idx[i] = rest_of_rows
            break

        # If a row is zero, delete it.
        if any(rows_norms == 0):
            zero_idx = tl.argmin(rows_norms, axis=0)
            mask.pop(zero_idx)
            rest_of_rows = rest_of_rows[mask]
            A_new = A_new[mask, :]
            continue

        # Find the row of maximal norm
        max_row_idx = tl.argmax(rows_norms, axis=0)
        max_row = A[rest_of_rows[max_row_idx], :]

        # Compute the cosine similarity between the selected row a and each
        # remaining row b: <a, b> / (|a| * |b|). Note that rows_norms holds
        # squared norms, hence the sqrt of their product.
        projection = tl.dot(A_new, tl.transpose(max_row))
        normalization = tl.sqrt(rows_norms[max_row_idx] * rows_norms)
        # Make sure the normalization vector has the same shape as the
        # projection (shape mismatches cause bugs with the MXNet backend)
        normalization = tl.reshape(normalization, tl.shape(projection))
        projection = projection / normalization

        # Shrink each remaining row by its similarity with the selected row:
        # b <- b - b * cos(a, b)
        A_new = A_new - A_new * tl.reshape(projection, (tl.shape(A_new)[0], 1))

        # Delete the selected row
        mask.pop(max_row_idx)
        A_new = A_new[mask, :]

        # Update row_idx and rest_of_rows
        row_idx[i] = rest_of_rows[max_row_idx]
        rest_of_rows = rest_of_rows[mask]
        i = i + 1

    row_idx = tl.tensor(row_idx, dtype=tl.int64)
    inverse = tl.solve(A[row_idx, :],
                       tl.eye(tl.shape(A[row_idx, :])[0], **tl.context(A)))
    row_idx = tl.to_numpy(row_idx)

    return row_idx, inverse
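
# A minimal usage sketch (not part of the original source): it assumes the
# NumPy backend of tensorly, and the 6x3 random matrix is purely
# illustrative. maxvol returns the selected row indices together with the
# inverse of the selected submatrix, so their product is the identity up to
# numerical error.
def _maxvol_demo():
    import numpy as np

    A = tl.tensor(np.random.random_sample((6, 3)))
    row_idx, A_inv = maxvol(A)
    # The selected 3x3 submatrix times its inverse is (numerically) identity
    assert np.allclose(tl.to_numpy(tl.dot(A[row_idx, :], A_inv)), np.eye(3))
    return row_idx
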
def unimodality_prox(tensor):
    """Project each column of the input array onto the set of unimodal
    arrays, i.e. arrays satisfying

        x[1] <= x[2] <= ... <= x[j] >= x[j+1] >= ... >= x[n]

    columnwise.

    Parameters
    ----------
    tensor : ndarray

    Returns
    -------
    ndarray
        A tensor whose columns are unimodal.

    References
    ----------
    .. [1] Bro, R., & Sidiropoulos, N. D. (1998). Least squares algorithms
       under unimodality and non-negativity constraints. Journal of
       Chemometrics: A Journal of the Chemometrics Society, 12(4), 223-247.
    """
    if tl.ndim(tensor) == 1:
        tensor = tl.vec_to_tensor(tensor, [tl.shape(tensor)[0], 1])
    elif tl.ndim(tensor) > 2:
        raise ValueError(
            "Unimodality prox doesn't support an input which has more than 2 dimensions."
        )

    tensor_unimodal = tl.copy(tensor)
    # Best monotone increasing and decreasing approximations of each column
    monotone_increasing = tl.tensor(monotonicity_prox(tensor),
                                    **tl.context(tensor))
    monotone_decreasing = tl.tensor(monotonicity_prox(tensor, decreasing=True),
                                    **tl.context(tensor))
    # Find the mutual peak points: entries lying on or above both monotone fits
    values = tl.tensor(
        tl.to_numpy(tensor - monotone_decreasing >= 0)
        * tl.to_numpy(tensor - monotone_increasing >= 0),
        **tl.context(tensor))

    # Cumulative cost of the increasing fit before each candidate peak
    sum_inc = tl.where(values == 1,
                       tl.cumsum(tl.abs(tensor - monotone_increasing), axis=0),
                       tl.tensor(0, **tl.context(tensor)))
    sum_inc = tl.where(values == 1,
                       sum_inc - tl.abs(tensor - monotone_increasing),
                       tl.tensor(0, **tl.context(tensor)))
    # Cumulative cost of the decreasing fit after each candidate peak,
    # computed on the flipped tensor
    sum_dec = tl.where(
        tl.flip(values, axis=0) == 1,
        tl.cumsum(tl.abs(tl.flip(tensor, axis=0)
                         - tl.flip(monotone_decreasing, axis=0)),
                  axis=0),
        tl.tensor(0, **tl.context(tensor)))
    sum_dec = tl.where(
        tl.flip(values, axis=0) == 1,
        sum_dec - tl.abs(tl.flip(tensor, axis=0)
                         - tl.flip(monotone_decreasing, axis=0)),
        tl.tensor(0, **tl.context(tensor)))

    # Total cost of placing the peak at each candidate index; non-candidates
    # get the maximal cost so that argmin ignores them
    difference = tl.where(values == 1,
                          sum_inc + tl.flip(sum_dec, axis=0),
                          tl.max(sum_inc + tl.flip(sum_dec, axis=0)))
    min_indices = tl.argmin(tl.tensor(difference), axis=0)
    # Stitch the two monotone fits together at the best peak of each column
    for i in range(len(min_indices)):
        tensor_unimodal = tl.index_update(
            tensor_unimodal, tl.index[:int(min_indices[i]), i],
            monotone_increasing[:int(min_indices[i]), i])
        tensor_unimodal = tl.index_update(
            tensor_unimodal, tl.index[int(min_indices[i] + 1):, i],
            monotone_decreasing[int(min_indices[i] + 1):, i])
    return tensor_unimodal
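
# A minimal usage sketch (not part of the original source): it assumes the
# NumPy backend of tensorly and that monotonicity_prox, referenced above, is
# defined in the same module. The input column is made up to have two local
# peaks, so the projection must merge them into a single one.
def _unimodality_prox_demo():
    # A column that violates unimodality: it rises, dips, rises, then falls
    x = tl.tensor([[1.0], [3.0], [2.0], [5.0], [4.0], [1.0]])
    y = unimodality_prox(x)
    # The column of y now increases up to a single peak and decreases after it
    return y
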