Example #1
    def testAddSparseWrapp(self):
        X = numpy.random.rand(10, 5)
        U, s, V = numpy.linalg.svd(X)

        def myTest(U, s, V, X, k):
            self.assertTrue(X.shape == (10, 5))
            self.assertTrue(U.shape[0] == 10)
            self.assertTrue(V.shape[0] == 5)
            return U, s, V

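        # The wrapper is exercised on X and, with U and V swapped, on X.T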
        SVDUpdate._addSparseWrapp(myTest, U, s, V, X)
        SVDUpdate._addSparseWrapp(myTest, V, s, U, X.T)
Example #2
    def testAddRows(self):

        #Test case when k = rank
        Utilde, Stilde, Vtilde = SVDUpdate.addRows(self.U, self.s, self.V,
                                                   self.C)

        nptst.assert_array_almost_equal(Utilde.T.dot(Utilde),
                                        numpy.eye(Utilde.shape[1]))
        nptst.assert_array_almost_equal(Vtilde.T.dot(Vtilde),
                                        numpy.eye(Vtilde.shape[1]))

        self.assertEqual(Stilde.shape[0], self.k)

        #Check we get the original solution with full SVD
        U, s, V = numpy.linalg.svd(self.A)
        inds = numpy.flipud(numpy.argsort(s))
        U, s, V = Util.indSvd(U, s, V, inds)

        Utilde, Stilde, Vtilde = SVDUpdate.addRows(U, s, V, self.C)
        D = numpy.r_[self.A, self.C]
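        # D stacks the rows of C below A; at full rank the updated SVD should reproduce it exactly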

        nptst.assert_array_almost_equal(D, (Utilde * Stilde).dot(Vtilde.T), 4)

        #Check solution for partial rank SVD
        k = 20
        U, s, V = numpy.linalg.svd(self.A)
        inds = numpy.flipud(numpy.argsort(s))[0:k]
        U, s, V = Util.indSvd(U, s, V, inds)

        Utilde, Stilde, Vtilde = SVDUpdate.addRows(U, s, V, self.C)
        D = numpy.r_[(U * s).dot(V.T), self.C]
        U, s, V = numpy.linalg.svd(D)
        inds = numpy.flipud(numpy.argsort(s))[0:k]
        U, s, V = Util.indSvd(U, s, V, inds)

        nptst.assert_array_almost_equal((U * s).dot(V.T),
                                        (Utilde * Stilde).dot(Vtilde.T), 4)

        #Test if same as add cols
        U, s, V = numpy.linalg.svd(self.A)
        inds = numpy.flipud(numpy.argsort(s))[0:k]
        U, s, V = Util.indSvd(U, s, V, inds)
        Utilde, sTilde, Vtilde = SVDUpdate.addRows(U, s, V, self.C)
        Vtilde2, sTilde2, Utilde2 = SVDUpdate.addCols(V, s, U, self.C.T)

        nptst.assert_array_almost_equal((Utilde * sTilde).dot(Vtilde.T),
                                        (Utilde2 * sTilde2).dot(Vtilde2.T))
Example #3
    def testAddCols2(self):
        Utilde, Stilde, Vtilde = SVDUpdate.addCols2(self.U, self.s, self.V, self.B)
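        # Low-rank estimate of [A, B] rebuilt from the updated factors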
        ABkEst = numpy.dot((Utilde*Stilde), Vtilde.T)

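        # Compare against the full matrix AB and its rank-k truncation ABk from setUp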
        print(numpy.linalg.norm(self.AB))
        print(numpy.linalg.norm(self.AB - self.ABk))
        print(numpy.linalg.norm(self.AB - ABkEst))
        print(numpy.linalg.norm(self.ABk - ABkEst))
Example #4
    def f(U, s, V, X, k):
        return SVDUpdate.addSparseRSVD(U, s, V, X, k, kX=2 * self.k, kRand=2 * self.k, q=1)
Example #5
    def setUp(self):
        numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
        numpy.random.seed(21)
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

        # To test functions
        m = 100
        n = 80
        r = 20
        self.k = 10
        p = 0.1  # proportion of coefficients in the sparse matrix

        self.A = numpy.random.rand(m, n)
        U, s, VT = numpy.linalg.svd(self.A)
        V = VT.T
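        # Keep only the k largest singular triplets of A as the initial rank-k factors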
        inds = numpy.flipud(numpy.argsort(s))
        self.U = U[:, inds[0:self.k]]
        self.s = s[inds[0:self.k]]
        self.V = V[:, inds[0:self.k]]

        self.C = numpy.random.rand(r, n)

        # Specific to addCols functions
        self.B = numpy.random.rand(m, r)
        self.AB = numpy.c_[self.A, self.B]

        UAB, sAB, VABT = numpy.linalg.svd(self.AB, full_matrices=False)
        VAB = VABT.T
        inds = numpy.flipud(numpy.argsort(sAB))
        UAB = UAB[:, inds[0:self.k]]
        sAB = sAB[inds[0:self.k]]
        VAB = VAB[:, inds[0:self.k]]
        self.ABk = numpy.dot(numpy.dot(UAB, numpy.diag(sAB)), VAB.T)

        # Specific to addSparse functions
        X = numpy.random.rand(m, n)
        X[numpy.random.rand(m, n) < 1 - p] = 0
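        # Roughly a fraction p of the entries remain non-zero; X is then stored as a sparse matrix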
        self.X = scipy.sparse.csc_matrix(X)
        self.AX = self.A + self.X.todense()
        UAX, sAX, VAXT = numpy.linalg.svd(self.AX, full_matrices=False)
        VAX = VAXT.T
        inds = numpy.flipud(numpy.argsort(sAX))
        UAX = UAX[:, inds[0:self.k]]
        sAX = sAX[inds[0:self.k]]
        VAX = VAX[:, inds[0:self.k]]
        self.AXk = numpy.dot(numpy.dot(UAX, numpy.diag(sAX)), VAX.T)
        UAkXk, sAkXk, VAkXk = SVDUpdate.addSparse(self.U, self.s, self.V,
                                                  self.X, self.k)
        self.AkXk = numpy.dot(numpy.dot(UAkXk, numpy.diag(sAkXk)), VAkXk.T)
Example #6
            def next(self):
                X = self.XIterator.next()
                logging.debug("Learning on matrix with shape: " +
                              str(X.shape) + " and " + str(X.nnz) +
                              " non-zeros")

                if self.iterativeSoftImpute.weighted:
                    #Compute row and col probabilities
                    up, vp = SparseUtils.nonzeroRowColsProbs(X)
                    nzuInds = up == 0
                    nzvInds = vp == 0
                    u = numpy.sqrt(1 / (up + numpy.array(nzuInds, numpy.int)))
                    v = numpy.sqrt(1 / (vp + numpy.array(nzvInds, numpy.int)))
                    u[nzuInds] = 0
                    v[nzvInds] = 0

                if self.rhos is not None:
                    self.iterativeSoftImpute.setRho(self.rhos.next())

                if not scipy.sparse.isspmatrix_csc(X):
                    raise ValueError("X must be a csc_matrix not " +
                                     str(type(X)))

                #Figure out what lambda should be
                #PROPACK has problems with convergence
                Y = scipy.sparse.csc_matrix(X, dtype=numpy.float)
                U, s, V = ExpSU.SparseUtils.svdArpack(Y, 1, kmax=20)
                del Y
                #U, s, V = SparseUtils.svdPropack(X, 1, kmax=20)
                maxS = s[0]
                logging.debug("Largest singular value : " + str(maxS))

                (n, m) = X.shape

                if self.j == 0:
                    self.oldU = numpy.zeros((n, 1))
                    self.oldS = numpy.zeros(1)
                    self.oldV = numpy.zeros((m, 1))
                else:
                    oldN = self.oldU.shape[0]
                    oldM = self.oldV.shape[0]

                    if self.iterativeSoftImpute.updateAlg == "initial":
                        if n > oldN:
                            self.oldU = Util.extendArray(
                                self.oldU, (n, self.oldU.shape[1]))
                        elif n < oldN:
                            self.oldU = self.oldU[0:n, :]

                        if m > oldM:
                            self.oldV = Util.extendArray(
                                self.oldV, (m, self.oldV.shape[1]))
                        elif m < oldN:
                            self.oldV = self.oldV[0:m, :]
                    elif self.iterativeSoftImpute.updateAlg == "zero":
                        self.oldU = numpy.zeros((n, 1))
                        self.oldS = numpy.zeros(1)
                        self.oldV = numpy.zeros((m, 1))
                    else:
                        raise ValueError("Unknown SVD update algorithm: " +
                                         self.updateAlg)

                rowInds, colInds = X.nonzero()

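                # gamma starts above eps so the loop below runs at least once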
                gamma = self.iterativeSoftImpute.eps + 1
                i = 0

                self.iterativeSoftImpute.measures = numpy.zeros(
                    (self.iterativeSoftImpute.maxIterations, 4))

                while gamma > self.iterativeSoftImpute.eps:
                    if i == self.iterativeSoftImpute.maxIterations:
                        logging.debug("Maximum number of iterations reached")
                        break

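                    # Current low-rank estimate evaluated only on the observed (non-zero) entries of X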
                    ZOmega = SparseUtilsCython.partialReconstructPQ(
                        (rowInds, colInds), self.oldU * self.oldS, self.oldV)
                    Y = X - ZOmega
                    #Y = Y.tocsc()
                    #del ZOmega
                    Y = csarray(Y, storagetype="row")
                    gc.collect()

                    #os.system('taskset -p 0xffffffff %d' % os.getpid())

                    if self.iterativeSoftImpute.svdAlg == "propack":
                        L = LinOperatorUtils.sparseLowRankOp(Y,
                                                             self.oldU,
                                                             self.oldS,
                                                             self.oldV,
                                                             parallel=False)
                        newU, newS, newV = SparseUtils.svdPropack(
                            L,
                            k=self.iterativeSoftImpute.k,
                            kmax=self.iterativeSoftImpute.kmax)
                    elif self.iterativeSoftImpute.svdAlg == "arpack":
                        L = LinOperatorUtils.sparseLowRankOp(Y,
                                                             self.oldU,
                                                             self.oldS,
                                                             self.oldV,
                                                             parallel=False)
                        newU, newS, newV = SparseUtils.svdArpack(
                            L,
                            k=self.iterativeSoftImpute.k,
                            kmax=self.iterativeSoftImpute.kmax)
                    elif self.iterativeSoftImpute.svdAlg == "svdUpdate":
                        newU, newS, newV = SVDUpdate.addSparseProjected(
                            self.oldU, self.oldS, self.oldV, Y,
                            self.iterativeSoftImpute.k)
                    elif self.iterativeSoftImpute.svdAlg == "rsvd":
                        L = LinOperatorUtils.sparseLowRankOp(Y,
                                                             self.oldU,
                                                             self.oldS,
                                                             self.oldV,
                                                             parallel=True)
                        newU, newS, newV = RandomisedSVD.svd(
                            L,
                            self.iterativeSoftImpute.k,
                            p=self.iterativeSoftImpute.p,
                            q=self.iterativeSoftImpute.q)
                    elif self.iterativeSoftImpute.svdAlg == "rsvdUpdate":
                        L = LinOperatorUtils.sparseLowRankOp(Y,
                                                             self.oldU,
                                                             self.oldS,
                                                             self.oldV,
                                                             parallel=True)
                        if self.j == 0:
                            newU, newS, newV = RandomisedSVD.svd(
                                L,
                                self.iterativeSoftImpute.k,
                                p=self.iterativeSoftImpute.p,
                                q=self.iterativeSoftImpute.q)
                        else:
                            newU, newS, newV = RandomisedSVD.svd(
                                L,
                                self.iterativeSoftImpute.k,
                                p=self.iterativeSoftImpute.p,
                                q=self.iterativeSoftImpute.qu,
                                omega=self.oldV)
                    elif self.iterativeSoftImpute.svdAlg == "rsvdUpdate2":

                        if self.j == 0:
                            L = LinOperatorUtils.sparseLowRankOp(Y,
                                                                 self.oldU,
                                                                 self.oldS,
                                                                 self.oldV,
                                                                 parallel=True)
                            newU, newS, newV = RandomisedSVD.svd(
                                L,
                                self.iterativeSoftImpute.k,
                                p=self.iterativeSoftImpute.p,
                                q=self.iterativeSoftImpute.q)
                        else:
                            #Need linear operator which is U s V
                            L = LinOperatorUtils.lowRankOp(
                                self.oldU, self.oldS, self.oldV)
                            Y = GeneralLinearOperator.asLinearOperator(
                                Y, parallel=True)
                            newU, newS, newV = RandomisedSVD.updateSvd(
                                L,
                                self.oldU,
                                self.oldS,
                                self.oldV,
                                Y,
                                self.iterativeSoftImpute.k,
                                p=self.iterativeSoftImpute.p)
                    else:
                        raise ValueError("Unknown SVD algorithm: " +
                                         self.iterativeSoftImpute.svdAlg)

                    if self.iterativeSoftImpute.weighted and i == 0:
                        delta = numpy.diag((u * newU.T).dot(newU))
                        pi = numpy.diag((v * newV.T).dot(newV))
                        lmbda = (maxS / numpy.max(
                            delta * pi)) * self.iterativeSoftImpute.rho
                        lmbdav = lmbda * delta * pi
                    elif not self.iterativeSoftImpute.weighted:
                        lmbda = maxS * self.iterativeSoftImpute.rho
                        if i == 0:
                            logging.debug("lambda: " + str(lmbda))
                        lmbdav = lmbda

                    newS = newS - lmbdav
                    #Soft threshold
                    newS = numpy.clip(newS, 0, numpy.max(newS))

                    normOldZ = (self.oldS**2).sum()
                    normNewZmOldZ = (self.oldS**2).sum() + (
                        newS**2).sum() - 2 * numpy.trace(
                            (self.oldV.T.dot(newV * newS)).dot(
                                newU.T.dot(self.oldU * self.oldS)))
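                    # Equals ||Z_new - Z_old||_F^2 computed from the factors, assuming orthonormal U and V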

                    #We can get newZ == oldZ in which case we break
                    if normNewZmOldZ < self.tol:
                        gamma = 0
                    elif abs(normOldZ) < self.tol:
                        gamma = self.iterativeSoftImpute.eps + 1
                    else:
                        gamma = normNewZmOldZ / normOldZ

                    if self.iterativeSoftImpute.verbose:
                        theta1 = (
                            self.iterativeSoftImpute.k -
                            numpy.linalg.norm(self.oldU.T.dot(newU), 'fro')**
                            2) / self.iterativeSoftImpute.k
                        theta2 = (
                            self.iterativeSoftImpute.k -
                            numpy.linalg.norm(self.oldV.T.dot(newV), 'fro')**
                            2) / self.iterativeSoftImpute.k
                        thetaS = numpy.linalg.norm(
                            newS - self.oldS)**2 / numpy.linalg.norm(newS)**2
                        self.iterativeSoftImpute.measures[i, :] = numpy.array(
                            [gamma, theta1, theta2, thetaS])

                    self.oldU = newU.copy()
                    self.oldS = newS.copy()
                    self.oldV = newV.copy()

                    logging.debug("Iteration " + str(i) + " gamma=" +
                                  str(gamma))
                    i += 1

                if self.iterativeSoftImpute.postProcess:
                    #Add the mean vectors
                    previousS = newS
                    newU = numpy.c_[newU, numpy.array(X.mean(1)).ravel()]
                    newV = numpy.c_[newV, numpy.array(X.mean(0)).ravel()]
                    newS = self.iterativeSoftImpute.unshrink(X, newU, newV)

                    #Note that this increases the rank of U and V by 1
                    #print("Difference in s after postprocessing: " + str(numpy.linalg.norm(previousS - newS[0:-1])))
                    logging.debug("Difference in s after postprocessing: " +
                                  str(numpy.linalg.norm(previousS -
                                                        newS[0:-1])))

                logging.debug("Number of iterations for rho=" +
                              str(self.iterativeSoftImpute.rho) + ": " +
                              str(i))
                self.j += 1
                return (newU, newS, newV)