Example #1
    def merge(self, other, decay=1.0):
        """Merge current :class:`~gensim.models.lsimodel.Projection` instance with another.

        Warnings
        --------
        The content of `other` is destroyed in the process, so pass this function a copy of `other`
        if you need it further. The `other` :class:`~gensim.models.lsimodel.Projection` is expected to contain
        the same number of features.

        Parameters
        ----------
        other : :class:`~gensim.models.lsimodel.Projection`
            The Projection object to be merged into the current one. It will be destroyed after merging.
        decay : float, optional
            Weight of existing observations relative to new ones.
            Setting `decay` < 1.0 causes re-orientation towards new data trends in the input document stream,
            by giving less emphasis to old observations. This allows LSA to gradually "forget" old observations
            (documents) and give more preference to new ones.

        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update is using %s features, expected %s" % (other.m, self.m)
            )
        logger.info("merging projections: %s + %s", str(self.u.shape), str(other.u.shape))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = np.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= np.dot(self.u, c)

        other.u = [other.u]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
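        # note: np.multiply(c, other.s) scales each column of c by the matching entry of other.s,
        # i.e. it computes c * diag(s2). k below is the small core matrix
        # [[decay * diag(s1), c * diag(s2)], [0, r * diag(s2)]], whose SVD gives the merged spectrum.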
        k = np.bmat([
            [np.diag(decay * self.s), np.multiply(c, other.s)],
            [matutils.pad(np.array([]).reshape(0, 0), min(m, n2), n1), np.multiply(r, other.s)]
        ])
        logger.debug("computing SVD of %s dense matrix", k.shape)
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge".
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:

            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper
            # for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False)
            s_k = np.sqrt(s_k)  # go back from eigen values to singular values

        # note: `k` is reused here; it now holds the number of factors to keep (an int), not the core matrix above
        k = clip_spectrum(s_k ** 2, self.k)
        u1_k, u2_k, s_k = np.array(u_k[:n1, :k]), np.array(u_k[n1:, :k]), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = np.dot(self.u, u1_k)

        q = ascarray(q, 'q')
        q = np.dot(q, u2_k)
        self.u += q

        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in range(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
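
A minimal usage sketch for the merge() above (hedged: it assumes gensim's Projection(m, k, docs=...) constructor, whose exact signature may vary across gensim versions):

from gensim.models.lsimodel import Projection

m, k = 5, 2  # vocabulary size, number of latent factors
batch1 = [[(0, 1.0), (2, 2.0)], [(1, 1.0), (3, 1.0)]]  # bag-of-words vectors
batch2 = [[(0, 3.0), (4, 1.0)], [(2, 1.0), (3, 2.0)]]

p1 = Projection(m, k, docs=batch1)
p2 = Projection(m, k, docs=batch2)
p1.merge(p2, decay=0.9)  # fold batch2 into p1; the contents of p2 are destroyed
print(p1.u.shape, p1.s)  # left singular vectors and singular values of the merged projection
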
Example #2
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=np.float64, eps=1e-6):
    """Run truncated Singular Value Decomposition (SVD) on a sparse input.

    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse}
        Input corpus as a stream (does not have to fit in RAM)
        or a sparse matrix of shape (`num_terms`, num_documents).
    rank : int
        Desired number of factors to be retained after decomposition.
    num_terms : int
        The number of features (terms) in `corpus`.
    chunksize : int, optional
        Number of documents to be used in each training chunk.
    extra_dims : int, optional
        Extra samples to be used besides the requested `rank`. Can improve accuracy.
    power_iters : int, optional
        Number of power iteration steps to be used. Increasing the number of power iterations improves accuracy,
        but lowers performance.
    dtype : numpy.dtype, optional
        Enforces a type for elements of the decomposed matrix.
    eps : float, optional
        Percentage of the spectrum's energy to be discarded.

    Notes
    -----
    The corpus may be larger than RAM (iterator of vectors). If `corpus` is a `scipy.sparse.csc` matrix
    instead, it is assumed the whole corpus fits into core memory and a different (more efficient) code path is chosen.
    This may return less than the requested number of top `rank` factors, in case the input itself is of lower rank.
    The `extra_dims` (oversampling) and especially `power_iters` (power iterations) parameters affect accuracy of the
    decomposition.

    This algorithm uses `2 + power_iters` passes over the input data. In case you can only afford a single pass,
    set `onepass=True` in :class:`~gensim.models.lsimodel.LsiModel` and avoid using this function directly.

    The decomposition algorithm is based on `"Finding structure with randomness:
    Probabilistic algorithms for constructing approximate matrix decompositions" <https://arxiv.org/abs/0909.4061>`_.


    Returns
    -------
    (np.ndarray 2D, np.ndarray 1D)
        The left singular vectors and the singular values of the `corpus`.

    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank)  # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations", samples - rank, power_iters)

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = np.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix", str(y.shape))

    if scipy.sparse.issparse(corpus):
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = np.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype)  # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel())  # y = corpus * o
        del o

        # unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix", str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        logger.debug("running %i power iterations", power_iters)
        for _ in range(power_iters):
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range after each power iteration step
    else:
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i', (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)  # total number of non-zero entries in this chunk (not used below)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize  # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = np.random.normal(0.0, 1.0, (n, samples)).astype(dtype)  # draw a random gaussian matrix
            sparsetools.csc_matvecs(
                m, n, samples, chunk.indptr, chunk.indices,  # y = y + chunk * o
                chunk.data, o.ravel(), y.ravel()
            )
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        for power_iter in range(power_iters):
            logger.info("running power iteration #%i", power_iter + 1)
            yold = q.copy()
            q[:] = 0.0
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
                # documents = columns of sparse CSC
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix", str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        x = np.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=dtype)
        logger.info("2nd phase: constructing %s covariance matrix", str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk  # dense * sparse matrix multiply
            del chunk
            x += np.dot(b, b.T)  # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix", str(x.shape))
        # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        u, s, vt = scipy.linalg.svd(x)
        # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
        s = np.sqrt(s)
    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    keep = clip_spectrum(s ** 2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    u = np.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
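
A minimal usage sketch for stochastic_svd() (hedged: assumes the function is importable from gensim.models.lsimodel as above; check your gensim version):

from gensim.models.lsimodel import stochastic_svd

corpus = [[(0, 1.0), (3, 2.0)], [(1, 1.0)], [(0, 0.5), (2, 1.0), (4, 1.0)]]  # 3 documents over 5 terms
u, s = stochastic_svd(corpus, rank=2, num_terms=5, power_iters=2, extra_dims=10)
print(u.shape, s)  # u: one row per term, at most `rank` columns; s: the matching singular values
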
Example #3
File: lsimodel.py, Project: AmitShah/gensim (Python 2-era code)
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.

        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError("vector space mismatch: update is using %s features, expected %s" %
                             (other.m, self.m))
        logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = numpy.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= numpy.dot(self.u, c)

        other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
        k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
                        [matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
        logger.debug("computing SVD of %s dense matrix" % str(k.shape))
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge".
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK
            # wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False)
            s_k = numpy.sqrt(s_k)  # go back from eigen values to singular values

        k = clip_spectrum(s_k**2, self.k)
        u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = numpy.dot(self.u, u1_k)

        q = ascarray(q, 'q')
        q = numpy.dot(q, u2_k)
        self.u += q

        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
Example #4
File: lsimodel.py, Project: AmitShah/gensim (Python 2-era code)
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=numpy.float64, eps=1e-6):
    """
    Run truncated Singular Value Decomposition (SVD) on a sparse input.

    Return (U, S): the left singular vectors and the singular values of the input
    data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).

    This may return less than the requested number of top `rank` factors, in case
    the input itself is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.

    This algorithm uses `2+power_iters` passes over the input data. In case you can only
    afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
    this function directly.

    The decomposition algorithm is based on
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**

    .. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
       corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix" % str(y.shape))

    if scipy.sparse.issparse(corpus):
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel()) # y = corpus * o
        del o

        # unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix" % str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range

        logger.debug("running %i power iterations" % power_iters)
        for power_iter in xrange(power_iters):
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
    else:
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
            sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
                                    chunk.data, o.ravel(), y.ravel())
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range

        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i" % (power_iter + 1))
            yold = q.copy()
            q[:] = 0.0
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        # note: accumulates in float64 regardless of `dtype`; the result is cast back to `dtype` below
        x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
        logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk # dense * sparse matrix multiply
            del chunk
            x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
        u, s, vt = scipy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    keep = clip_spectrum(s**2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    u = numpy.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
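
The clipSpectrum call above decides how many factors still carry meaningful spectral energy. A hedged sketch of the idea (illustrative only; gensim's exact clipSpectrum thresholding may differ):

import numpy

def clip_spectrum_sketch(s2, k, discard=1e-6):
    """Given squared singular values `s2` (sorted decreasing), return how many of the
    `k` requested factors to keep before the spectrum's tail energy drops below `discard`."""
    rel_energy = numpy.cumsum(s2) / numpy.sum(s2)
    keep = 1 + numpy.searchsorted(rel_energy, 1.0 - discard)  # factors needed to cover 1 - discard of the energy
    return min(k, keep, len(s2))

print(clip_spectrum_sketch(numpy.array([9.0, 4.0, 1.0, 1e-12]), k=4))  # -> 3: the near-zero tail factor is dropped
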
Example #5
File: lsimodel.py, Project: sxjzwq/gensim (Python 2-era code)
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.

        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update is using %s features, expected %s"
                % (other.m, self.m))
        logger.info("merging projections: %s + %s" %
                    (str(self.u.shape), str(other.u.shape)))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = numpy.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= numpy.dot(self.u, c)

        other.u = [other.u]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
        k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
                        [matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
        logger.debug("computing SVD of %s dense matrix" % str(k.shape))
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge".
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK
            # wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False)
            s_k = numpy.sqrt(s_k)  # go back from eigen values to singular values

        k = clip_spectrum(s_k ** 2, self.k)
        u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = numpy.dot(self.u, u1_k)

        q = ascarray(q, 'q')
        q = numpy.dot(q, u2_k)
        self.u += q

        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
Example #6
File: lsimodel.py, Project: andremi/gensim
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another. 
        
        The content of `other` is destroyed in the process, so pass this function a 
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy('F')
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update is using %s features, expected %s"
                % (other.m, self.m))
        logger.info("merging projections: %s + %s" %
                    (str(self.u.shape), str(other.u.shape)))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        # IMPORTANT: keep matrices in a memory order suitable for the matrix products below;
        # failing to do so gives 8x lower performance :(
        self.u = numpy.asfortranarray(self.u)  # does nothing if input already fortran-order array
        other.u = numpy.asfortranarray(other.u)
        gemm = matutils.blas('gemm', self.u)
        logger.debug("constructing orthogonal component")
        c = gemm(1.0, self.u, other.u, trans_a=True)
        gemm(-1.0, self.u, c, beta=1.0, c=other.u, overwrite_c=True)

        other.u = [other.u]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
        k = numpy.bmat([[numpy.diag(decay * self.s), c * other.s],
                        [matutils.pad(numpy.matrix([]).reshape(0, 0), min(m, n2), n1), r * other.s]])
        logger.debug("computing SVD of %s dense matrix" % str(k.shape))
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge".
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK
            # wrapper for partial svd/eigendecomp in numpy :(
            u_k, s_k, _ = numpy.linalg.svd(k, full_matrices=False)
        except numpy.linalg.LinAlgError:
            logging.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = numpy.linalg.svd(numpy.dot(k, k.T), full_matrices=False)
            s_k = numpy.sqrt(s_k)  # go back from eigen values to singular values

        k = clipSpectrum(s_k ** 2, self.k)
        u1_k, u2_k, s_k = u_k[:n1, :k].copy('F'), u_k[n1:, :k].copy('F'), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        # TODO temporarily creates an extra (m, k) dense array in memory. find a way to avoid this!
        self.u = gemm(1.0, self.u, u1_k)
        gemm(1.0, q, u2_k, beta=1.0, c=self.u, overwrite_c=True)
        self.s = s_k
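
The gemm calls above are raw BLAS: c = gemm(alpha, a, b, beta, c, ...) computes alpha*a*b + beta*c, optionally in place. A hedged sketch of the same pattern via scipy (assuming matutils.blas is a thin wrapper over scipy's get_blas_funcs; verify against your versions):

import numpy
from scipy.linalg import get_blas_funcs

a = numpy.asfortranarray(numpy.random.rand(4, 3))
b = numpy.asfortranarray(numpy.random.rand(4, 2))
gemm = get_blas_funcs('gemm', (a, b))  # picks dgemm for float64 inputs

c = gemm(1.0, a, b, trans_a=True)  # c = a.T @ b in a single BLAS call
b = gemm(-1.0, a, c, beta=1.0, c=b, overwrite_c=True)  # b -= a @ c; in place when b is Fortran-ordered
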
Example #7
File: lsimodel.py, Project: andremi/gensim (Python 2-only code: uses xrange and tuple-parameter lambdas)
def stochasticSvd(corpus, rank, num_terms, chunks=20000, extra_dims=None, 
                  power_iters=0, dtype=numpy.float64, eps=1e-6):
    """
    Return (U, S): the left singular vectors and the singular values of the streamed 
    input corpus `corpus` [3]_.
    
    This may actually return less than the requested number of top `rank` factors, 
    in case the input is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.
    
    This algorithm uses `2+power_iters` passes over the data. In case you can only 
    afford a single pass over the input corpus, set `onepass=True` in :class:`LsiModel` 
    and avoid using this algorithm directly.

    The decomposition algorithm is based on 
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
    
    .. [3] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole 
       corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))
    
    num_terms = int(num_terms)
    
    eps = max(float(eps), 1e-9) # must ignore near-zero eigenvalues (probably numerical error); the associated eigenvectors are typically unstable/garbage
    
    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunks` documents (much faster than going one-by-one 
    # and more memory friendly than processing all documents at once)
    y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix" % str(y.shape))
    
    if scipy.sparse.issparse(corpus):
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices, 
                                corpus.data, o.ravel(), y.ravel()) # y = corpus * o
        del o
        y = y.astype(dtype) # TODO unlike numpy, scipy actually makes a copy even when dtype=y.dtype...marginally inefficient
        logger.debug("running %i power iterations" % power_iters)
        for power_iter in xrange(power_iters):
            y = corpus.T * y
            y = corpus * y
    else:
        chunker = itertools.groupby(enumerate(corpus), key=lambda (docno, doc): docno / chunks)
        num_docs = 0
        for chunk_no, (key, group) in enumerate(chunker):
            logger.info('PROGRESS: at document #%i' % (chunk_no * chunks))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunks) matrix!
            chunk = matutils.corpus2csc((doc for _, doc in group), num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunks # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
            sparsetools.csc_matvecs(num_terms, n, samples, chunk.indptr, # y = y + chunk * o
                                    chunk.indices, chunk.data, o.ravel(), y.ravel())
            del chunk, o
        
        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i" % (power_iter + 1))
            yold = y.copy()
            y[:] = 0.0
            chunker = itertools.groupby(enumerate(corpus), key=lambda (docno, doc): docno / chunks)
            for chunk_no, (key, group) in enumerate(chunker):
                logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunks, num_docs))
                chunk = matutils.corpus2csc((doc for _, doc in group), num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                y += tmp
            del yold
    
    logger.info("orthonormalizing %s action matrix" % str(y.shape))
    y = [y]
    q, r = matutils.qr_destroy(y) # orthonormalize the range
    del y
    samples = clipSpectrum(numpy.diag(r), samples, discard=eps)
    qt = numpy.asfortranarray(q[:, :samples].T) # discard bogus columns, in case Y was rank-deficient
    del q
    
    if scipy.sparse.issparse(corpus):
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = numpy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunks` documents from the streaming 
        # input corpus A, to avoid using O(number of documents) memory
        x = numpy.zeros(shape=(samples, samples), dtype=dtype)
        logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
        chunker = itertools.groupby(enumerate(corpus), key=lambda (docno, doc): docno / chunks)
        for chunk_no, (key, group) in enumerate(chunker):
            logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunks, num_docs))
            chunk = matutils.corpus2csc((doc for _, doc in group), num_terms=num_terms, dtype=dtype)
            b = qt * chunk # dense * sparse matrix multiply
            x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del chunk, b
    
        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
        u, s, vt = numpy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
        
    logger.info("computing the final decomposition")
    keep = clipSpectrum(s**2, rank, discard=eps)
    u = numpy.asfortranarray(u[:, :keep])
    s = s[:keep]
    gemm = matutils.blas('gemm', u)
    u = gemm(1.0, qt, u, trans_a=True)
    return u, s
Example #8 (Python 2-era code)
def stochastic_svd(corpus,
                   rank,
                   num_terms,
                   chunksize=20000,
                   extra_dims=None,
                   power_iters=0,
                   dtype=numpy.float64,
                   eps=1e-6):
    """
    Return (U, S): the left singular vectors and the singular values of the streamed
    input corpus `corpus` [3]_.

    This may actually return less than the requested number of top `rank` factors,
    in case the input is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.

    This algorithm uses `2+power_iters` passes over the data. In case you can only
    afford a single pass over the input corpus, set `onepass=True` in :class:`LsiModel`
    and avoid using this algorithm directly.

    The decomposition algorithm is based on
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**

    .. [3] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
       corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank)  # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix" % str(y.shape))

    if scipy.sparse.issparse(corpus):
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype)  # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel())  # y = corpus * o
        del o

        # unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix" % str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        logger.debug("running %i power iterations" % power_iters)
        for power_iter in xrange(power_iters):
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(
                q)  # orthonormalize the range after each power iteration step
    else:
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)  # total number of non-zero entries in this chunk (not used below)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize  # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype)  # draw a random gaussian matrix
            sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices,  # y = y + chunk * o
                                    chunk.data, o.ravel(), y.ravel())
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i" % (power_iter + 1))
            yold = q.copy()
            q[:] = 0.0
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = numpy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        # note: accumulates in float64 regardless of `dtype`; the result is cast back to `dtype` below
        x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
        logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk  # dense * sparse matrix multiply
            del chunk
            x += numpy.dot(b, b.T)  # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix" %
                    str(x.shape))
        u, s, vt = numpy.linalg.svd(
            x
        )  # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(
            s
        )  # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    keep = clip_spectrum(s**2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    u = numpy.dot(q, u)
    return u.astype(dtype), s.astype(dtype)