Example #1
def symeig_semidefinite_reg(A,
                            B=None,
                            eigenvectors=True,
                            turbo="on",
                            range=None,
                            type=1,
                            overwrite=False,
                            rank_threshold=1e-12,
                            dfc_out=None):
    """Regularization-based routine to solve generalized symmetric positive
    semidefinite eigenvalue problems.

    This can be used if the normal ``symeig()`` call in ``_stop_training()``
    throws ``SymeigException('Covariance matrices may be singular')``.

    This solver applies a moderate regularization to B before applying
    ``eigh``/``symeig``. Afterwards it properly detects the rank deficit and
    filters out malformed features.
    For full range, this procedure is (approximately) as efficient as the
    ordinary ``eigh`` implementation, because all additional steps are
    computationally cheap.
    For shorter range, the LDL method should be preferred.

    .. note::
        For efficiency reasons it actually modifies the matrix B
        (even if ``overwrite=False``), but the changes are negligible.

    This function has the same signature as ``mdp.utils.symeig``, with two
    additional parameters:
    
    :param rank_threshold:
        A threshold to determine if an eigenvalue counts as zero.
    :type rank_threshold: float
    
    :param dfc_out:
        If ``dfc_out`` is not ``None``, ``dfc_out.rank_deficit`` will be set
        to an integer indicating how many zero-eigenvalues were detected.
    """
    if type != 1:
        raise ValueError('Only type=1 is supported.')

    # apply some regularization...
    # The following is equivalent to B += rank_threshold*np.eye(B.shape[0]),
    # but works in place, i.e. it avoids allocating np.eye().
    Bflat = B.reshape(B.shape[0] * B.shape[1])
    idx = numx.arange(0, len(Bflat), B.shape[0] + 1)
    diag_tmp = Bflat[idx]
    Bflat[idx] += rank_threshold

    eg, ev = mdp.utils.symeig(A, B, True, turbo, None, type, overwrite)

    Bflat[idx] = diag_tmp
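    # symeig B-normalizes the eigenvectors, i.e. ev_i.T * B_reg * ev_i = 1.
    # With the original (unregularized) B this product stays close to 1 for
    # genuine features but drops to ~0 for features in the null space of B,
    # so m below is ~0 for valid features and ~1 for spurious ones.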
    m = numx.absolute(
        numx.sqrt(numx.absolute(numx.sum(ev * mdp.utils.mult(B, ev), 0))) - 1)
    off = 0
    # In theory all values in m should be close to zero or close to one.
    # 0.5 is therefore a safe threshold to separate the two cases:
    while m[off] > 0.5:
        off += 1
    m_off_sum = numx.sum(m[off:])
    if m_off_sum < 0.5:
        if off > 0:
            if dfc_out is not None:
                dfc_out.rank_deficit = off
            eg = eg[off:]
            ev = ev[:, off:]
    else:
        # Sometimes (unlikely though) the values in m are not sorted
        # In this case we search all indices:
        m_idx = (m < 0.5).nonzero()[0]
        eg = eg[m_idx]
        ev = ev[:, m_idx]
    if range is None:
        return eg, ev
    else:
        return eg[range[0] - 1:range[1]], ev[:, range[0] - 1:range[1]]
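A minimal usage sketch (assuming the routine is exposed as ``mdp.utils.symeig_semidefinite_reg``, as in recent MDP releases; the singular covariance matrices below are made up purely for illustration):

import numpy as np
import mdp

# A generalized problem A*v = lambda*B*v in which B is rank-deficient:
# the last coordinate carries no variance, so a plain symeig/eigh call
# would fail with 'Covariance matrices may be singular'.
x = np.random.randn(200, 3)
x = np.hstack([x, np.zeros((200, 1))])        # 4th column is identically zero
B = np.cov(x, rowvar=False)                   # singular covariance matrix
A = np.cov(np.diff(x, axis=0), rowvar=False)  # singular in the same direction

class _Report(object):                        # hypothetical container for dfc_out
    pass

dfc = _Report()
eg, ev = mdp.utils.symeig_semidefinite_reg(A, B, rank_threshold=1e-12,
                                           dfc_out=dfc)
print(eg)                                     # eigenvalues of the valid subspace
print(getattr(dfc, 'rank_deficit', 0))        # number of filtered zero-eigenvalues

Inside MDP these solvers are normally not called by hand; they serve as drop-in replacements for ``symeig`` when a node's ``_stop_training()`` hits singular covariance matrices.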
Example #2
    def _stop_training(self):
        Cumulator._stop_training(self)

        k = self.k
        M = self.data
        N = M.shape[0]

        if k > N:
            err = ('k=%i must be less than'
                   ' or equal to number of training points N=%i' % (k, N))
            raise TrainingException(err)

        if self.verbose:
            print('performing HLLE on %i points in %i dimensions...' % M.shape)

        # determines number of output dimensions: if desired_variance
        # is specified, we need to learn it from the data. Otherwise,
        # it's easy
        learn_outdim = False
        if self.output_dim is None:
            if self.desired_variance is None:
                self.output_dim = self.input_dim
            else:
                learn_outdim = True

        # determine number of output dims, precalculate useful stuff
        if learn_outdim:
            Qs, sig2s, nbrss = self._adjust_output_dim()

        d_out = self.output_dim

        #dp = d_out + (d_out-1) + (d_out-2) + ...
        dp = d_out * (d_out + 1) // 2

        if min(k, N) <= d_out:
            err = ('k=%i and n=%i (number of input data points) must be'
                   ' larger than output_dim=%i' % (k, N, d_out))
            raise TrainingException(err)

        if k < 1 + d_out + dp:
            wrn = ('The number of neighbours, k=%i, is smaller than'
                   ' 1 + output_dim + output_dim*(output_dim+1)/2 = %i,'
                   ' which might result in unstable results.' %
                   (k, 1 + d_out + dp))
            _warnings.warn(wrn, MDPWarning)

        #build the weight matrix
        #XXX   for faster implementation, W should be a sparse matrix
        W = numx.zeros((N, dp * N), dtype=self.dtype)

        if self.verbose:
            print(' - constructing [%i x %i] weight matrix...' % W.shape)

        for row in range(N):
            if learn_outdim:
                nbrs = nbrss[row, :]
            else:
                # -----------------------------------------------
                #  find k nearest neighbors
                # -----------------------------------------------
                M_Mi = M - M[row]
                nbrs = numx.argsort((M_Mi**2).sum(1))[1:k + 1]

            #-----------------------------------------------
            #  center the neighborhood using the mean
            #-----------------------------------------------
            nbrhd = M[nbrs]  # this makes a copy
            nbrhd -= nbrhd.mean(0)

            #-----------------------------------------------
            #  compute local coordinates
            #   using a singular value decomposition
            #-----------------------------------------------
            U, sig, VT = svd(nbrhd)
            nbrhd = U.T[:d_out]
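            # nbrhd now holds the neighbors' (unscaled) local tangent-space
            # coordinates: one row per tangent direction, one column per neighbor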
            del VT

            #-----------------------------------------------
            #  build Hessian estimator
            #-----------------------------------------------
            Yi = numx.zeros((dp, k), dtype=self.dtype)
            ct = 0
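            # fill Yi with the dp distinct pairwise products of the local
            # coordinates, i.e. the quadratic terms of the local Hessian fit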
            for i in range(d_out):
                Yi[ct:ct + d_out - i, :] = nbrhd[i] * nbrhd[i:, :]
                ct += d_out - i
            Yi = numx.concatenate(
                [numx.ones((1, k), dtype=self.dtype), nbrhd, Yi], 0)

            #-----------------------------------------------
            #  orthogonalize linear and quadratic forms
            #   with QR factorization
            #  and make the weights sum to 1
            #-----------------------------------------------
            if k >= 1 + d_out + dp:
                Q, R = numx_linalg.qr(Yi.T)
                w = Q[:, d_out + 1:d_out + 1 + dp]
            else:
                q, r = _mgs(Yi.T)
                w = q[:, -dp:]

            S = w.sum(0)  #sum along columns
            #if S[i] is too small, set it equal to 1.0
            # this prevents weights from blowing up
            S[numx.where(numx.absolute(S) < 1E-4)] = 1.0
            #print w.shape, S.shape, (w/S).shape
            #print W[nbrs, row*dp:(row+1)*dp].shape
            W[nbrs, row * dp:(row + 1) * dp] = w / S

        #-----------------------------------------------
        # To find the null space, we want the
        #  first d+1 eigenvectors of W.T*W
        # Compute this using an svd of W
        #-----------------------------------------------

        if self.verbose:
            msg = (' - finding [%i x %i] '
                   'null space of weight matrix...' % (d_out, N))
            print(msg)

        #XXX future work:
        #XXX  use of upcoming ARPACK interface for bottom few eigenvectors
        #XXX   of a sparse matrix will significantly increase the speed
        #XXX   of the next step

        if self.svd:
            sig, U = nongeneral_svd(W.T, range=(2, d_out + 1))
            Y = U * numx.sqrt(N)
        else:
            WW = mult(W, W.T)
            # regularizes the eigenvalues, does not change the eigenvectors:
            W_diag_idx = numx.arange(N)
            WW[W_diag_idx, W_diag_idx] += 0.01
            sig, U = symeig(WW, range=(2, self.output_dim + 1), overwrite=True)
            Y = U * numx.sqrt(N)
            del WW
        del W

        #-----------------------------------------------
        # Normalize Y
        #
        # Alternative way to do it:
        #  we need R = (Y.T*Y)^(-1/2)
        #   do this with an SVD of Y
        #      Y = U*sig*V.T
        #      Y.T*Y = (V*sig.T*U.T) * (U*sig*V.T)
        #            = V * (sig*sig.T) * V.T
        #            = V * sig^2 V.T
        #   so
        #      R = V * sig^-1 * V.T
        # The code is:
        #    U, sig, VT = svd(Y)
        #    del U
        #    S = numx.diag(sig**-1)
        #    self.training_projection = mult(Y, mult(VT.T, mult(S, VT)))
        #-----------------------------------------------
        if self.verbose:
            print(' - normalizing null space...')

        C = sqrtm(mult(Y.T, Y))
        self.training_projection = mult(Y, C)
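The method above runs when training of the node is closed. A minimal usage sketch, assuming the surrounding class is ``mdp.nodes.HLLENode`` (the node this ``_stop_training()`` belongs to in MDP) and using a made-up rolled-sheet data set:

import numpy as np
import mdp

# Toy data: a 2-D sheet rolled up in 3-D (illustrative only).
t = 4 * np.pi * np.random.rand(500, 1)
h = np.random.rand(500, 1)
data = np.hstack([np.cos(t), h, np.sin(t)]) + 0.01 * np.random.randn(500, 3)

# The warning above asks for k >= 1 + output_dim + output_dim*(output_dim+1)/2;
# for output_dim=2 that means k >= 6, so k=12 is comfortably large enough.
node = mdp.nodes.HLLENode(k=12, output_dim=2, verbose=False)
node.train(data)
node.stop_training()                   # runs the _stop_training() shown above

print(node.training_projection.shape)  # (500, 2): embedding of the training points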