Example #1
import numpy as np

# PolynomialApproximation, process_inputs_outputs, and sorted_eigh are helpers
# from the surrounding package; they are not defined in this snippet.
def quadratic_model_check(X, f, gamma, weights=None):
    """
    Use the Hessian of a least-squares-fit quadratic model to identify active
    and inactive subspaces.

    :param ndarray X: M-by-m matrix containing points in the simulation input
        space.
    :param ndarray f: M-by-1 matrix containing the corresponding simulation
        outputs.
    :param ndarray gamma: The variance of the simulation inputs. If the inputs
        are drawn uniformly from the [-1, 1] hypercube, then `gamma` is 1/3.
    :param ndarray weights: M-by-1 matrix containing weights for the
        least-squares fit.

    :return: e, m-by-1 matrix containing the eigenvalues of the matrix built
        from the quadratic model's gradient and Hessian.
    :rtype: ndarray

    :return: W, m-by-m matrix containing the corresponding eigenvectors.
    :rtype: ndarray

    **See Also**

    sdr.linear_gradient_check

    **Notes**

    This approach is very similar to Ker-Chau Li's principal Hessian directions.
    """

    M, m = X.shape
    gamma = gamma.reshape((1, m))

    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # compute eigenpairs of C = b*b^T + A*diag(gamma)*A, the average outer
    # product of the quadratic model's gradient
    e, W = np.linalg.eig(np.outer(b, b)
                         + np.dot(A, np.dot(np.diagflat(gamma), A)))
    ind = np.argsort(e)[::-1]
    e, W = e[ind], W[:,ind]*np.sign(W[0,ind])

    return e.reshape((m,1)), W.reshape((m,m))
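A minimal usage sketch for quadratic_model_check (illustrative only: the
synthetic data, seed, and expected-output comments are assumptions, and the
function plus its PolynomialApproximation helper must already be importable):

import numpy as np

np.random.seed(0)
M, m = 300, 4
X = np.random.uniform(-1.0, 1.0, (M, m))    # inputs on the [-1, 1] hypercube
f = ((2.0*X[:, 0] + 0.5*X[:, 1])**2).reshape((M, 1))  # one dominant direction
gamma = (1.0/3.0)*np.ones(m)                # variance of U(-1, 1) inputs

e, W = quadratic_model_check(X, f, gamma)
# e[0] should dominate e[1:], and W[:, 0] should roughly align with the
# direction (2.0, 0.5, 0.0, 0.0), up to sign and normalization.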
def qphd_subspace(X, f, weights):
    """Estimate active subspace with global quadratic model.
    
    This approach is similar to Ker-Chau Li's principal Hessian directions,
    which are based on a global quadratic model of the data. In contrast to
    Li's method, this one uses the average outer product of the quadratic
    model's gradient rather than just its Hessian.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector corresponding to the numerical quadrature rule
        used to estimate the matrix whose eigenspaces define the active
        subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors

    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # infer the inputs' second moment: points outside [-1, 1] suggest standard
    # Gaussian inputs (gamma = 1); otherwise assume uniform inputs on the
    # hypercube (gamma = 1/3)
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # compute a quadratic approximation
    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # C = b*b^T + gamma*A*A^T, the average outer product of the quadratic
    # model's gradient
    C = np.outer(b, b) + gamma*np.dot(A, A.transpose())

    return sorted_eigh(C)
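A hedged usage sketch for qphd_subspace (hypothetical driver: the synthetic
data and seed are assumptions, and process_inputs_outputs,
PolynomialApproximation, and sorted_eigh must be importable from the
surrounding package):

import numpy as np

np.random.seed(42)
M, m = 500, 5
X = np.random.uniform(-1.0, 1.0, (M, m))   # hypercube inputs, so gamma = 1/3
a = np.linspace(1.0, 2.0, m).reshape((m, 1))
f = np.dot(X, a)**2                        # output varies along one direction
weights = np.ones((M, 1)) / float(M)       # equal quadrature weights

e, W = qphd_subspace(X, f, weights)
# Expect a one-dimensional active subspace: e[0] >> e[1], with W[:, 0]
# proportional to a (up to sign).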
Example #4
    def train(self, X, f):
        """Train the global quadratic for the regularization.

        Parameters
        ----------
        X : ndarray
            input points used to train the global quadratic employed by the
            `regularize_z` function
        f : ndarray
            simulation outputs used to train the global quadratic employed by
            the `regularize_z` function
        """

        X, f, M, m = process_inputs_outputs(X, f)

        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        m, n = W1.shape
        W = self.domain.subspaces.eigenvecs

        # fit the quadratic surface on p > n active variables
        if m - n > 2:
            p = n + 2
        else:
            p = n + 1

        Yp = np.dot(X, W[:,:p])
        pr = PolynomialApproximation(N=2)
        pr.train(Yp, f)
        br, Ar = pr.g, pr.H

        # get coefficients
        b = np.dot(W[:,:p], br)
        A = np.dot(W[:,:p], np.dot(Ar, W[:,:p].T))

        # private attributes used in the regularize_z function; the small
        # multiple of the identity below keeps _zAz invertible
        self._bz = np.dot(W2.T, b)
        self._zAy = np.dot(W2.T, np.dot(A, W1))
        self._zAz = np.dot(W2.T, np.dot(A, W2)) + 0.01*np.eye(m-n)
    def train(self, X, f):
        """
        Train the global quadratic for the regularization.

        :param ndarray X: input points used to train the global quadratic
            employed by the `regularize_z` function.
        :param ndarray f: simulation outputs used to train the global quadratic
            employed by the `regularize_z` function.
        """

        X, f, M, m = process_inputs_outputs(X, f)

        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        m, n = W1.shape
        W = self.domain.subspaces.eigenvectors

        # fit the quadratic surface on p > n active variables
        if m - n > 2:
            p = n + 2
        else:
            p = n + 1

        logging.getLogger(__name__).debug(
            'Training a MinVariableMap on {:d} active variables out of {:d} '
            'for a {:d}-dim active subspace.'.format(p, m, n))

        Yp = np.dot(X, W[:,:p])
        pr = PolynomialApproximation(N=2)
        pr.train(Yp, f)
        br, Ar = pr.g, pr.H

        # get coefficients
        b = np.dot(W[:,:p], br)
        A = np.dot(W[:,:p], np.dot(Ar, W[:,:p].T))

        # private attributes used in the regularize_z function; the small
        # multiple of the identity below keeps _zAz invertible
        self._bz = np.dot(W2.T, b)
        self._zAy = np.dot(W2.T, np.dot(A, W1))
        self._zAz = np.dot(W2.T, np.dot(A, W2)) + 0.01*np.eye(m-n)
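The private attributes stored at the end of train hint at what regularize_z
does with them: for a fixed active-variable value y, the trained quadratic is
minimized over the inactive variables z, and the small identity term keeps
_zAz invertible so this reduces to a linear solve. The sketch below is an
illustrative reconstruction under the assumption that the fitted model is
f(x) ≈ c + b^T x + (1/2) x^T A x with x = W1*y + W2*z; the function name and
the vmap argument are hypothetical, and this is not necessarily the library's
exact implementation:

import numpy as np

def regularize_z_sketch(vmap, Y):
    """Hypothetical reconstruction of regularize_z for a trained map.

    For each row y of Y, minimize the trained quadratic over the inactive
    variables z: setting the z-gradient _bz + _zAy*y + _zAz*z to zero gives
    the linear system _zAz * z = -(_zAy * y + _bz).
    """
    n_inactive = vmap._zAz.shape[0]
    Z = np.zeros((Y.shape[0], n_inactive))
    for i, y in enumerate(Y):
        rhs = -(np.dot(vmap._zAy, y.reshape((-1, 1))) + vmap._bz)
        Z[i, :] = np.linalg.solve(vmap._zAz, rhs).ravel()
    return Z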
Example #7
import numpy as np

# process_inputs_outputs, PolynomialApproximation, and sorted_eigh are helpers
# from the surrounding package; they are not defined in this snippet.
def qphd_subspace(X, f, weights):
    """Estimate active subspace with a global quadratic model.

    Same routine as in Example #1; see that copy for full parameter
    documentation.
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # infer the inputs' second moment: points outside [-1, 1] suggest standard
    # Gaussian inputs (gamma = 1); otherwise assume uniform inputs on the
    # hypercube (gamma = 1/3)
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # compute a quadratic approximation
    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # C = b*b^T + gamma*A*A^T, the average outer product of the quadratic
    # model's gradient
    C = np.outer(b, b) + gamma*np.dot(A, A.transpose())
    
    return sorted_eigh(C)