Example #1
def opg_subspace(X, f, weights):
    """Estimate active subspace with local linear models.
    
    This approach is related to the sufficient dimension reduction method
    sometimes known as the outer product of gradients (OPG) method. See the
    2001 paper 'Structure adaptive approach for dimension reduction' by
    Hristache et al.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector that corresponds to the numerical quadrature rule
        used to estimate the matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # Obtain gradient approximations using local linear regressions
    df = local_linear_gradients(X, f, weights=weights)

    # Use gradient approximations to compute active subspace
    opg_weights = np.ones((df.shape[0], 1)) / df.shape[0]
    e, W = active_subspace(df, opg_weights)

    return e, W
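
A minimal usage sketch, assuming `opg_subspace` and its helpers from the surrounding module are importable; the data and the dominant-direction check are illustrative:

import numpy as np

# Illustrative smoke test: f varies mostly along one direction of x,
# so the first OPG eigenvalue should dominate.
M, m = 200, 5
np.random.seed(42)
X = np.random.uniform(-1.0, 1.0, (M, m))
f = np.sin(np.dot(X, np.ones((m, 1))))   # M-by-1 outputs
weights = np.ones((M, 1)) / M            # uniform quadrature weights

e, W = opg_subspace(X, f, weights)
print(e[0] / np.sum(e))                  # share of the spectrum in the first direction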
Example #2
    def train(self, X, f, weights=None):
        """Train the least-squares-fit polynomial approximation.

        Parameters
        ----------
        X : ndarray
            an ndarray of training points for the polynomial approximation. The 
            shape is M-by-m, where m is the number of dimensions.
        f : ndarray
            an ndarray of function values used to train the polynomial 
            approximation. The shape of `f` is M-by-1.
        weights : ndarray, optional 
            an ndarray of weights for the least-squares. (default is None, which
            means uniform weights)

        Notes
        -----
        This method sets all the attributes of the class for use in the 
        `predict` method.
        """
        X, f, M, m = process_inputs_outputs(X, f)

        # check that there are enough points to train the polynomial
        if M < comb(self.N + m, m):
            raise Exception(
                'Not enough points to fit response surface of order {:d}'.
                format(self.N))

        B, indices = polynomial_bases(X, self.N)
        p = B.shape[1]
        if weights is not None:
            B, f = weights * B, weights * f

        poly_weights = np.linalg.lstsq(B, f, rcond=None)[0]
        Rsqr = 1.0 - (np.linalg.norm(np.dot(B, poly_weights) - f)**2 /
                      (M * np.var(f)))

        # store data
        self.X, self.f = X, f
        self.poly_weights = poly_weights.reshape((p, 1))
        self.Rsqr = Rsqr

        # organize linear and quadratic coefficients
        self.g = poly_weights[1:m + 1].copy().reshape((m, 1))
        if self.N > 1:
            H = np.zeros((m, m))
            for i in range(m + 1, int(m + 1 + comb(m + 1, 2))):
                ind = indices[i, :]
                loc = np.nonzero(ind != 0)[0]
                if loc.size == 1:
                    H[loc, loc] = 2.0 * poly_weights[i]
                elif loc.size == 2:
                    H[loc[0], loc[1]] = poly_weights[i]
                    H[loc[1], loc[0]] = poly_weights[i]
                else:
                    raise Exception('Error creating quadratic coefficients.')
            self.H = H
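
The quadratic-coefficient bookkeeping above can be exercised in isolation. A self-contained sketch, where `exps` stands in for the `indices` table returned by `polynomial_bases` (all names and numbers illustrative):

import numpy as np

# Exponent table of a degree-2 basis in m = 2 variables:
# [1, x0, x1, x0^2, x0*x1, x1^2]
exps = np.array([[0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]])
c = np.array([0.0, 1.0, -2.0, 3.0, 4.0, 5.0])  # illustrative coefficients

m = 2
H = np.zeros((m, m))
for i in range(m + 1, exps.shape[0]):
    loc = np.nonzero(exps[i, :] != 0)[0]
    if loc.size == 1:    # pure square c*x_j^2: second derivative is 2*c
        H[loc, loc] = 2.0 * c[i]
    else:                # cross term c*x_j*x_k fills both off-diagonal slots
        H[loc[0], loc[1]] = H[loc[1], loc[0]] = c[i]

print(H)  # [[ 6.  4.] [ 4. 10.]]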
Example #3
def minimize(asrs, X, f):
    """Minimize a response surface constructed with the active subspace.
    
    Parameters
    ----------
    asrs : ActiveSubspaceResponseSurface 
        a trained response_surfaces.ActiveSubspaceResponseSurface
    X : ndarray 
        input points used to train the MinVariableMap
    f : ndarray 
        simulation outputs used to train the MinVariableMap
        
    Returns
    -------
    xstar : ndarray 
        the estimated minimizer of the function modeled by the
        ActiveSubspaceResponseSurface `asrs`
    fstar : float 
        the estimated minimum of the function modeled by `asrs`

    Notes
    -----
    This function has two stages. First it uses the scipy.optimize package to
    minimize the response surface of the active variables. Then it trains
    a MinVariableMap with the given input/output pairs, which it uses to map
    the minimizer back to the space of simulation inputs.

    Note that this two-stage procedure is heuristic.
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # ActiveVariableDomain
    avdom = asrs.avmap.domain

    # wrappers
    def avfun(y):
        f = asrs.predict_av(y.reshape((1, y.size)))[0]
        return f[0, 0]

    def avdfun(y):
        df = asrs.gradient_av(y.reshape((1, y.size)))
        return df.reshape((y.size, ))

    if isinstance(avdom, UnboundedActiveVariableDomain):
        mvm = UnboundedMinVariableMap(avdom)
    elif isinstance(avdom, BoundedActiveVariableDomain):
        mvm = BoundedMinVariableMap(avdom)
    else:
        raise Exception('There is a problem with the avmap.domain.')

    ystar, fstar = av_minimize(avfun, avdom, avdfun=avdfun)
    mvm.train(X, f)
    xstar = mvm.inverse(ystar)[0]
    return xstar, fstar
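
A hedged usage sketch; it assumes `X`, `f`, and a trained `ActiveSubspaceResponseSurface` named `asrs` are already in scope:

# Hypothetical driver for the two-stage minimization above.
xstar, fstar = minimize(asrs, X, f)
print('estimated minimizer:', xstar.ravel())
print('estimated minimum:', fstar)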
Example #4
def qphd_subspace(X, f, weights):
    """Estimate active subspace with global quadratic model.
    
    This approach is similar to Ker-Chau Li's method of principal Hessian
    directions, which is based on a global quadratic model of the data. In
    contrast to Li's method, this one uses the average outer product of the
    quadratic model's gradient, rather than just its Hessian.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector that corresponds to the numerical quadrature rule
        used to estimate the matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors

    """
    X, f, M, m = process_inputs_outputs(X, f)

    # guess whether the inputs are uniform on [-1, 1] or Gaussian, and set
    # gamma to the corresponding second moment of the input distribution
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0  # Gaussian: E[x_i^2] = 1
    else:
        gamma = 1.0 / 3.0  # uniform on [-1, 1]: E[x_i^2] = 1/3

    # compute a quadratic approximation
    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # compute C
    C = np.outer(b, b.transpose()) + gamma * np.dot(A, A.transpose())

    return sorted_eigh(C)
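
The closed form C = b*b^T + gamma*A*A^T is the average outer product of the quadratic model's gradient, grad(x) = b + A*x, when E[x] = 0 and E[x*x^T] = gamma*I. A self-contained Monte Carlo sanity check of that identity (all names illustrative):

import numpy as np

m, gamma = 3, 1.0 / 3.0                    # uniform on [-1, 1]: E[x_i^2] = 1/3
rng = np.random.default_rng(0)
b = rng.standard_normal((m, 1))
A = rng.standard_normal((m, m))
A = 0.5 * (A + A.T)                        # symmetric, like a Hessian

X = rng.uniform(-1.0, 1.0, (100000, m))
G = b.T + np.dot(X, A)                     # rows are gradients b + A*x
C_mc = np.dot(G.T, G) / X.shape[0]         # Monte Carlo average of grad*grad^T
C_cf = np.outer(b, b) + gamma * np.dot(A, A)

print(np.max(np.abs(C_mc - C_cf)))         # small sampling error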
Example #5
def ols_subspace(X, f, weights):
    """Estimate one-dimensional subspace with global linear model.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector that corresponds to the numerical quadrature rule
        used to estimate the matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors
        
    Notes
    -----
    Although the method returns a full set of eigenpairs (to be consistent with
    the other subspace functions), only the first eigenvalue will be nonzero,
    and only the first eigenvector will have any relationship to the input 
    parameters. The remaining m-1 eigenvectors are only orthogonal to the first.
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # solve weighted least squares
    A = np.hstack((np.ones((M, 1)), X)) * np.sqrt(weights)
    b = f * np.sqrt(weights)
    u = np.linalg.lstsq(A, b, rcond=None)[0]
    w = u[1:].reshape((m, 1))

    # compute rank-1 C
    C = np.dot(w, w.transpose())

    return sorted_eigh(C)
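
Because C = w*w^T has rank one, only the first eigenpair carries information, exactly as the Notes above state. A self-contained check (numbers illustrative):

import numpy as np

w = np.array([[0.6], [0.8]])          # unit-norm gradient estimate
C = np.dot(w, w.T)                    # rank-1 matrix
e, W = np.linalg.eigh(C)              # ascending eigenvalues
print(e[::-1])                        # [1. 0.]: a single nonzero eigenvalue
print(W[:, ::-1][:, 0])               # first eigenvector is +/- w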
Example #6
    def train(self, X, f):
        """Train the global quadratic for the regularization.

        Parameters
        ----------
        X : ndarray 
            input points used to train a global quadratic used in the 
            `regularize_z` function
        f : ndarray 
            simulation outputs used to train a global quadratic in the 
            `regularize_z` function
        """

        X, f, M, m = process_inputs_outputs(X, f)

        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        m, n = W1.shape
        W = self.domain.subspaces.eigenvecs

        # train quadratic surface on p>n active vars
        if m - n > 2:
            p = n + 2
        else:
            p = n + 1

        Yp = np.dot(X, W[:, :p])
        pr = PolynomialApproximation(N=2)
        pr.train(Yp, f)
        br, Ar = pr.g, pr.H

        # get coefficients
        b = np.dot(W[:, :p], br)
        A = np.dot(W[:, :p], np.dot(Ar, W[:, :p].T))

        # some private attributes used in the regularize_z function
        self._bz = np.dot(W2.T, b)
        self._zAy = np.dot(W2.T, np.dot(A, W1))
        self._zAz = np.dot(W2.T, np.dot(A, W2)) + 0.01 * np.eye(m - n)
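
The cached blocks split the trained quadratic across x = W1*y + W2*z. One plausible use, shown here as a loudly hypothetical sketch rather than the library's actual `regularize_z`, is to choose the inactive variables z that make the (ridge-shifted) quadratic stationary for each fixed row of active variables y:

import numpy as np

def regularize_z_sketch(mvm, Y):
    # Hypothetical: solve _zAz * z = -(_bz + _zAy * y) for each row y of Y,
    # the stationarity condition of the cached quadratic in z.
    Z = np.zeros((Y.shape[0], mvm._zAz.shape[0]))
    for i, y in enumerate(Y):
        rhs = -(mvm._bz + np.dot(mvm._zAy, y.reshape((-1, 1))))
        Z[i, :] = np.linalg.solve(mvm._zAz, rhs).ravel()
    return Z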
Example #7
    def train(self, X, f, v=None, e=None):
        """Train the radial basis approximation.

        Parameters
        ----------
        X : ndarray
            an ndarray of training points for the polynomial approximation. The 
            shape is M-by-m, where m is the number of dimensions.
        f : ndarray
            an ndarray of function values used to train the polynomial 
            approximation. The shape of `f` is M-by-1.
        v : ndarray, optional
            contains the regularization parameters that model error in the 
            function values (default None)
        e : ndarray, optional
            an ndarray containing the eigenvalues from the active subspace 
            analysis. If present, the radial basis uses it to determine the 
            appropriate anisotropy in the length scales. (default None)

        Notes
        -----
        The approximation uses a multivariate, squared exponential radial
        basis. If `e` is not None, then the radial basis is anisotropic with
        length scales determined by `e`. Otherwise, the basis is isotropic.
        The length scale parameters (i.e., the rbf shape parameters) are
        determined with a maximum likelihood heuristic inspired by
        techniques for fitting a Gaussian process model.

        The approximation also includes a monomial basis with monomials of
        total degree up to order `N`. These are fit with weighted least-squares,
        where the weight matrix is the inverse of the matrix of radial basis
        functions evaluated at the training points.

        This method sets all the attributes of the class for use in the
        `predict` method.
        """
        X, f, M, m = process_inputs_outputs(X, f)

        # check that there are enough points to train the polynomial
        if M < comb(self.N + m, m):
            raise Exception(
                'Not enough points to fit response surface of order {:d}'.
                format(self.N))

        # use maximum likelihood to tune parameters
        log10g = fminbound(_rbf_objective,
                           -10.0,
                           1.0,
                           args=(
                               X,
                               f,
                               v,
                               self.N,
                               e,
                           ))
        g = 10**(log10g)

        if e is None:
            ell = g * np.ones((m, 1))
            if v is None:
                v = 1e-6 * np.ones(f.shape)
        else:
            ell = g * np.sum(e) / e[:m]
            if v is None:
                v = g * np.sum(e[m:]) * np.ones(f.shape)

        # ensure conditioning
        v = np.amax([v.reshape(f.shape), 1e-6 * np.ones(f.shape)], axis=0)

        # covariance matrix of observations
        K = exponential_squared(X, X, 1.0, ell)
        K += np.diag(v.reshape((M, )))
        B = polynomial_bases(X, self.N)[0]
        p = B.shape[1]

        C = np.hstack((np.vstack((K, B.T)), np.vstack((B, np.zeros((p, p))))))
        weights = np.linalg.solve(C, np.vstack((f, np.zeros((p, 1)))))

        radial_weights, poly_weights = weights[:M], weights[M:]

        res = f - np.dot(B, poly_weights)
        Rsqr = 1.0 - (np.dot(res.T, np.linalg.solve(K, res)) /
                      np.dot(f.T, np.linalg.solve(K, f)))

        # store parameters
        self.X, self.f = X, f
        self.ell, self.K = ell, K
        self.Rsqr = Rsqr[0, 0]
        self.radial_weights, self.poly_weights = radial_weights, poly_weights
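
With the stored weights, prediction at new points combines the radial part and the monomial tail. A hedged sketch of what `predict` plausibly computes, reusing the module's `exponential_squared` and `polynomial_bases` helpers (not necessarily the class's exact implementation):

def predict_sketch(self, Xnew):
    # Hypothetical reconstruction: cross-covariances times radial weights,
    # plus the monomial basis times the polynomial weights.
    K = exponential_squared(Xnew, self.X, 1.0, self.ell)
    B = polynomial_bases(Xnew, self.N)[0]
    return np.dot(K, self.radial_weights) + np.dot(B, self.poly_weights)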