def opg_subspace(X, f, weights):
    """Estimate active subspace with local linear models.
    
    This approach is related to the sufficient dimension reduction method
    sometimes known as the outer product of gradients (OPG) method. See the
    2001 paper 'Structure adaptive approach for dimension reduction' by
    Hristache et al.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector, corresponds to numerical quadrature rule used to
        estimate matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # Obtain gradient approximations using local linear regressions
    df = local_linear_gradients(X, f, weights=weights)
    
    # Use gradient approximations to compute active subspace
    opg_weights = np.ones((df.shape[0], 1)) / df.shape[0]
    e, W = active_subspace(df, opg_weights)
    
    return e, W
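
# Hedged illustration (not part of the library): a self-contained NumPy sketch
# of the outer-product-of-gradients construction that `active_subspace(df,
# opg_weights)` above is assumed to perform, i.e. eigendecomposing
# C = sum_i w_i * g_i g_i^T built from gradient samples. Synthetic data only.
import numpy as np

_rng = np.random.RandomState(0)
_M, _m = 200, 5
_df = _rng.normal(size=(_M, _m))          # stand-in gradient samples
_w = np.ones((_M, 1)) / _M                # uniform quadrature weights

_C = np.dot(_df.T, _df * _w)              # sum_i w_i g_i g_i^T
_e, _W = np.linalg.eigh(_C)               # eigh: _C is symmetric
_order = np.argsort(_e)[::-1]             # sort eigenvalues descending
_e, _W = _e[_order].reshape((_m, 1)), _W[:, _order]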
Example #2
def sir_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # check if the points are uniform or Gaussian, set 2nd moment
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # Center and normalize data
    Z = (1.0 / np.sqrt(gamma)) * (X - np.mean(X, axis=0).reshape((1, m)))
    
    # Bin data according to responses
    H = 10
    bins = np.percentile(f, np.linspace(0, 100, H+1))
    bins[0] = bins[0] - SQRTEPS
    
    # Compute C matrix
    C = np.zeros((m, m))
    for i in range(H):
        in_slice = ((f > bins[i]) & (f <= bins[i+1])).reshape(M)
        if np.any(in_slice):
            sweights = weights[in_slice] / np.sum(weights[in_slice])
            m_hat = np.sum(Z[in_slice, :] * sweights, axis=0).reshape((m, 1))
            p_hat = np.sum(in_slice) / float(M)
            C += p_hat*np.dot(m_hat, m_hat.T)
    
    return sorted_eigh(C)
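
# Hedged illustration (synthetic data, not the library's API) of the SIR
# slicing above: responses are cut into H quantile bins, and the slice means
# of the standardized inputs accumulate C = sum_h p_h * m_h m_h^T.
import numpy as np

_rng = np.random.RandomState(1)
_M, _m, _H = 300, 4, 10
_Z = _rng.normal(size=(_M, _m))
_f = _Z[:, 0:1] + 0.1 * _rng.normal(size=(_M, 1))

_edges = np.percentile(_f, np.linspace(0, 100, _H + 1))
_edges[0] -= 1e-8                          # make the first bin inclusive
_C = np.zeros((_m, _m))
for _h in range(_H):
    _sl = ((_f > _edges[_h]) & (_f <= _edges[_h + 1])).reshape(_M)
    if np.any(_sl):
        _m_hat = np.mean(_Z[_sl, :], axis=0).reshape((_m, 1))
        _C += (np.sum(_sl) / float(_M)) * np.dot(_m_hat, _m_hat.T)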
def minimize(asrs, X, f):
    """Minimize a response surface constructed with the active subspace.
    
    Parameters
    ----------
    asrs : ActiveSubspaceResponseSurface 
        a trained response_surfaces.ActiveSubspaceResponseSurface
    X : ndarray 
        input points used to train the MinVariableMap
    f : ndarray 
        simulation outputs used to train the MinVariableMap
        
    Returns
    -------
    xstar : ndarray 
        the estimated minimizer of the function modeled by the
        ActiveSubspaceResponseSurface `asrs`
    fstar : float 
        the estimated minimum of the function modeled by `asrs`

    Notes
    -----
    This function has two stages. First it uses the scipy.optimize package to
    minimize the response surface of the active variables. Then it trains
    a MinVariableMap with the given input/output pairs, which it uses to map
    the minimizer back to the space of simulation inputs.

    This is very heuristic. 
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # ActiveVariableDomain
    avdom = asrs.avmap.domain

    # wrappers
    def avfun(y):
        f = asrs.predict_av(y.reshape((1, y.size)))[0]
        return f[0, 0]

    def avdfun(y):
        df = asrs.gradient_av(y.reshape((1, y.size)))
        return df.reshape((y.size, ))

    if isinstance(avdom, UnboundedActiveVariableDomain):
        mvm = UnboundedMinVariableMap(avdom)
    elif isinstance(avdom, BoundedActiveVariableDomain):
        mvm = BoundedMinVariableMap(avdom)
    else:
        raise Exception('There is a problem with the avmap.domain.')

    ystar, fstar = av_minimize(avfun, avdom, avdfun=avdfun)
    mvm.train(X, f)
    xstar = mvm.inverse(ystar)[0]
    return xstar, fstar
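
# Hedged illustration of the two-stage idea in `minimize` above, using only
# NumPy and SciPy on a toy problem: (1) minimize over the active variable y,
# (2) map y* back to the full space. The linear back-map below is a crude
# stand-in for the trained MinVariableMap, not the library's method.
import numpy as np
from scipy.optimize import minimize as _scipy_minimize  # avoid name clash

_W1 = np.array([[0.6], [0.8]])             # toy 2D -> 1D active direction
_g = lambda y: (y[0] - 1.0) ** 2           # toy response surface in y

_res = _scipy_minimize(_g, x0=np.zeros(1))
_ystar = _res.x
_xstar = np.dot(_W1, _ystar)               # crude inverse map y* -> x*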
Example #6
def minimize(asrs, X, f):
    """
    Minimize a response surface constructed with the aid of the active subspace.

    :param ActiveSubspaceResponseSurface asrs: A trained
        response_surfaces.ActiveSubspaceResponseSurface.
    :param ndarray X: input points used to train the MinVariableMap.
    :param ndarray f: simulation outputs used to train the MinVariableMap.

    :return: xstar, The estimated minimizer of the function modeled by the
        ActiveSubspaceResponseSurface `asrs`.
    :rtype: ndarray

    :return: fstar, The estimated minimum of the function modeled by `asrs`.
    :rtype: float

    **Notes**

    This function has two stages. First it uses the scipy.optimize package to
    minimize the response surface of the active variables. Then it trains
    a MinVariableMap with the given input/output pairs, which it uses to map
    the minimizer back to the space of simulation inputs.

    This is very heuristic. But hey, so is all of global optimization.
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # ActiveVariableDomain
    avdom = asrs.avmap.domain

    logging.getLogger(__name__).debug('Minimizing a {:d}-variate function exploiting a {:d}-dim active subspace.'\
                                .format(m, avdom.subspaces.W1.shape[1]))
    # wrappers
    def avfun(y):
        f = asrs.predict_av(y.reshape((1,y.size)))[0]
        return f[0,0]
    def avdfun(y):
        df = asrs.gradient_av(y.reshape((1,y.size)))
        return df.reshape((y.size,))

    if isinstance(avdom, UnboundedActiveVariableDomain):
        mvm = UnboundedMinVariableMap(avdom)
    elif isinstance(avdom, BoundedActiveVariableDomain):
        mvm = BoundedMinVariableMap(avdom)
    else:
        raise Exception('There is a problem with the avmap.domain.')

    ystar, fstar = av_minimize(avfun, avdom, avdfun=avdfun)
    mvm.train(X, f)
    xstar = mvm.inverse(ystar)[0]
    return xstar, fstar
Example #7
def opg_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # Obtain gradient approximations using local linear regressions
    df = local_linear_gradients(X, f, weights=weights)
    
    # Use gradient approximations to compute active subspace
    opg_weights = np.ones((df.shape[0], 1)) / df.shape[0]
    e, W = active_subspace(df, opg_weights)
    
    return e, W
Example #8
def ols_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # solve weighted least squares
    A = np.hstack((np.ones((M, 1)), X)) * np.sqrt(weights)
    b = f * np.sqrt(weights)
    u = np.linalg.lstsq(A, b)[0]
    w = u[1:].reshape((m, 1))
    
    # compute rank-1 C
    C = np.dot(w, w.transpose())
    
    return sorted_eigh(C)
def qphd_subspace(X, f, weights):
    """Estimate active subspace with global quadratic model.
    
    This approach is similar to Ker-Chau Li's approach for principal Hessian
    directions based on a global quadratic model of the data. In contrast to
    Li's approach, this method uses the average outer product of the gradient
    of the quadratic model, as opposed to just its Hessian.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector, corresponds to numerical quadrature rule used to
        estimate matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors

    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # check if the points are uniform or Gaussian, set 2nd moment
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # compute a quadratic approximation
    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # compute C
    C = np.outer(b, b.transpose()) + gamma*np.dot(A, A.transpose())

    return sorted_eigh(C)
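
# Hedged illustration (hand-picked coefficients, not library output) of the
# C = b b^T + gamma * A A^T construction above, where b and A are the
# gradient and Hessian of a fitted quadratic and gamma is the inputs'
# second moment (1 for standard Gaussian, 1/3 for uniform on [-1, 1]).
import numpy as np

_b = np.array([[1.0], [0.0], [0.5]])
_A = np.diag([2.0, 1.0, 0.0])
_gamma = 1.0 / 3.0                         # uniform inputs on [-1, 1]^3

_C = np.outer(_b, _b) + _gamma * np.dot(_A, _A.T)
_e, _W = np.linalg.eigh(_C)                # then sort descending as above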
Example #11
def phd_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # check if the points are uniform or Gaussian, set 2nd moment
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
        
    # Center data
    Z = X - np.mean(X, axis=0).reshape((1, m))
    
    # Compute C matrix
    C =  (1.0 / np.sqrt(gamma)) * np.dot(Z.T, (f - np.mean(f)) * weights * Z)
    
    return sorted_eigh(C)
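
# Hedged illustration (synthetic data) of the PHD matrix above: a response-
# weighted second moment of the centered inputs. For standard Gaussian
# inputs gamma = 1, so the 1/sqrt(gamma) factor drops out here.
import numpy as np

_rng = np.random.RandomState(3)
_M, _m = 150, 3
_X = _rng.normal(size=(_M, _m))
_f = _X[:, 0:1] ** 2 + 0.1 * _rng.normal(size=(_M, 1))
_w = np.ones((_M, 1)) / _M

_Z = _X - np.mean(_X, axis=0).reshape((1, _m))
_C = np.dot(_Z.T, (_f - np.mean(_f)) * _w * _Z)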
def ols_subspace(X, f, weights):
    """Estimate one-dimensional subspace with global linear model.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 weight vector, corresponds to numerical quadrature rule used to
        estimate matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors
        
    Notes
    -----
    Although the method returns a full set of eigenpairs (to be consistent with
    the other subspace functions), only the first eigenvalue will be nonzero,
    and only the first eigenvector will have any relationship to the input 
    parameters. The remaining m-1 eigenvectors are only orthogonal to the first.
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # solve weighted least squares
    A = np.hstack((np.ones((M, 1)), X)) * np.sqrt(weights)
    b = f * np.sqrt(weights)
    u = np.linalg.lstsq(A, b)[0]
    w = u[1:].reshape((m, 1))
    
    # compute rank-1 C
    C = np.dot(w, w.transpose())
    
    return sorted_eigh(C)
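
# Hedged illustration (synthetic data) of the rank-1 structure noted above:
# the fitted linear coefficients w give C = w w^T, so exactly one eigenvalue
# is nonzero and its eigenvector is w / ||w||.
import numpy as np

_rng = np.random.RandomState(2)
_M, _m = 100, 3
_X = _rng.uniform(-1.0, 1.0, size=(_M, _m))
_f = np.dot(_X, np.array([[2.0], [0.5], [0.0]])) + 1.0

_A = np.hstack((np.ones((_M, 1)), _X))
_u = np.linalg.lstsq(_A, _f)[0]
_w = _u[1:].reshape((_m, 1))
_C = np.dot(_w, _w.T)                      # rank-1 by construction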
Example #14
    def train(self, X, f):
        """
        Train the global quadratic for the regularization.

        :param ndarray X: input points used to train a global quadratic used in
            the `regularize_z` function.
        :param ndarray f: simulation outputs used to train a global quadratic in
            the `regularize_z` function.
        """

        X, f, M, m = process_inputs_outputs(X, f)

        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        m, n = W1.shape
        W = self.domain.subspaces.eigenvectors

        # train quadratic surface on p>n active vars
        if m-n>2:
            p = n+2
        else:
            p = n+1

        logging.getLogger(__name__).debug('Training a MinVariableMap on {:d} active variables out of {:d} for a {:d}-dim active subspace.'\
                                .format(p, m, n))

        Yp = np.dot(X, W[:,:p])
        pr = PolynomialApproximation(N=2)
        pr.train(Yp, f)
        br, Ar = pr.g, pr.H

        # get coefficients
        b = np.dot(W[:,:p], br)
        A = np.dot(W[:,:p], np.dot(Ar, W[:,:p].T))

        # some private attributes used in the regularize_z function
        self._bz = np.dot(W2.T, b)
        self._zAy = np.dot(W2.T, np.dot(A, W1))
        self._zAz = np.dot(W2.T, np.dot(A, W2)) + 0.01*np.eye(m-n)
Example #15
    def train(self, X, f):
        """Train the global quadratic for the regularization.

        Parameters
        ----------
        X : ndarray
            input points used to train a global quadratic used in the
            `regularize_z` function
        f : ndarray
            simulation outputs used to train a global quadratic in the
            `regularize_z` function
        """

        X, f, M, m = process_inputs_outputs(X, f)

        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        m, n = W1.shape
        W = self.domain.subspaces.eigenvecs

        # train quadratic surface on p>n active vars
        if m-n>2:
            p = n+2
        else:
            p = n+1

        Yp = np.dot(X, W[:,:p])
        pr = PolynomialApproximation(N=2)
        pr.train(Yp, f)
        br, Ar = pr.g, pr.H

        # get coefficients
        b = np.dot(W[:,:p], br)
        A = np.dot(W[:,:p], np.dot(Ar, W[:,:p].T))

        # some private attributes used in the regularize_z function
        self._bz = np.dot(W2.T, b)
        self._zAy = np.dot(W2.T, np.dot(A, W1))
        self._zAz = np.dot(W2.T, np.dot(A, W2)) + 0.01*np.eye(m-n)
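
# Hedged illustration (random orthogonal basis, hand-picked quadratic) of the
# block extraction at the end of `train` above: project a full-space gradient
# b and Hessian A onto the active (W1) and inactive (W2) bases.
import numpy as np

_m, _n = 4, 1
_Q, _ = np.linalg.qr(np.random.RandomState(4).normal(size=(_m, _m)))
_W1, _W2 = _Q[:, :_n], _Q[:, _n:]
_b = np.random.RandomState(5).normal(size=(_m, 1))
_A = np.eye(_m)

_bz = np.dot(_W2.T, _b)                              # (m-n)-by-1
_zAy = np.dot(_W2.T, np.dot(_A, _W1))                # (m-n)-by-n
_zAz = np.dot(_W2.T, np.dot(_A, _W2)) + 0.01 * np.eye(_m - _n)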
Example #17
def qphd_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # check if the points are uniform or Gaussian, set 2nd moment
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # compute a quadratic approximation
    pr = PolynomialApproximation(2)
    pr.train(X, f, weights)

    # get regression coefficients
    b, A = pr.g, pr.H

    # compute C
    C = np.outer(b, b.transpose()) + gamma*np.dot(A, A.transpose())
    
    return sorted_eigh(C)
Example #18
def save_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # check if the points are uniform or Gaussian, set 2nd moment
    if np.amax(X) > 1.0 or np.amin(X) < -1.0:
        gamma = 1.0
    else:
        gamma = 1.0 / 3.0
    
    # Center and normalize data
    Z = (1.0 / np.sqrt(gamma))*(X - np.mean(X, axis=0).reshape((1, m)))
    
    # Bin data according to responses
    H = 10
    bins = np.percentile(f, np.linspace(0, 100, H+1))
    ind = np.digitize(f.reshape(M), bins)
    ind[ind == 0] = 1
    ind[ind == len(bins)] = H
    
    # Compute C matrix
    C = np.zeros((m, m))
    for i in range(H):
        in_slice = (ind == i+1)
        if np.any(in_slice):
            Z_tilde = Z[in_slice, :] - np.mean(Z[in_slice, :], axis=0)
            sweights = weights[in_slice] / np.sum(weights[in_slice])
            if sum(in_slice) > 1:
                V = np.eye(m) - (np.dot(Z_tilde.T, sweights * Z_tilde) / (1 - np.sum(sweights**2)))
            else:
                V = np.eye(m)
            C += np.dot(V, V)
    
    return sorted_eigh(C)
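
# Hedged illustration (synthetic data, unweighted covariance for brevity) of
# the SAVE accumulation above: per quantile slice, form V_h = I - Cov(Z | h)
# and add V_h V_h (V_h is symmetric, so this is V_h^2).
import numpy as np

_rng = np.random.RandomState(6)
_M, _m, _H = 300, 3, 10
_Z = _rng.normal(size=(_M, _m))
_f = np.abs(_Z[:, 0]) + 0.1 * _rng.normal(size=_M)

_edges = np.percentile(_f, np.linspace(0, 100, _H + 1))
_ind = np.clip(np.digitize(_f, _edges), 1, _H)
_C = np.zeros((_m, _m))
for _h in range(1, _H + 1):
    _sl = (_ind == _h)
    if np.sum(_sl) > 1:
        _V = np.eye(_m) - np.cov(_Z[_sl, :], rowvar=False)
        _C += np.dot(_V, _V)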
Example #19
def swarm_subspace(X, f, weights):
    """
    TODO: docs
    """
    X, f, M, m = process_inputs_outputs(X, f)
    
    # integration weights
    W = np.dot(weights, weights.transpose())
    
    # distance matrix, getting rid of zeros
    D2 = np.power(distance_matrix(X, X), 2)
    ind = D2 < SQRTEPS
    W[ind], D2[ind] = 0.0, 1.0
    
    # all weights
    A = (np.power(f - f.transpose(), 2) * W) / D2
    
    C = np.zeros((m, m))
    for i in range(M):
        P = X - X[i, :]
        a = A[:, i].reshape((M, 1))
        C = C + np.dot(P.transpose(), P * a)
    
    return sorted_eigh(C)
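
# Hedged illustration (synthetic data) of the pairwise construction above,
# using SciPy's distance_matrix: each difference direction x_i - x_j is
# weighted by the squared output change over the squared input distance.
import numpy as np
from scipy.spatial import distance_matrix as _distance_matrix

_rng = np.random.RandomState(7)
_M, _m = 50, 3
_X = _rng.uniform(-1.0, 1.0, size=(_M, _m))
_f = np.dot(_X, np.array([[1.0], [0.0], [0.0]]))
_w = np.ones((_M, 1)) / _M

_Wmat = np.dot(_w, _w.T)
_D2 = _distance_matrix(_X, _X) ** 2
_near = _D2 < 1e-8                         # mask self-pairs
_Wmat[_near], _D2[_near] = 0.0, 1.0
_A = ((_f - _f.T) ** 2 * _Wmat) / _D2

_C = np.zeros((_m, _m))
for _i in range(_M):
    _P = _X - _X[_i, :]
    _C += np.dot(_P.T, _P * _A[:, _i].reshape((_M, 1)))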
Example #20
    def build_from_data(self, X, f, df=None, avdim=None):
        """
        Build the active subspace-enabled model with input/output pairs.

        :param ndarray X: M-by-m matrix with evaluations of the m-dimensional
            simulation inputs.
        :param ndarray f: M-by-1 matrix with corresponding simulation quantities
            of interest.
        :param ndarray df: M-by-m matrix that contains the gradients of the
            simulation quantity of interest, oriented row-wise, that correspond
            to the rows of `X`. If `df` is not present, then it is estimated
            with crude local linear models using the pairs `X` and `f`.
        :param int avdim: The dimension of the active subspace. If `avdim`
            is not present, a crude heuristic is used to choose an active
            subspace dimension based on the given data `X` and
            `f`---and possible `df`.

        **Notes**

        This method follows these steps:

        #. If `df` is None, estimate it with local linear models using the \
        input/output pairs `X` and `f`.
        #. Compute the active and inactive subspaces using `df`.
        #. Train a response surface using `X` and `f` that exploits the active \
        subspace.

        """
        X, f, M, m = process_inputs_outputs(X, f)

        # check if the given inputs satisfy the assumptions
        if self.bounded_inputs:
            if np.any(X > 1.0) or np.any(X < -1.0):
                raise Exception('The supposedly bounded inputs exceed the '
                                'bounds [-1, 1].')
        else:
            if np.any(X > 10.0) or np.any(X < -10.0):
                raise Exception('There is a very good chance that your '
                                'unbounded inputs are not properly scaled.')
        self.X, self.f, self.m = X, f, m

        if df is not None:
            df, M_df, m_df = process_inputs(df)
            if m_df != m:
                raise ValueError('The dimension of the gradients should be '
                                 'the same as the dimension of the inputs.')
        else:
            # if gradients aren't available, estimate them from data
            df = local_linear_gradients(X, f)


        # compute the active subspace
        ss = Subspaces()
        ss.compute(df)
        if avdim is not None:
            if not isinstance(avdim, int):
                raise TypeError('avdim should be an integer.')
            else:
                ss.partition(avdim)
        self.n = ss.W1.shape[1]
        print('The dimension of the active subspace is {:d}.'.format(self.n))

        # set up the active variable domain and map
        if self.bounded_inputs:
            avdom = BoundedActiveVariableDomain(ss)
            avmap = BoundedActiveVariableMap(avdom)
        else:
            avdom = UnboundedActiveVariableDomain(ss)
            avmap = UnboundedActiveVariableMap(avdom)

        # build the response surface
        asrs = ActiveSubspaceResponseSurface(avmap)
        asrs.train_with_data(X, f)

        # set the R-squared coefficient
        self.Rsqr = asrs.respsurf.Rsqr
        self.as_respsurf = asrs
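
# Hedged usage sketch: `build_from_data` above is a method, so it needs an
# instance of its (here hypothetical) owning model class; the name
# ActiveSubspaceModel below is illustrative only, not confirmed by this code.
#
#     model = ActiveSubspaceModel(bounded_inputs=True)
#     model.build_from_data(X, f)           # gradients estimated from (X, f)
#     model.build_from_data(X, f, df=df)    # or supply gradients directly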