def predict(self, X, compgrad=False):
        """
        Evaluate the least-squares-fit polynomial approximation at new points.

        :param ndarray X: An ndarray of points to evaluate the polynomial
            approximation. The shape is M-by-m, where m is the number of
            dimensions.
        :param bool compgrad: A flag to decide whether or not to compute the
            gradient of the polynomial approximation at the points `X`.

        :return: f, An ndarray of predictions from the polynomial approximation.
            The shape of `f` is M-by-1.
        :rtype: ndarray
        :return: df, An ndarray of gradient predictions from the polynomial
            approximation. The shape of `df` is M-by-m. `df` is None when
            `compgrad` is False.
        :rtype: ndarray
        """
        X, M, m = process_inputs(X)

        # Evaluate the polynomial basis at X and apply the fitted coefficients.
        B = polynomial_bases(X, self.N)[0]
        f = np.dot(B, self.poly_weights).reshape((M, 1))

        if compgrad:
            # dB[:, :, i] holds the partial derivatives of the basis with
            # respect to coordinate i; one matrix-vector product per dimension.
            dB = grad_polynomial_bases(X, self.N)
            df = np.zeros((M, m))
            for i in range(m):
                # flatten the (M, 1) product into column i
                df[:, i] = np.dot(dB[:, :, i], self.poly_weights).reshape((M,))
        else:
            df = None

        return f, df
    def predict(self, X, compgrad=False):
        """Evaluate the radial basis approximation at new points.

        Parameters
        ----------
        X : ndarray
            points at which to evaluate the approximation, shaped M-by-m
            where m is the number of dimensions.
        compgrad : bool, optional
            when True, also compute the gradient of the approximation at
            the points `X`. (default False)

        Returns
        -------
        f : ndarray
            predictions of the radial basis approximation, shaped M-by-1.
        df : ndarray
            gradient predictions, shaped M-by-m; None when `compgrad` is
            False.
        """
        X, M, m = process_inputs(X)

        # The prediction is the kernel part plus the polynomial tail.
        kernel = exponential_squared(X, self.X, 1.0, self.ell)
        bases = polynomial_bases(X, self.N)[0]
        f = (np.dot(kernel, self.radial_weights)
             + np.dot(bases, self.poly_weights)).reshape((M, 1))

        df = None
        if compgrad:
            dkernel = grad_exponential_squared(self.X, X, 1.0, self.ell)
            dbases = grad_polynomial_bases(X, self.N)
            df = np.zeros((M, m))
            for k in range(m):
                # one partial derivative per input dimension
                col = np.dot(dkernel[:, :, k].T, self.radial_weights) \
                    + np.dot(dbases[:, :, k], self.poly_weights)
                df[:, k] = col.reshape((M,))

        return f, df
# --- Beispiel #3 (scraped snippet separator) ---
    def predict(self, X, compgrad=False):
        """Evaluate the radial basis approximation at new points.

        Parameters
        ----------
        X : ndarray
            evaluation points, shaped M-by-m where m is the number of
            dimensions.
        compgrad : bool, optional
            compute the gradient of the approximation at `X` as well.
            (default False)

        Returns
        -------
        f : ndarray
            predictions of the radial basis approximation, shaped M-by-1.
        df : ndarray
            gradient predictions, shaped M-by-m, or None when `compgrad`
            is False.
        """
        X, M, m = process_inputs(X)

        # Sum of the radial (kernel) term and the polynomial term.
        K = exponential_squared(X, self.X, 1.0, self.ell)
        P = polynomial_bases(X, self.N)[0]
        pred = np.dot(K, self.radial_weights) + np.dot(P, self.poly_weights)
        f = pred.reshape((M, 1))

        if not compgrad:
            return f, None

        dK = grad_exponential_squared(self.X, X, 1.0, self.ell)
        dP = grad_polynomial_bases(X, self.N)
        df = np.zeros((M, m))
        for dim in range(m):
            # partial derivative of both terms with respect to coordinate dim
            partial = np.dot(dK[:, :, dim].T, self.radial_weights) \
                + np.dot(dP[:, :, dim], self.poly_weights)
            df[:, dim] = partial.reshape((M,))

        return f, df
# --- Beispiel #4 (scraped snippet separator) ---
    def run(self, X):
        """Run at several input values.

        Run the simulation at each row of `X` and collect the gradient of
        the quantity of interest.

        Parameters
        ----------
        X : ndarray
            contains all input points where one wishes to run the simulation.
            If the simulation takes m inputs, then `X` must have shape M-by-m,
            where M is the number of simulations to run.

        Returns
        -------
        dF : ndarray
            contains the gradient of the quantity of interest at each given
            input point. The shape of `dF` is M-by-m.

        Notes
        -----
        The simulation calls are independent, so in principle they could run
        in parallel. Right now this function uses a sequential for-loop;
        future development will parallelize it for multicore architectures.
        """
        # Sequential wrapper around self.dfun; should be parallelized.
        X, M, m = process_inputs(X)
        dF = np.zeros((M, m))

        # TODO: provide some timing information
        # start = time.time()

        for row in range(M):
            grad = self.dfun(X[row, :].reshape((1, m)))
            dF[row, :] = grad.reshape((1, m))

        # TODO: provide some timing information
        # end = time.time() - start

        return dF
    def run(self, X):
        """Run at several input values.

        Evaluate the simulation's gradient of the quantity of interest at
        every input point in `X`.

        Parameters
        ----------
        X : ndarray
            contains all input points where one wishes to run the simulation.
            If the simulation takes m inputs, then `X` must have shape M-by-m,
            where M is the number of simulations to run.

        Returns
        -------
        dF : ndarray
            contains the gradient of the quantity of interest at each given
            input point. The shape of `dF` is M-by-m.

        Notes
        -----
        Each simulation call is independent of the others; the current
        implementation loops sequentially, but future development will
        exploit multicore architectures to parallelize the loop.
        """
        # Thin sequential wrapper around self.dfun; parallelize eventually.
        X, M, m = process_inputs(X)
        dF = np.zeros((M, m))

        # TODO: provide some timing information
        # start = time.time()

        for idx in range(M):
            point = X[idx, :].reshape((1, m))
            dF[idx, :] = self.dfun(point).reshape((1, m))

        # TODO: provide some timing information
        # end = time.time() - start

        return dF
# --- Beispiel #6 (scraped snippet separator) ---
    def run(self, X):
        """
        Run the simulation at several input values and return the gradients of
        the quantity of interest.

        :param ndarray X: Contains all input points where one wishes to run the
            simulation. If the simulation takes m inputs, then `X` must have
            shape M-by-m, where M is the number of simulations to run.

        :return: dF, contains the gradient of the quantity of interest at each
            given input point. The shape of `dF` is M-by-m.
        :rtype: ndarray

        **Notes**

        In principle, the simulation calls can be executed independently and in
        parallel. Right now this function uses a sequential for-loop. Future
        development will take advantage of multicore architectures to
        parallelize this for-loop.
        """

        # right now this just wraps a sequential for-loop.
        # should be parallelized

        X, M, m = process_inputs(X)
        dF = np.zeros((M, m))

        logger = logging.getLogger(__name__)

        start = time.time()
        for i in range(M):
            df = self.dfun(X[i,:].reshape((1,m)))
            dF[i,:] = df.reshape((1,m))
            # Lazy %-args avoid formatting the message when DEBUG is disabled.
            logger.debug('Completed %d of %d gradient evaluations.', i+1, M)
        end = time.time() - start
        logger.info('Completed %d gradient evaluations in %4.2f seconds.', M, end)

        return dF
# --- Beispiel #7 (scraped snippet separator) ---
    def predict(self, X, compgrad=False):
        """Evaluate least-squares-fit polynomial approximation at new points.

        Parameters
        ----------
        X : ndarray
            points at which to evaluate the polynomial approximation,
            shaped M-by-m where m is the number of dimensions.
        compgrad : bool, optional
            when True, also compute the gradient of the approximation at
            the points `X`. (default False)

        Returns
        -------
        f : ndarray
            predictions of the polynomial approximation, shaped M-by-1.
        df : ndarray
            gradient predictions, shaped M-by-m; None when `compgrad` is
            False.
        """
        X, M, m = process_inputs(X)

        # Apply the fitted coefficients to the polynomial basis at X.
        bases = polynomial_bases(X, self.N)[0]
        f = np.dot(bases, self.poly_weights).reshape((M, 1))

        df = None
        if compgrad:
            dbases = grad_polynomial_bases(X, self.N)
            df = np.zeros((M, m))
            for dim in range(m):
                # one matrix-vector product per input dimension
                column = np.dot(dbases[:, :, dim], self.poly_weights)
                df[:, dim] = column.reshape((M,))

        return f, df