def solveLeastSquaresWithGradients(self, maximum_number_of_evals, function_values, gradient_values):
    """
    Returns the coefficients for the effectively subsampled quadratures least squares problem.

    :param EffectiveSubsampling self: An instance of the EffectiveSubsampling class.
    :param integer maximum_number_of_evals: The maximum number of evaluations the user would like. This value has to be at least equivalent to the total number of basis terms of the index set.
    :param callable function_values: A function call to the simulation model that takes in d inputs and returns one output. If the user knows the quadrature subsamples required, they may also input all the simulation outputs as a single ndarray.
    :param callable gradient_values: A function call to the simulation model that takes in d inputs and returns the d-by-1 gradient vector at those inputs. If the user knows the quadrature subsamples required, they may also input all the simulation gradients as an ndarray.
    :return: x, the coefficients of the least squares problem.
    :rtype: ndarray

    **Sample declaration**
    ::
        >> x = eq.solveLeastSquaresWithGradients(150, function_call, gradient_call)
    """
    A, esq_pts, W, points = getSquareA(self, maximum_number_of_evals)
    A, normalizations = rowNormalize(A)
    C = self.getCsubsampled(esq_pts)

    # Check whether the user input is a function or a set of function values!
    if callable(function_values):
        fun_values = evalfunction(esq_pts, function_values)
    else:
        fun_values = function_values
    if callable(gradient_values):
        grad_values = evalfunction(esq_pts, gradient_values)
    else:
        grad_values = gradient_values

    # Weight and row normalize the function values!
    b = W * fun_values
    b = np.dot(normalizations, b)

    # The gradient values arrive as a p-by-q matrix, where p is the number of
    # subsampled points and q is the number of dimensions. Stack them column
    # by column into a single (p*q)-by-1 vector for the constrained solver.
    p, q = grad_values.shape
    d_vec = np.zeros((p * q, 1))
    counter = 0
    for j in range(0, q):
        for i in range(0, p):
            d_vec[counter] = grad_values[i, j]
            counter = counter + 1

    # Now solve the constrained least squares problem
    return solveCLSQ(A, b, C, d_vec)
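# For reference, the column-by-column stacking loop in
# solveLeastSquaresWithGradients is equivalent to a single Fortran-ordered
# reshape. A minimal self-contained check of that equivalence:
import numpy as np

demo_grads = np.array([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])  # p=3 points, q=2 dimensions
demo_p, demo_q = demo_grads.shape
demo_d_vec = np.reshape(demo_grads, (demo_p * demo_q, 1), order='F')
# demo_d_vec is [[1.], [2.], [3.], [4.], [5.], [6.]]: columns stacked in loop order.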
def getSparseCoefficientsViaIntegration(self, function):
    # Preliminaries
    stackOfParameters = self.uq_parameters
    indexSets = self.index_sets
    dimensions = len(stackOfParameters)

    # Sparse grid integration rule
    pts, wts, sg_set_full = sparseGrid(stackOfParameters, indexSets)
    for i in range(0, len(sg_set_full)):
        for j in range(0, dimensions):
            sg_set_full[i, j] = int(sg_set_full[i, j])

    P = getMultiOrthoPoly(self, pts, sg_set_full)
    f = evalfunction(pts, function)
    f = np.mat(f)
    Wdiag = np.diag(wts)

    # Allocate memory for the coefficients
    rows = len(sg_set_full)
    coefficients = np.zeros((1, rows))

    # Multiply by P[0, :] because the zeroth-order polynomial is not 1.0
    for i in range(0, rows):
        coefficients[0, i] = np.mat(P[i, :]) * Wdiag * np.diag(P[0, :]) * f
    return coefficients, sg_set_full, pts
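# A hedged usage sketch for getSparseCoefficientsViaIntegration; the
# constructor call below is an assumption about how the surrounding class is
# instantiated, not a confirmed API. Only the method signature above is known.
import numpy as np

def example_model(x):
    # Hypothetical bivariate test function.
    return float(np.exp(x[0] + x[1]))

# poly = Polynomial(uq_parameters, index_sets)   # assumed constructor
# coefficients, sg_set, pts = poly.getSparseCoefficientsViaIntegration(example_model)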
def solveLeastSquares(self, maximum_number_of_evals, function_values):
    """
    Returns the coefficients for the effectively subsampled quadratures least squares problem.

    :param EffectiveSubsampling self: An instance of the EffectiveSubsampling class.
    :param integer maximum_number_of_evals: The maximum number of evaluations the user would like. This value has to be at least equivalent to the total number of basis terms of the index set.
    :param callable function_values: A function call to the simulation model that takes in d inputs and returns one output. If the user knows the quadrature subsamples required, they may also input all the simulation outputs as a single ndarray.
    :return: x, the coefficients of the least squares problem.
    :rtype: ndarray

    **Sample declaration**
    ::
        >> x = eq.solveLeastSquares(150, function_call)
    """
    A, esq_pts, W, points = getSquareA(self, maximum_number_of_evals)
    A, normalizations = rowNormalize(A)

    # Check whether the user input is a function or a set of function values!
    if callable(function_values):
        fun_values = evalfunction(esq_pts, function_values)
    else:
        fun_values = function_values

    # Weight and row normalize the function values, then solve!
    b = W * fun_values
    b = np.dot(normalizations, b)
    x = solveLSQ(A, b)
    return x
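# A minimal sketch of the two input modes solveLeastSquares accepts: a callable
# model, or precomputed outputs as an ndarray. The EffectiveSubsampling
# instance `eq` is assumed to exist; `example_model` and `outputs_ndarray` are
# hypothetical stand-ins for a user's simulation code and its evaluations.
#
#   x = eq.solveLeastSquares(150, example_model)       # callable route
#   x = eq.solveLeastSquares(150, outputs_ndarray)     # precomputed-outputs route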
def getPseudospectralCoefficients(self, function, override_orders=None):
    if override_orders is None:
        pts, wts = super(Polyint, self).getTensorQuadratureRule()
        tensor_elements = self.basis.elements
        P = super(Polyint, self).getPolynomial(pts)
    else:
        pts, wts = super(Polyint, self).getTensorQuadratureRule(override_orders)
        tensor_basis = Basis('Tensor grid', override_orders)
        tensor_elements = tensor_basis.elements
        P = super(Polyint, self).getPolynomial(pts, tensor_elements)

    # Weight the polynomial matrix and the function evaluations, then contract.
    m = len(wts)
    W = np.mat(np.diag(np.sqrt(wts)))
    A = np.mat(W * P.T)
    if callable(function):
        y = evalfunction(points=pts, function=function)
    else:
        y = function
    b = np.dot(W, np.reshape(y, (m, 1)))
    coefficients = np.dot(A.T, b)
    return coefficients, tensor_elements, pts, wts
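# A self-contained sketch of the quadrature contraction that
# getPseudospectralCoefficients performs: each coefficient is
# sum_k w_k * P_i(x_k) * f(x_k), i.e. the A^T b step above. Illustrated with
# NumPy's Gauss-Legendre rule and (unnormalized) Legendre polynomials; this is
# an analogy, not the class's own basis.
import numpy as np
from numpy.polynomial import legendre

gl_pts, gl_wts = legendre.leggauss(5)         # 5-point Gauss-Legendre rule
P_demo = np.array([legendre.legval(gl_pts, np.eye(3)[i]) for i in range(3)])  # P_0..P_2 at the points
W_demo = np.diag(np.sqrt(gl_wts))
A_demo = W_demo @ P_demo.T
b_demo = W_demo @ np.cos(gl_pts).reshape(-1, 1)   # f(x) = cos(x) as a test function
coeff_demo = A_demo.T @ b_demo                    # same A^T b contraction as above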
def integrate(self, function):
    p, w = self._getLocalQuadrature()
    # Approximate the integral as the weighted sum of function evaluations.
    return float(np.dot(w, evalfunction(p, function)))
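# A self-contained check of the weighted sum computed by integrate:
# dot(w, f(p)) = sum_i w_i * f(p_i). The Gauss-Legendre rule below is a
# stand-in for _getLocalQuadrature().
import numpy as np

qp, qw = np.polynomial.legendre.leggauss(10)
approx = float(np.dot(qw, qp**2))    # integrate f(x) = x^2 over [-1, 1]
# approx is ~0.666667, matching the exact value 2/3.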
def computeCoefficients(self, func, gradfunc=None, gradientmethod=None):
    """
    Computes the coefficients of the polynomial via least squares.

    :param Polylsq self: An instance of the Polylsq class.
    :param callable func: The function that needs to be approximated. In the absence of a callable function, the input can be the function evaluated at the quadrature points.
    :param callable gradfunc: The gradient of the function that needs to be approximated. In the absence of a callable gradient function, the input can be a matrix of gradient evaluations at the quadrature points.
    :param string gradientmethod: The underlying strategy used to estimate the coefficients when gradient evaluations are provided. Options include: 'stacked', 'constrained-DE', 'constrained-NS'.
    """
    # If there are no gradients, solve via standard least squares!
    if self.gradients is False:
        p, q = self.Wz.shape
        # Get function values!
        if callable(func):
            y = evalfunction(self.quadraturePoints, func)
        else:
            y = func
        self.functionEvaluations = y
        self.bz = np.dot(self.Wz, np.reshape(y, (p, 1)))
        alpha = np.linalg.lstsq(self.Az, self.bz, rcond=None)
        self.coefficients = alpha[0]
    # If there are gradients then use a constrained least squares approach!
    elif self.gradients is True and gradfunc is not None:
        p, q = self.Wz.shape
        # Get function values!
        if callable(func):
            y = evalfunction(self.quadraturePoints, func)
        else:
            y = func
        # Get gradient values!
        if callable(gradfunc):
            grad_values = evalgradients(self.quadraturePoints, gradfunc, 'matrix')
        else:
            grad_values = gradfunc
        # Weight the p-by-q gradient matrix and stack it, column by column,
        # into a single long vector dy; p is the number of quadrature points
        # and q is the number of dimensions.
        p, q = grad_values.shape
        d = np.zeros((p * q, 1))
        counter = 0
        for j in range(0, q):
            for i in range(0, p):
                d[counter] = self.Wz[i, i] * grad_values[i, j]
                counter = counter + 1
        self.dy = d
        del d, grad_values
        self.bz = np.dot(self.Wz, np.reshape(y, (p, 1)))
        coefficients, cond = solveCLSQ(self.Az, self.bz, self.Cz, self.dy, gradientmethod)
        self.coefficients = coefficients
    elif self.gradients is True and gradfunc is None:
        raise ValueError('Polylsq:computeCoefficients:: Gradient function evaluations must be provided, either as a callable function or as vectors.')
    super(Polylsq, self).__setCoefficients__(self.coefficients)
    super(Polylsq, self).__setQuadrature__(self.quadraturePoints, self.quadratureWeights)
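# A hedged usage sketch for Polylsq.computeCoefficients with gradients; the
# constructor call below is an assumption about how Polylsq is instantiated,
# not a confirmed API. The 'constrained-DE' option comes from the docstring.
import numpy as np

def example_func(x):
    # Hypothetical model: f(x) = x_0^2 + x_1^2
    return float(x[0]**2 + x[1]**2)

def example_gradfunc(x):
    # Its analytic gradient: [2 x_0, 2 x_1]
    return np.array([2.0 * x[0], 2.0 * x[1]])

# poly = Polylsq(parameters, basis, gradients=True)   # assumed constructor
# poly.computeCoefficients(example_func, example_gradfunc, 'constrained-DE')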