def effectivequadratures(stackOfParameters, q_parameter, function):
    """
    Computes an approximation of the integral using effective-quadratures; this routine uses least squares to estimate the integral.

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param double q_parameter: By default, this routine uses a hyperbolic polynomial basis where the q_parameter (value between 0.1 and 1.0) adjusts the number
        of basis terms to be used for solving the least squares problem.
    :param callable function: The function whose integral needs to be computed, provided either as a callable or as an array
        of function values at the quadrature points.
      
    :return: integral_esq: The effective quadratures approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray

    """

    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(int(stackOfParameters[i].order - 1))
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    # Define the hyperbolic cross
    hyperbolic = IndexSet('Hyperbolic basis', orders=orders, q=q_parameter)
    maximum_number_of_evals = hyperbolic.getCardinality()
    effectiveQuads = EffectiveSubsampling(stackOfParameters, hyperbolic)

    # Call the effective subsampling object
    points = effectiveQuads.getEffectivelySubsampledPoints(
        maximum_number_of_evals)
    xn = effectiveQuads.solveLeastSquares(maximum_number_of_evals, function)
    integral_esq = xn[0]

    # Normalize: for uniform parameters, scale by the width of the interval
    for i in range(0, dimensions):
        if flags[i] == 1:
            integral_esq = integral_esq * (stackOfParameters[i].upper -
                                           stackOfParameters[i].lower)

    return integral_esq[0], points
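# Why the scaling above works: for a Uniform parameter on [a, b], the leading
# coefficient approximates the mean of f, and E[f] * (b - a) equals the integral.
# A minimal numpy check of that identity (independent of the classes above;
# Gauss-Legendre stands in for the library's quadrature):
import numpy as np

a, b = -2.0, 2.0
xi, wi = np.polynomial.legendre.leggauss(5)   # nodes and weights on [-1, 1]
x = 0.5 * (xi + 1.0) * (b - a) + a            # map nodes to [a, b]
mean_f = np.sum(wi * x**2) / np.sum(wi)       # mean of f(x) = x^2 under the uniform weight
print(mean_f * (b - a))                       # ~ 16/3, the exact integral of x^2 over [-2, 2]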
    def __init__(self, uq_parameters, index_sets=None):

        self.uq_parameters = uq_parameters

        # Here we set the index sets if they are not provided
        if index_sets is None:
            # Determine the highest orders for a tensor grid
            highest_orders = []
            for i in range(0, len(uq_parameters)):
                highest_orders.append(uq_parameters[i].order)

            self.index_sets = IndexSet('Tensor grid', highest_orders)
        else:
            self.index_sets = index_sets
    def __init__(self, uq_parameters, index_set=None, method=None):
        self.uq_parameters = uq_parameters
        dimensions = len(uq_parameters)

        # For increased flexibility, if the index_set is not given, we will assume a tensor grid basis
        if index_set is None:

            # determine the orders!
            orders_to_use = []
            for u in range(0, dimensions):
                orders_to_use.append(int(uq_parameters[u].order - 1))

            # Use the tensor grid option!
            self.index_set = IndexSet("Tensor grid", orders_to_use)

        else:
            # Before setting self.index_set = index_set, check that the basis
            # order in each dimension is at most one less than the number of points
            # (a standalone sketch of this rule follows the constructor)
            orders_to_use = []
            count = 0
            for u in range(0, dimensions):
                orders_to_use.append(int(uq_parameters[u].order))
                if orders_to_use[u] <= index_set.orders[u]:
                    count = count + 1
            if count > 0:
                error_function(
                    'IndexSet: basis orders must be at most one less than the number of points!'
                )

            self.index_set = index_set

        if method is not None:
            self.method = method
        else:
            self.method = 'QR'
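# The guard above keeps the least-squares system determined: in each dimension
# the basis order must be at most one less than the number of points. The rule
# in isolation (a sketch; the values are illustrative):
points_per_dim = [5, 5]
basis_orders = [4, 4]
print(all(o <= p - 1 for o, p in zip(basis_orders, points_per_dim)))   # True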
# Example 4 (file: sparse.py, project: rdwight/smobol)
 def __init__(self, dim, quad, indexset=None, level=3, fill='simplex'):
     """
     Initialize either with a standard indexset (specify level and fill), or
     a custom indexset (specify indexset).  
     """
     self.dim = dim
     self.quad = quad  # Init 1d rule
     # Init description of sparse pattern
     self.set_indexset(indexset if indexset else IndexSet(
         dim=dim, level=level, fill=fill))
def tensorgrid(stackOfParameters, function=None):
    """
    Computes a tensor grid of quadrature points based on the distributions for each Parameter in stackOfParameters 

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param callable function: The function whose integral needs to be computed. Can also be input as an array of function values at the
        quadrature points. If the function is given as a callable, this routine outputs the integral of the function and an array of
        the points at which the function was evaluated to estimate the integral. These are the quadrature points. If the function is
        not given (as a callable or an array), the routine outputs the quadrature points and weights.
      
    :return: tensor_int: The tensor grid approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray
    :return: weights: The quadrature weights
    :rtype: numpy ndarray

    **Notes**
    For further details on this routine, see Polynomial.getPointsAndWeights()

    """
    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(stackOfParameters[i].order)
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    tensor = IndexSet('Tensor grid', orders)
    polyObject = Polynomial(stackOfParameters, tensor)

    # Now compute the points and weights
    points, weights = polyObject.getPointsAndWeights()

    # Normalize: for uniform parameters, scale by half the interval width
    for i in range(0, dimensions):
        if flags[i] == 1:
            weights = weights * (stackOfParameters[i].upper -
                                 stackOfParameters[i].lower)
            weights = weights / 2.0

    # Now if the function is a callable, then we can compute the integral:
    if function is not None and callable(function):
        tensor_int = np.mat(weights) * evalfunction(points, function)
        return tensor_int, points
    else:
        return points, weights
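# The tensor-product construction behind tensorgrid, sketched in plain numpy
# (assumes nothing beyond numpy; a 3x4-point Gauss-Legendre rule on [-1, 1]^2):
import numpy as np

x1, w1 = np.polynomial.legendre.leggauss(3)
x2, w2 = np.polynomial.legendre.leggauss(4)
weights = np.kron(w1, w2)                              # 12 tensor weights
points = np.array([[p, q] for p in x1 for q in x2])    # 12 tensor points
print(weights @ np.exp(points[:, 0] + points[:, 1]))   # ~ (e - 1/e)^2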
# Example 6
def getA(self):
    stackOfParameters = self.uq_parameters
    polynomial_basis = self.index_set
    dimensions = len(stackOfParameters)
    indices = IndexSet.getIndexSet(polynomial_basis)
    no_of_indices = len(indices)

    # Create a new PolynomialParam object to get tensor grid points & weights
    polyObject_for_pts = Polynomial(stackOfParameters)
    quadrature_pts, quadrature_wts = polyObject_for_pts.getPointsAndWeights()

    polyObject_for_basis = Polynomial(stackOfParameters, polynomial_basis)

    # Allocate memory for "unscaled points!"
    unscaled_quadrature_pts = np.zeros((len(quadrature_pts), dimensions))
    for i in range(0, dimensions):
        for j in range(0, len(quadrature_pts)):
            if (stackOfParameters[i].param_type == "Uniform"):
                unscaled_quadrature_pts[j, i] = (
                    (quadrature_pts[j, i] - stackOfParameters[i].lower) /
                    (stackOfParameters[i].upper -
                     stackOfParameters[i].lower)) * 2.0 - 1.0

            elif (stackOfParameters[i].param_type == "Beta"):
                unscaled_quadrature_pts[j, i] = (
                    quadrature_pts[j, i] - stackOfParameters[i].lower
                ) / (stackOfParameters[i].upper - stackOfParameters[i].lower)

    # Ensure that the quadrature weights sum up to 1.0
    quadrature_wts = quadrature_wts / np.sum(quadrature_wts)

    # Now we create another Polynomial object for the basis set!
    polynomial_expansions, no_used = polyObject_for_basis.getMultivariatePolynomial(
        unscaled_quadrature_pts)
    P = np.mat(polynomial_expansions)
    m, n = P.shape
    W = np.mat(np.diag(np.sqrt(quadrature_wts)))
    A = W * P.T
    return A, quadrature_pts, quadrature_wts
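# The weighted design matrix A = diag(sqrt(w)) * P^T above, reproduced in a
# 1D numpy sketch with orthonormal Legendre polynomials (illustrative only):
import numpy as np

x, w = np.polynomial.legendre.leggauss(5)
w = w / np.sum(w)                              # weights normalized to sum to 1
P = np.array([np.polynomial.legendre.legval(x, [0] * n + [1]) * np.sqrt(2 * n + 1)
              for n in range(4)])              # basis-by-points, like P above
A = np.diag(np.sqrt(w)) @ P.T                  # points-by-basis
print(np.round(A.T @ A, 10))                   # ~ identity: the basis is orthonormal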
def sparseGrid(listOfParameters, indexSet):

    # Get the number of parameters
    dimensions = len(listOfParameters)

    # Get the sparse index set attributes
    sparse_index, a, sg_set = IndexSet.getIndexSet(indexSet)
    rows = len(sparse_index)

    # Get this into an array
    orders = np.zeros((rows, dimensions))
    points_store = []
    weights_store = []
    factor = 1

    for i in range(0, rows):

        # loop through the dimensions
        for j in range(0, dimensions):
            orders[i, j] = np.array(sparse_index[i][j])

        # Points and weights for the tensor rule of this order
        tensorObject = PolyParent(listOfParameters, method="tensor grid")
        points, weights = PolyParent.getPointsAndWeights(
            tensorObject, orders[i, :])

        # Multiply weights by constant 'a':
        weights = weights * a[i]

        # Append the points and weights from this tensor rule
        for k in range(0, len(points)):
            points_store = np.append(points_store, points[k, :], axis=0)
            weights_store = np.append(weights_store, weights[k])

    dims1 = int(len(points_store) / dimensions)
    points_store = np.reshape(points_store, (dims1, dimensions))

    return points_store, weights_store, sg_set
def sparsegrid(stackOfParameters, level, growth_rule, function=None):
    """
    Computes a sparse grid of quadrature points based on the distributions for each Parameter in stackOfParameters 

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param integer level: Level parameter of the sparse grid integration rule
    :param string growth_rule: Growth rule for the sparse grid. Choose from 'linear' or 'exponential'.
    :param callable function: The function whose integral needs to be computed. Can also be input as an array of function values at the
        quadrature points. If the function is given as a callable, this routine outputs the integral of the function and an array of
        the points at which the function was evaluated to estimate the integral. These are the quadrature points. If the function is
        not given (as a callable or an array), the routine outputs the quadrature points and weights.
      
    :return: sparse_int: The sparse grid approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray
    :return: weights: The quadrature weights
    :rtype: numpy ndarray

    """
    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(stackOfParameters[i].order)
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    # Call the sparse grid index set
    sparse = IndexSet('Sparse grid',
                      level=level,
                      growth_rule=growth_rule,
                      dimension=dimensions)
    sparse_index, sparse_coeffs, sparse_all_elements = sparse.getIndexSet()

    # Get this into an array
    rows = len(sparse_index)
    orders = np.zeros((rows, dimensions))
    points_store = []
    weights_store = []
    factor = 1

    # Now get the tensor grid for each sparse_index
    for i in range(0, rows):

        # loop through the dimensions
        for j in range(0, dimensions):
            orders[i, j] = np.array(sparse_index[i][j])

        # Points and weights for the tensor rule of this order
        tensor = IndexSet('Tensor grid', orders)
        polyObject = Polynomial(stackOfParameters, tensor)
        points, weights = polyObject.getPointsAndWeights(orders[i, :])

        # Multiply weights by constant 'a':
        weights = weights * sparse_coeffs[i]

        # Append the points and weights from this tensor rule
        for k in range(0, len(points)):
            points_store = np.append(points_store, points[k, :], axis=0)
            weights_store = np.append(weights_store, weights[k])

    dims1 = int(len(points_store) / dimensions)
    points_store = np.reshape(points_store, (dims1, dimensions))

    # Normalize: for uniform parameters, scale by half the interval width
    for i in range(0, dimensions):
        if flags[i] == 1:
            weights_store = weights_store * (stackOfParameters[i].upper -
                                             stackOfParameters[i].lower)
            weights_store = weights_store / 2.0

    # Now if the function is a callable, then we can compute the integral:
    if function is not None and callable(function):
        sparse_int = np.mat(weights_store) * evalfunction(
            points_store, function)
        points_store = removeDuplicates(points_store)
        return sparse_int, points_store
    else:
        return points_store, weights_store
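# removeDuplicates above collapses repeated sparse-grid points; a one-line
# numpy alternative for reference (np.unique over rows):
import numpy as np
pts = np.array([[0.0, 0.0], [0.5, 0.0], [0.0, 0.0]])
print(np.unique(pts, axis=0))   # each repeated row appears once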
def getPseudospectralCoefficients(stackOfParameters,
                                  function,
                                  additional_orders=None):

    dimensions = len(stackOfParameters)
    q0 = [1]
    Q = []
    orders = []

    # If additional orders are provided, then use those!
    if additional_orders is None:
        for i in range(0, dimensions):
            orders.append(stackOfParameters[i].order)
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors()
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    else:
        print('Using custom coefficients!')
        for i in range(0, dimensions):
            orders.append(additional_orders[i])
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors(orders[i])
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    # Compute multivariate Gauss points and weights
    p, w = getGaussianQuadrature(stackOfParameters, orders)

    # Evaluate the first point to get the size of the system
    fun_value_first_point = function(p[0, :])
    u0 = q0[0, 0] * fun_value_first_point
    N = 1
    gn = int(np.prod(orders))
    Uc = np.zeros((N, gn))
    Uc[0, 0] = u0

    function_values = np.zeros((1, gn))
    for i in range(0, gn):
        function_values[0, i] = function(p[i, :])

    # Now we evaluate the solution at all the points
    for j in range(0, gn):  # 0
        Uc[0, j] = q0[0, j] * function_values[0, j]

    # Compute the corresponding tensor grid index set:
    order_correction = []
    for i in range(0, len(orders)):
        temp = orders[i] - 1
        order_correction.append(temp)

    tensor_grid_basis = IndexSet("tensor grid", order_correction)
    tensor_set = tensor_grid_basis.getIndexSet()

    # Now we use kronmult
    K = efficient_kron_mult(Q, Uc)
    F = function_values
    K = np.column_stack(K)
    return K, tensor_set, p
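# The kronmult idea used above: apply kron(Q1, Q2) to a vector without ever
# forming the Kronecker product, via a reshape (a minimal numpy sketch of what
# efficient_kron_mult is assumed to do):
import numpy as np

rng = np.random.default_rng(0)
Q1, Q2 = rng.standard_normal((3, 3)), rng.standard_normal((4, 4))
u = rng.standard_normal(12)
direct = np.kron(Q1, Q2) @ u
fast = (Q1 @ u.reshape(3, 4) @ Q2.T).reshape(-1)
print(np.allclose(direct, fast))   # True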

    def getPolynomialApproximation(self, function, plotting_pts):

        # Get the right polynomial coefficients
        if self.method == "tensor grid" or self.method == "Tensor grid":
            coefficients, indexset, evaled_pts = getPseudospectralCoefficients(
                self.uq_parameters, function)
        if self.method == "spam" or self.method == "Spam":
            coefficients, indexset, evaled_pts = getSparsePseudospectralCoefficients(
                self, function)
        if self.method == "sparse grid" or self.method == "Sparse grid":
            print('WARNING: Use spam as a method instead!')
            coefficients, indexset, evaled_pts = getSparseCoefficientsViaIntegration(
                self, function)

        P = getMultiOrthoPoly(self, plotting_pts, indexset)
        PolyApprox = np.mat(coefficients) * np.mat(P)
        return PolyApprox, evaled_pts

    def getMultivariatePolynomial(self, stackOfPoints):

        # "Unpack" parameters from "self"
        stackOfParameters = self.uq_parameters
        isets = self.index_sets
        index_set = isets.getIndexSet()
        dimensions = len(stackOfParameters)
        p = {}
        d = {}

        # Save time by returning if univariate!
        if (dimensions == 1):
            poly, derivatives = stackOfParameters[0].getOrthoPoly(
                stackOfPoints)
            return poly, derivatives
        else:
            for i in range(0, dimensions):
                G, D = stackOfParameters[i].getOrthoPoly(
                    stackOfPoints[:, i], int(np.max(index_set[:, i] + 1)))
                p[i] = G
                d[i] = D

        # Now we multiply components according to the index set
        no_of_points = len(stackOfPoints)
        polynomial = np.zeros((len(index_set), no_of_points))
        derivatives = np.zeros((len(index_set), no_of_points, dimensions))

        # One loop for polynomials
        for i in range(0, len(index_set)):
            temp = np.ones((1, no_of_points))
            for k in range(0, dimensions):
                polynomial[i, :] = p[k][0][int(index_set[i, k])] * temp
                temp = polynomial[i, :]

        # Second loop for derivatives!
        if stackOfParameters[0].derivative_flag == 1:
            print('WIP')

        return polynomial, derivatives
def getSparsePseudospectralCoefficients(self, function):

    # INPUTS
    stackOfParameters = self.uq_parameters
    indexSets = self.index_sets
    dimensions = len(stackOfParameters)
    sparse_indices, sparse_factors, not_used = IndexSet.getIndexSet(indexSets)
    rows = len(sparse_indices)
    cols = len(sparse_indices[0])

    for i in range(0, rows):
        for j in range(0, cols):
            sparse_indices[i, j] = int(sparse_indices[i, j])

    # For storage we use dictionaries
    individual_tensor_coefficients = {}
    individual_tensor_indices = {}
    points_store = {}
    indices = np.zeros((rows, 1))

    for i in range(0, rows):
        orders = sparse_indices[i, :]
        K, I, points = getPseudospectralCoefficients(self.uq_parameters,
                                                     function, orders)
        individual_tensor_indices[i] = I
        individual_tensor_coefficients[i] = K
        points_store[i] = points
        indices[i, 0] = len(I)

    sum_indices = int(np.sum(indices))
    store = np.zeros((sum_indices, dimensions + 1))
    points_saved = np.zeros((sum_indices, dimensions))
    counter = int(0)
    for i in range(0, rows):
        for j in range(0, int(indices[i][0])):
            store[counter, 0] = sparse_factors[
                i] * individual_tensor_coefficients[i][0][j]
            for d in range(0, dimensions):
                store[counter, d + 1] = individual_tensor_indices[i][j][d]
                points_saved[counter, d] = points_store[i][j][d]
            counter = counter + 1

    # Now we use a while loop to iteratively delete the repeated elements while summing up the
    # coefficients!
    index_to_pick = 0
    flag = 1
    counter = 0

    rows = len(store)

    final_store = np.zeros((sum_indices, dimensions + 1))
    while (flag != 0):

        # find the repeated indices
        rep = find_repeated_elements(index_to_pick, store)
        coefficient_value = 0.0

        # Sum up all the coefficient values
        for i in range(0, len(rep)):
            actual_index = rep[i]
            coefficient_value = coefficient_value + store[actual_index, 0]

        # Store into a new array
        final_store[counter, 0] = coefficient_value
        final_store[counter, 1::] = store[index_to_pick, 1::]
        counter = counter + 1

        # Delete index from store
        store = np.delete(store, rep, axis=0)

        # How many entries remain in store?
        rows = len(store)
        if rows == 0:
            flag = 0

    indices_to_delete = np.arange(counter, sum_indices, 1)
    final_store = np.delete(final_store, indices_to_delete, axis=0)

    # Now split final store into coefficients and their index sets!
    coefficients = np.zeros((1, len(final_store)))
    for i in range(0, len(final_store)):
        coefficients[0, i] = final_store[i, 0]

    # Splitting final_store to get the indices!
    indices = final_store[:, 1::]

    # Now just double check to make sure they are all integers
    for i in range(0, len(indices)):
        for j in range(0, dimensions):
            indices[i, j] = int(indices[i, j])

    return coefficients, indices, points_saved
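# The while-loop above merges repeated multi-indices by summing their
# coefficients; the same reduction expressed with a dictionary (standalone
# sketch, values illustrative):
coeff = {}
rows = [((0, 0), 1.0), ((1, 0), 0.5), ((0, 0), -0.25)]   # (multi-index, coefficient)
for idx, c in rows:
    coeff[idx] = coeff.get(idx, 0.0) + c
print(coeff)   # {(0, 0): 0.75, (1, 0): 0.5}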
def getPseudospectralCoefficients(self, function, override_orders=None):

    stackOfParameters = self.uq_parameters
    dimensions = len(stackOfParameters)
    q0 = [1]
    Q = []
    orders = []

    # If override orders are provided, then use those!
    if override_orders is None:
        for i in range(0, dimensions):
            orders.append(stackOfParameters[i].order)
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors()
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    else:
        for i in range(0, dimensions):
            orders.append(override_orders[i])
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors(orders[i])
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    # Compute multivariate Gauss points and weights!
    if override_orders is None:
        p, w = self.getPointsAndWeights()
    else:
        p, w = self.getPointsAndWeights(override_orders)

    # Evaluate the first point to get the size of the system
    fun_value_first_point = function(p[0, :])
    u0 = q0[0, 0] * fun_value_first_point
    N = 1
    gn = int(np.prod(orders))
    Uc = np.zeros((N, gn))
    Uc[0, 0] = u0

    function_values = np.zeros((1, gn))
    for i in range(0, gn):
        function_values[0, i] = function(p[i, :])

    # Now we evaluate the solution at all the points
    for j in range(0, gn):  # 0
        Uc[0, j] = q0[0, j] * function_values[0, j]

    # Compute the corresponding tensor grid index set:
    order_correction = []
    for i in range(0, len(orders)):
        temp = orders[i] - 1
        order_correction.append(temp)

    tensor_grid_basis = IndexSet('Tensor grid', order_correction)
    tensor_set = tensor_grid_basis.getIndexSet()

    # Now we use kronmult
    K = efficient_kron_mult(Q, Uc)
    F = function_values
    K = np.column_stack(K)
    return K, tensor_set, p
class Polynomial(object):
    """
    This class defines a polynomial and its associated functions. 

    :param array of Parameters uq_parameters: A list of Parameters
    :param IndexSet index_set: An instance of the IndexSet class, in case the user wants to overwrite the indices
        that are obtained using the orders of the univariate parameters in Parameters uq_parameters. The latter 
        corresponds to a tensor grid index set and is the default option if no index_set parameter input is given.
    
    **Sample declarations** 
    ::
        >> s = Parameter(lower=-2, upper=2, param_type='Uniform', points=4)
        >> T = IndexSet('Total order', [3,3])
        >> polyObject = Polynomial([s,s],T) # basis is defined by T

        >> s = Parameter(lower=-2, upper=2, param_type='Uniform')
        >> polyObject = Polynomial([s,s]) # Tensor basis is used
    """

    # Constructor
    def __init__(self, uq_parameters, index_sets=None):

        self.uq_parameters = uq_parameters

        # Here we set the index sets if they are not provided
        if index_sets is None:
            # Determine the highest orders for a tensor grid
            highest_orders = []
            for i in range(0, len(uq_parameters)):
                highest_orders.append(uq_parameters[i].order)

            self.index_sets = IndexSet('Tensor grid', highest_orders)
        else:
            self.index_sets = index_sets

    def getIndexSet(self):
        """
        Returns the index set used for computing the multivariate polynomials

        :param Polynomial self: An instance of the Polynomial class
        :return: index_set, cardinality-by-dimension matrix which is obtained by calling the getIndexSet() routine of the IndexSet object
        :rtype: ndarray

        **Sample declaration**
        :: 
            >> s = Parameter(lower=-2, upper=2, param_type='Uniform')
            >> polyObject = Polynomial([s,s])
            >> I = polyObject.getIndexSet()
        """
        return self.index_sets.getIndexSet()

    # Optional override of the per-parameter orders (used by the integrals routines)
    def getPointsAndWeights(self, override_orders=None):
        """
        Returns the nD Gaussian quadrature points and weights based on the recurrence coefficients of each Parameter. This function
        computes anisotropic and isotropic tensor product rules using a series of Kronecker product operations on univariate Gauss 
        quadrature points and weights. For details on the univariate rules, see Parameter.getLocalQuadrature()

        :param Polynomial self: An instance of the Polynomial class
        :param array override_orders: Optional input of orders that overrides the orders defined for each Parameter.
            This functionality is used by the integrals function.
        :return: points, N-by-d matrix that contains the tensor grid Gauss quadrature points
        :rtype: ndarray
        :return: weights, 1-by-N matrix that contains the tensor grid Gauss quadrature weights
        :rtype: ndarray


        **Sample declaration**
        :: 
            >> s = Parameter(lower=-2, upper=2, param_type='Uniform')
            >> polyObject = Polynomial([s,s])
            >> p, w = polyObject.getPointsAndWeights()
        """
        # Initialize some temporary variables
        stackOfParameters = self.uq_parameters
        dimensions = int(len(stackOfParameters))

        orders = []
        if override_orders is None:
            for i in range(0, dimensions):
                orders.append(stackOfParameters[i].order)
        else:
            orders = override_orders

        # Initialize points and weights
        pp = [1.0]
        ww = [1.0]

        # number of parameters
        # For loop across each dimension
        for u in range(0, dimensions):

            # Call to get local quadrature method (for dimension 'u')
            local_points, local_weights = stackOfParameters[
                u].getLocalQuadrature(orders[u])

            # Tensor product of the weights
            ww = np.kron(ww, local_weights)

            # Tensor product of the points
            dummy_vec = np.ones((len(local_points), 1))
            dummy_vec2 = np.ones((len(pp), 1))
            left_side = np.array(np.kron(pp, dummy_vec))
            right_side = np.array(np.kron(dummy_vec2, local_points))
            pp = np.concatenate((left_side, right_side), axis=1)

        # Ignore the first column of pp
        points = pp[:, 1::]
        weights = ww

        # Now re-scale the points and return only if its not a Gaussian!
        for i in range(0, dimensions):
            for j in range(0, len(points)):
                if (stackOfParameters[i].param_type == "Uniform"):
                    points[j, i] = 0.5 * (points[j, i] + 1.0) * (
                        stackOfParameters[i].upper - stackOfParameters[i].lower
                    ) + stackOfParameters[i].lower

                elif (stackOfParameters[i].param_type == "Beta"):
                    points[j, i] = (points[j, i]) * (
                        stackOfParameters[i].upper - stackOfParameters[i].lower
                    ) + stackOfParameters[i].lower

                elif (stackOfParameters[i].param_type == "Gaussian"):
                    points[j, i] = points[j, i]  # No scaling!

        # Return tensor grid quad-points and weights
        return points, weights
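    # A worked view of the kron-with-ones tensorization above: for 1D node
    # sets a = [[-1], [1]] and b = [[0], [1]],
    #   np.kron(a, ones((2, 1)))  -> [-1, -1,  1,  1]^T
    #   np.kron(ones((2, 1)), b)  -> [ 0,  1,  0,  1]^T
    # and concatenating the columns yields the four product-grid points.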

    def getMultivariatePolynomial(self, stackOfPoints, indexsets=None):
        """
        Returns multivariate orthonormal polynomials and their derivatives

        :param Polynomial self: An instance of the Polynomial class
        :param: ndarray stackOfPoints: An m-by-d matrix of points at which the polynomials (and their derivatives) must be evaluated;
            here m is the total number of points across d dimensions. Note that the derivatives are only computed if the Parameters
            have the derivative_flag set to 1.
        :return: polynomial, m-by-N matrix, where m is the number of points at which the multivariate orthonormal polynomial is evaluated and
            N is the cardinality of the index set used when declaring a Polynomial object.
        :rtype: ndarray
        :return: derivatives, one m-by-N matrix per dimension (d cells in total), with m and N as above.
        :rtype: cell object


        **Sample declaration**
        :: 
            >> s = Parameter(lower=-1, upper=1, param_type='Uniform', points=2, derivative_flag=1)
            >> uq_parameters = [s,s]
            >> uq = Polynomial(uq_parameters)
            >> pts, x1, x2 = utils.meshgrid(-1.0, 1.0, 10, 10)
            >> P , Q = uq.getMultivariatePolynomial(pts)
        """

        # "Unpack" parameters from "self"
        empty = np.mat([0])
        stackOfParameters = self.uq_parameters
        isets = self.index_sets
        if indexsets is None:
            if isets.index_set_type == 'Sparse grid':
                ic, not_used, index_set = isets.getIndexSet()
            else:
                index_set = isets.getIndexSet()
        else:
            index_set = indexsets

        dimensions = len(stackOfParameters)
        p = {}
        d = {}
        C_all = {}

        # Save time by returning if univariate!
        if dimensions == 1 and stackOfParameters[0].derivative_flag == 0:
            poly, derivatives = stackOfParameters[0].getOrthoPoly(
                stackOfPoints)
            derivatives = empty
            return poly, derivatives
        elif dimensions == 1 and stackOfParameters[0].derivative_flag == 1:
            poly, derivatives = stackOfParameters[0].getOrthoPoly(
                stackOfPoints)
            C_all[0] = derivatives
            return poly, C_all
        else:
            for i in range(0, dimensions):
                p[i], d[i] = stackOfParameters[i].getOrthoPoly(
                    stackOfPoints[:, i], int(np.max(index_set[:, i] + 1)))

        # Now we multiply components according to the index set
        no_of_points = len(stackOfPoints)
        polynomial = np.zeros((len(index_set), no_of_points))
        derivatives = np.zeros((len(index_set), no_of_points, dimensions))

        # One loop for polynomials
        for i in range(0, len(index_set)):
            temp = np.ones((1, no_of_points))
            for k in range(0, dimensions):
                polynomial[i, :] = p[k][int(index_set[i, k])] * temp
                temp = polynomial[i, :]

        # Second loop for derivatives!
        if stackOfParameters[0].derivative_flag == 1:
            P_others = np.zeros((len(index_set), no_of_points))

            # Going into for loop!
            for j in range(0, dimensions):
                # Now what are the remaining dimensions?
                C_local = np.zeros((len(index_set), no_of_points))
                remaining_dimensions = np.arange(0, dimensions)
                remaining_dimensions = np.delete(remaining_dimensions, j)
                total_elements = len(remaining_dimensions)

                # Now we compute the "C" matrix
                for i in range(0, len(index_set)):
                    # Temporary variable!
                    P_others = np.zeros((len(index_set), no_of_points))
                    temp = np.ones((1, no_of_points))

                    # Multiply ortho-poly components in these "remaining" dimensions
                    for k in range(0, len(remaining_dimensions)):
                        entry = remaining_dimensions[k]
                        P_others[i, :] = p[entry][int(index_set[i,
                                                                entry])] * temp
                        temp = P_others[i, :]
                        if len(remaining_dimensions) == 0:  # in which case it is empty!
                            C_all[i, :] = d[j][int(index_set[i, j])]
                        else:
                            C_local[i, :] = d[j][int(
                                index_set[i, j])] * P_others[i, :]
                C_all[j] = C_local
                del C_local
            return polynomial, C_all
        empty = np.mat([0])
        return polynomial, empty
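    # Note on the polynomial loop above: each row of `polynomial` is a product
    # of univariate evaluations, one factor per dimension. For multi-index
    # (2, 1) at a point x, the entry is p[0][2](x_0) * p[1][1](x_1), which is
    # why `temp` accumulates the running product across dimensions.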

    def getPolynomialCoefficients(self, function):
        """
        Returns multivariate orthonormal polynomial coefficients. Depending on the choice of the index set, this function will either return a tensor grid
        of pseudospectral coefficients or a sparse grid using the SPAM technique by Constantine et al (2012). 
    
        :param Polynomial self: An instance of the Polynomial class
        :param: callable function: The function that needs to be approximated (or interpolated)
        :return: coefficients: The pseudospectral coefficients
        :rtype: ndarray
        :return: indexset: The indices used for the pseudospectral computation
        :rtype: ndarray
        :return: evaled_pts: The points at which the function was evaluated
        :rtype: ndarray

        """
        # Method to compute the coefficients
        method = self.index_sets.index_set_type
        # Get the right polynomial coefficients
        if method == "Tensor grid":
            coefficients, indexset, evaled_pts = getPseudospectralCoefficients(
                self, function)
        if method == "Sparse grid":
            coefficients, indexset, evaled_pts = getSparsePseudospectralCoefficients(
                self, function)
        else:
            coefficients, indexset, evaled_pts = getPseudospectralCoefficients(
                self, function)
        return coefficients, indexset, evaled_pts

    def getPolynomialApproximation(self,
                                   function,
                                   plotting_pts,
                                   coefficients=None,
                                   indexset=None):
        """
        Returns the polynomial approximation of a function. This routine effectively multiplies the coefficients of a polynomial
        expansion with its corresponding basis polynomials. 
    
        :param Polynomial self: An instance of the Polynomial class
        :param: callable function: The function that needs to be approximated (or interpolated)
        :param: ndarray plotting_pts: The points at which the polynomial approximation should be evaluated at
        :return: polyapprox: The polynomial expansion of a function
        :rtype: numpy matrix

        """
        # Check to see if we need to call the coefficients
        if coefficients is None or indexset is None:
            coefficients, indexset, evaled_pts = self.getPolynomialCoefficients(
                function)

        P, Q = self.getMultivariatePolynomial(plotting_pts, indexset)
        P = np.mat(P)
        C = np.mat(coefficients)
        polyapprox = P.T * C
        return polyapprox

    def getPDF(self, function, graph=1, coefficients=None, indexset=None):
        """
        Returns the PDF of the model output. This routine effectively multiplies the coefficients of a polynomial
        expansion with its corresponding basis polynomials. 
    
        :param Polynomial self: An instance of the Polynomial class
        :param: callable function: The function that needs to be approximated (or interpolated)
        :return: polyapprox: The polynomial expansion of a function
        :rtype: numpy matrix

        """
        dimensions = len(self.uq_parameters)

        # Check to see if we need to call the coefficients
        if coefficients is None or indexset is None:
            coefficients, indexset, evaled_pts = self.getPolynomialCoefficients(
                function)

        # For each UQ parameter in self, store the samples
        number_of_samples = 50000  # default value!
        plotting_pts = np.zeros((number_of_samples, dimensions))
        for i in range(0, dimensions):
            univariate_samples = self.uq_parameters[i].getSamples(
                number_of_samples)
            for j in range(0, number_of_samples):
                plotting_pts[j, i] = univariate_samples[j]

        P, Q = self.getMultivariatePolynomial(plotting_pts, indexset)
        P = np.mat(P)
        C = np.mat(coefficients)
        polyapprox = P.T * C

        if graph is not None:
            fig = plt.figure()
            n, bins, patches = plt.hist(polyapprox,
                                        30,
                                        density=True,
                                        facecolor='red',
                                        alpha=0.75)
            plt.xlabel('f(x)')
            plt.ylabel('PDF')
            plt.xlim(np.min(polyapprox) - 0.25, np.max(polyapprox) + 0.25)
            #plt.savefig('file.png', format='png', dpi=800)
            plt.show()

        return polyapprox
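# The PDF estimate in getPDF is a Monte Carlo push-forward: sample the inputs,
# evaluate the cheap polynomial surrogate, and histogram the outputs. A
# self-contained sketch with an analytic stand-in for polyapprox:
import numpy as np
import matplotlib.pyplot as plt

samples = np.random.uniform(-2.0, 2.0, size=(50000, 2))
outputs = samples[:, 0]**2 + samples[:, 1]   # stand-in for the surrogate
plt.hist(outputs, bins=30, density=True)
plt.xlabel('f(x)')
plt.ylabel('PDF')
plt.show()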
# Example 13 (file: adapt.py, project: rdwight/smobol)
def sobol_adaptive(f, dim, S_cutoff=0.95, max_samples=200, max_iters=10,
                   max_level=10, max_k=10,
                   fval=None, plotting=False, labels=None):
    """
    Sobol-based dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      S_cutoff    - Sobol index cutoff, adapt dimensions with Sobol indices
                    adding up to the cutoff
      max_samples - termination criteria, maximum allowed samples of f
      max_iters   - termination criteria, maximum adaptation iterations
      max_level   - maximum level allowed in any single variable, ie. don't allow
                    very-high resolution in any direction. Enforce
                    max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, ie. constrain
                    to simplex-rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes, pass
                    dictionary containing values - these will be used first
    The iteration will also terminate if the grid is unchanged after an 
    iteration.
    """
                                        # Initialize with simplex level 2, ie.
                                        # one-factor, 3 points in each direction
    K = IndexSet(dim=dim, level=2, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter = 1
    fval = {} if fval is None else fval # Dictionary of function values
                                        # Main adaptation loop 
    while sp.n_nodes() <= max_samples and iter <= max_iters:
                                        # Sampling call, don't recompute already
                                        # known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/sobol_adapt_iter%d.pdf'%iter, labels=labels)
                                        # 2. Compute Sobol indices, up to maximum
                                        # interaction defined by the current K
        D, mu, var = sp.compute_sobol_variances(fval, 
                                                cardinality=K.max_interaction(), 
                                                levelrefine=2)
        del D[()]                       # Remove variance (==var) 

        print('# %6d %12.6e %12.6e' % (sp.n_nodes(), mu, var))
        ### ------------------------------------------- RESULT <==
                                        # 3. Interaction selection
                                        # Sort according to variance large->small
        print('D =', D)
        Dsort = sorted(D.items(), key=lambda kv: -kv[1])
        print('Dsort =', Dsort)
        print('var =', var)
        sobol_total,i,U = 0.,0,set([])  # Select most important interactions
        while sobol_total < S_cutoff and i < len(Dsort):
            sobol_total += Dsort[i][1] / var
            U |= set([Dsort[i][0]])
            i += 1
        print('U =', U)
                                        # 4. Interaction augmentation
                                        # Find set of potential *new*
                                        # interactions present in active set
        A = K.activeset()
        potential_interactions = set([interaction(a) for a in A]) - \
                                 K.interactions()
        print('A = ', A)
        print('potential_interactions =', potential_interactions)
                                        # Select potential new interactions 
                                        # satisfying 
        Uplus = set([])
        for interac in potential_interactions:
            all_subsets = set([])
            for r in range(1, len(interac)):
                all_subsets |= set(itertools.combinations(interac, r))
            if all_subsets <= U:
                Uplus |= set([interac])
        U |= Uplus
        print('Uplus =', Uplus)
        print('new U =', U)
                                        # 5. Indexset extension - new sparse grid
        unchanged = True
        for a in A:
            if np.sum(a) > max_k+dim-1: # Enforce simplex-constraint on indexset
                continue
            if np.max(a) > max_level:   # Enforce maximum level constraint
                continue
            if interaction(a) in U:
                unchanged = False
                K.I |= set([a])
        if unchanged:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        print('K.I =', K.I)
        sp.set_indexset(K)
        iter += 1
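# Step 4 above admits a candidate interaction only when all of its proper
# subsets are already selected; that admissibility rule in isolation (pure
# stdlib, values illustrative):
import itertools

U = {(0,), (1,), (0, 1)}          # currently selected interactions
candidate = (0, 1, 2)
all_subsets = set()
for r in range(1, len(candidate)):
    all_subsets |= set(itertools.combinations(candidate, r))
print(all_subsets <= U)           # False: (2,) has not been selected yet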
# Example 14 (file: adapt.py, project: rdwight/smobol)
def gerstnerandgriebel_adaptive(f, dim, max_samples=200, max_iters=10,
                                min_error=1.e-16, max_level=10, max_k=10,
                                fval=None, plotting=False, labels=None):
    """
    Gerstner+Griebel style dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      max_samples - termination criteria, maximum allowed samples of f
      max_iters   - termination criteria, maximum adaptation iterations
      max_level   - maximum level allowed in any single variable, ie. don't allow
                    very-high resolution in any direction. Enforce
                    max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, ie. constrain
                    to simplex-rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes, pass
                    dictionary containing values - these will be used first
    """
                                        # Initialize with simplex level 1
    K = IndexSet(dim=dim, level=1, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter, eta = 1, 1e100
    fval = {} if fval is None else fval # Dictionary of function values
                                        # Main adaptation loop 
    while sp.n_nodes() <= max_samples and iter <= max_iters and eta > min_error:
                                        # Sampling call, don't recompute already
                                        # known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/GandG_adapt_iter%d.png'%iter, labels=labels)

        r = sp.integrate(fval)
        print('r =', r)
                                        # For each member of the active set,
                                        # compute the difference between the 
                                        # objective, with and without that member
        A = K.activeset()
        g = {}
        for a in A:
            if np.sum(a) > max_k+dim-1: # Enforce simplex-constraint on indexset
                continue
            if np.max(a) > max_level:   # Enforce maximum level constraint
                continue
            Kmod = copy.deepcopy(K)
            Kmod.I |= set([a])
            sp.set_indexset(Kmod)
            sp.sample_fn(f, fargs=(), fval=fval)
            rmod = sp.integrate(fval)
            g[a] = abs(rmod - r)            
        if len(g) == 0:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        a_adapt = max(g, key=g.get)
        eta = sum(g.values())
        print('eta =',eta)
        K.I |= set([a_adapt])
        sp.set_indexset(K)
        iter += 1

    return r
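# The adaptation step above admits the active multi-index whose inclusion
# changes the integral estimate the most; the dict bookkeeping in isolation
# (values illustrative):
g = {(2, 1): 0.03, (1, 2): 0.10, (3, 1): 0.01}   # |rmod - r| per candidate
a_adapt = max(g, key=g.get)                      # (1, 2): largest indicator
eta = sum(g.values())                            # global error estimate, 0.14
print(a_adapt, eta)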