def __init__(self, uq_parameters, index_set=None, method=None):
        self.uq_parameters = uq_parameters
        dimensions = len(uq_parameters)

        # For increased flexibility, if the index_set is not given, we will assume a tensor grid basis
        if index_set is None:

            # determine the orders!
            orders_to_use = []
            for u in range(0, dimensions):
                orders_to_use.append(int(uq_parameters[u].order - 1))  # np.int was removed in NumPy >= 1.24; use the builtin

            # Use the tensor grid option!
            self.index_set = IndexSet("Tensor grid", orders_to_use)

        else:
            # Before assigning self.index_set = index_set, check that the basis
            # order in each dimension is at most one less than the number of
            # points
            orders_to_use = []
            count = 0
            for u in range(0, dimensions):
                orders_to_use.append(int(uq_parameters[u].order))
                if orders_to_use[u] <= index_set.orders[u]:
                    count = count + 1
            if count > 0:
                error_function(
                    'IndexSet: Basis orders: the basis order must be at most '
                    'one less than the number of points in each dimension!'
                )

            self.index_set = index_set

        if method is not None:
            self.method = method
        else:
            self.method = 'QR'
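
For reference, the "Tensor grid" option above enumerates every multi-index up to each dimension's order. A minimal, self-contained sketch of that enumeration (independent of the IndexSet class, whose internals may differ):

import itertools
import numpy as np

def tensor_grid_indices(orders):
    # All multi-indices (i_1, ..., i_d) with 0 <= i_k <= orders[k]
    return np.array(list(itertools.product(*[range(o + 1) for o in orders])))

print(tensor_grid_indices([2, 1]))  # 6 rows: (0,0), (0,1), (1,0), ..., (2,1)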
Example #2
File: sparse.py  Project: rdwight/smobol
 def __init__(self, dim, quad, indexset=None, level=3, fill='simplex'):
     """
     Initialize either with a standard indexset (specify level and fill), or
     a custom indexset (specify indexset).  
     """
     self.dim = dim
     self.quad = quad  # Init 1d rule
     # Init description of sparse pattern
     self.set_indexset(indexset if indexset else IndexSet(
         dim=dim, level=level, fill=fill))
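
This constructor appears to belong to SparseGrid, judging by the adapt.py examples further down; a short usage sketch based on those calls:

quad = QuadratureCC()                        # 1-D Clenshaw-Curtis rule
sp = SparseGrid(3, quad, level=4)            # standard simplex pattern
K = IndexSet(dim=3, level=2, fill='simplex')
sp_custom = SparseGrid(3, quad, indexset=K)  # custom index set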
def tensorgrid(stackOfParameters, function=None):
    """
    Computes a tensor grid of quadrature points based on the distributions for each Parameter in stackOfParameters 

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param callable function: The function whose integral needs to be computed. It can also be given as an array of function values at the
        quadrature points. If the function is a callable, this routine outputs the integral of the function together with the quadrature
        points at which the function was evaluated. If the function is not provided (either as a callable or as an array), this routine
        outputs the quadrature points and weights.
      
    :return: tensor_int: The tensor grid approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray
    :return: weights: The quadrature weights
    :rtype: numpy ndarray

    **Notes**
    For further details on this routine, see Polynomial.getPointsAndWeights()

    """
    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(stackOfParameters[i].order)
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    tensor = IndexSet('Tensor grid', orders)
    polyObject = Polynomial(stackOfParameters, tensor)

    # Now compute the points and weights
    points, weights = polyObject.getPointsAndWeights()

    # Normalize: for uniform parameters, scale the weights by the support
    # length and divide by 2 to match the probability measure
    for i in range(0, dimensions):
        if flags[i] == 1:
            weights = weights * (stackOfParameters[i].upper -
                                 stackOfParameters[i].lower) / 2.0

    # Now if the function is a callable, then we can compute the integral:
    if function is not None and callable(function):
        tensor_int = np.mat(weights) * evalfunction(points, function)
        return tensor_int, points
    else:
        return points, weights
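
To make the tensor construction and the uniform-weight normalization concrete, here is a hedged, self-contained sketch using only NumPy's Gauss-Legendre rule (not the Polynomial class above):

import numpy as np

# 1-D Gauss-Legendre rule on [-1, 1]; exact for polynomials up to degree 2n - 1
pts_1d, wts_1d = np.polynomial.legendre.leggauss(4)

# Tensor the rule into two dimensions
X, Y = np.meshgrid(pts_1d, pts_1d)
points = np.column_stack([X.ravel(), Y.ravel()])
weights = np.outer(wts_1d, wts_1d).ravel()

# leggauss weights sum to 2 per axis (they integrate against dx); dividing by
# 2 per uniform direction converts to a probability measure, mirroring the
# (upper - lower) / 2.0 normalization in tensorgrid() above
weights = weights / 2.0**2

f = lambda x: x[0]**2 + x[1]**2
integral = weights @ np.array([f(p) for p in points])
print(integral)  # ~0.6667 = E[x^2] + E[y^2] for independent U(-1, 1) inputs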
def effectivequadratures(stackOfParameters, q_parameter, function):
    """
    Computes an approximation of the integral using effective-quadratures; this routine uses least squares to estimate the integral.

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param double q_parameter: By default, this routine uses a hyperbolic polynomial basis where the q_parameter (value between 0.1 and 1.0) adjusts the number
        of basis terms to be used for solving the least squares problem.
    :param callable function: The function whose integral needs to be computed. Can also be input as an array of function values at the
        quadrature points. The function must be provided either as a callable or an array of values for this routine to work. 
      
    :return: integral_esq: The effective quadratures approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray

    """

    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(int(stackOfParameters[i].order - 1))
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    # Define the hyperbolic cross
    hyperbolic = IndexSet('Hyperbolic basis', orders=orders, q=q_parameter)
    maximum_number_of_evals = hyperbolic.getCardinality()
    effectiveQuads = EffectiveSubsampling(stackOfParameters, hyperbolic)

    # Call the effective subsampling object
    points = effectiveQuads.getEffectivelySubsampledPoints(
        maximum_number_of_evals)
    xn = effectiveQuads.solveLeastSquares(maximum_number_of_evals, function)
    integral_esq = xn[0]

    # Normalize: for uniform parameters, scale the integral by the support
    # length
    for i in range(0, dimensions):
        if flags[i] == 1:
            integral_esq = integral_esq * (stackOfParameters[i].upper -
                                           stackOfParameters[i].lower)

    return integral_esq[0], points
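
The hyperbolic basis used above keeps only multi-indices with a small q-norm, which is what shrinks the least-squares system. A standalone sketch of that pruning rule; the exact criterion inside IndexSet may differ:

import itertools
import numpy as np

def hyperbolic_cross(orders, q):
    # Keep multi-indices whose q-norm does not exceed the largest order;
    # smaller q prunes mixed high-order terms more aggressively
    max_order = max(orders)
    kept = [idx for idx in itertools.product(*[range(o + 1) for o in orders])
            if np.sum(np.array(idx, dtype=float)**q)**(1.0 / q) <= max_order + 1e-12]
    return np.array(kept)

print(len(hyperbolic_cross([4, 4], q=0.5)))  # 10 terms, versus 25 in the full tensor basis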
    def __init__(self, uq_parameters, index_sets=None):

        self.uq_parameters = uq_parameters

        # Here we set the index sets if they are not provided
        if index_sets is None:
            # Determine the highest orders for a tensor grid
            highest_orders = []
            for i in range(0, len(uq_parameters)):
                highest_orders.append(uq_parameters[i].order)

            self.index_sets = IndexSet('Tensor grid', highest_orders)
        else:
            self.index_sets = index_sets
def sparsegrid(stackOfParameters, level, growth_rule, function=None):
    """
    Computes a sparse grid of quadrature points based on the distributions for each Parameter in stackOfParameters 

    :param Parameter array stackOfParameters: A list of Parameter objects
    :param integer level: Level parameter of the sparse grid integration rule
    :param string growth_rule: Growth rule for the sparse grid. Choose from 'linear' or 'exponential'.
    :param callable function: The function whose integral needs to be computed. It can also be given as an array of function values at the
        quadrature points. If the function is a callable, this routine outputs the integral of the function together with the quadrature
        points at which the function was evaluated. If the function is not provided (either as a callable or as an array), this routine
        outputs the quadrature points and weights.
      
    :return: sparse_int: The sparse grid approximation of the integral
    :rtype: double
    :return: points:  The quadrature points
    :rtype: numpy ndarray
    :return: weights: The quadrature weights
    :rtype: numpy ndarray

    """
    # Determine the index set to be used!
    dimensions = len(stackOfParameters)
    orders = []
    flags = []
    uniform = 1
    not_uniform = 0
    for i in range(0, dimensions):
        orders.append(stackOfParameters[i].order)
        if stackOfParameters[i].param_type == 'Uniform':
            flags.append(uniform)
        else:
            flags.append(not_uniform)

    # Call the sparse grid index set
    sparse = IndexSet('Sparse grid',
                      level=level,
                      growth_rule=growth_rule,
                      dimension=dimensions)
    sparse_index, sparse_coeffs, sparse_all_elements = sparse.getIndexSet()

    # Get this into an array
    rows = len(sparse_index)
    orders = np.zeros((rows, dimensions))
    points_store = []
    weights_store = []
    factor = 1

    # Now get the tensor grid for each sparse_index
    for i in range(0, rows):

        # loop through the dimensions
        for j in range(0, dimensions):
            orders[i, j] = np.array(sparse_index[i][j])

        # points and weights for the tensor grid defined by this multi-index
        tensor = IndexSet('Tensor grid', orders)
        polyObject = Polynomial(stackOfParameters, tensor)
        points, weights = polyObject.getPointsAndWeights(orders[i, :])

        # Multiply weights by constant 'a':
        weights = weights * sparse_coeffs[i]

        # Append this tensor grid's points and weights to the running store
        for k in range(0, len(points)):
            points_store = np.append(points_store, points[k, :], axis=0)
            weights_store = np.append(weights_store, weights[k])

    dims1 = int(len(points_store) / dimensions)
    points_store = np.reshape(points_store, (dims1, dimensions))

    # Normalize: for uniform parameters, scale the weights by the support
    # length and divide by 2
    for i in range(0, dimensions):
        if flags[i] == 1:
            weights_store = weights_store * (stackOfParameters[i].upper -
                                             stackOfParameters[i].lower) / 2.0

    # Now if the function is a callable, then we can compute the integral:
    if function is not None and callable(function):
        sparse_int = np.mat(weights_store) * evalfunction(
            points_store, function)
        points_store = removeDuplicates(points_store)
        return sparse_int, points_store
    else:
        # Keep duplicates here so that points and weights stay in one-to-one
        # correspondence
        return points_store, weights_store
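
The sparse_coeffs above come from the Smolyak combination technique. A hedged, self-contained sketch of the standard isotropic coefficients (the IndexSet class may parameterize the level differently):

import itertools
from math import comb

def smolyak_coefficients(dim, level):
    # Combination-technique weights for an isotropic Smolyak rule:
    #   c(i) = (-1)^(level - |i|_1) * C(dim - 1, level - |i|_1)
    # over multi-indices i >= (1, ..., 1) with level - dim + 1 <= |i|_1 <= level
    coeffs = {}
    for idx in itertools.product(range(1, level + 1), repeat=dim):
        s = sum(idx)
        if level - dim + 1 <= s <= level:
            coeffs[idx] = (-1)**(level - s) * comb(dim - 1, level - s)
    return coeffs

# dim=2, level=3: {(1, 1): -1, (1, 2): 1, (2, 1): 1}, i.e. two finer tensor
# grids added and the overlapping coarse grid subtracted once
print(smolyak_coefficients(2, 3))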
def getPseudospectralCoefficients(stackOfParameters,
                                  function,
                                  additional_orders=None):

    dimensions = len(stackOfParameters)
    q0 = [1]
    Q = []
    orders = []

    # If additional orders are provided, then use those!
    if additional_orders is None:
        for i in range(0, dimensions):
            orders.append(stackOfParameters[i].order)
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors()
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    else:
        print('Using custom coefficients!')
        for i in range(0, dimensions):
            orders.append(additional_orders[i])
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors(orders[i])
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    # Compute multivariate Gauss points and weights
    p, w = getGaussianQuadrature(stackOfParameters, orders)

    # Evaluate the first point to get the size of the system
    fun_value_first_point = function(p[0, :])
    u0 = q0[0, 0] * fun_value_first_point
    N = 1
    gn = int(np.prod(orders))
    Uc = np.zeros((N, gn))
    Uc[0, 0] = u0

    function_values = np.zeros((1, gn))
    for i in range(0, gn):
        function_values[0, i] = function(p[i, :])

    # Now we evaluate the solution at all the points
    for j in range(0, gn):  # 0
        Uc[0, j] = q0[0, j] * function_values[0, j]

    # Compute the corresponding tensor grid index set:
    order_correction = []
    for i in range(0, len(orders)):
        temp = orders[i] - 1
        order_correction.append(temp)

    tensor_grid_basis = IndexSet("tensor grid", order_correction)
    tensor_set = tensor_grid_basis.getIndexSet()

    # Now we use kronmult to apply the chain of eigenvector factors
    K = efficient_kron_mult(Q, Uc)
    K = np.column_stack(K)
    return K, tensor_set, p
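
efficient_kron_mult is not defined in this listing. A hedged sketch of the standard trick such a routine implements: contracting one Kronecker factor per tensor axis, which avoids ever forming the full Kronecker product:

import numpy as np

def kron_matvec(mats, x):
    # Apply (mats[0] kron mats[1] kron ...) @ x, one factor at a time
    dims = [Q.shape[1] for Q in mats]
    v = np.asarray(x).reshape(dims)
    for k, Q in enumerate(mats):
        v = np.tensordot(Q, v, axes=([1], [k]))  # contract axis k with Q
        v = np.moveaxis(v, 0, k)                 # restore the axis order
    return v.ravel()

# Agreement check against the explicit Kronecker product
Q1, Q2 = np.random.rand(3, 3), np.random.rand(4, 4)
x = np.random.rand(12)
assert np.allclose(kron_matvec([Q1, Q2], x), np.kron(Q1, Q2) @ x)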

    def getPolynomialApproximation(self, function, plotting_pts):

        # Get the right polynomial coefficients
        if self.method == "tensor grid" or self.method == "Tensor grid":
            coefficients, indexset, evaled_pts = getPseudospectralCoefficients(
                self.uq_parameters, function)
        if self.method == "spam" or self.method == "Spam":
            coefficients, indexset, evaled_pts = getSparsePseudospectralCoefficients(
                self, function)
        if self.method == "sparse grid" or self.method == "Sparse grid":
            print('WARNING: Use spam as a method instead!')
            coefficients, indexset, evaled_pts = getSparseCoefficientsViaIntegration(
                self, function)

        P = getMultiOrthoPoly(self, plotting_pts, indexset)
        PolyApprox = np.mat(coefficients) * np.mat(P)
        return PolyApprox, evaled_pts
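
The matrix product above realizes the pseudospectral expansion f(x) ~ sum_i c_i psi_i(x). A minimal numeric illustration with made-up coefficients and basis evaluations:

import numpy as np

P = np.array([[1.0, 1.0, 1.0, 1.0],     # psi_0 at four plotting points
              [-1.0, -0.3, 0.3, 1.0],   # psi_1
              [0.5, -0.4, -0.4, 0.5]])  # psi_2
c = np.array([[2.0, 0.5, 0.1]])         # coefficients, one row
print(c @ P)                            # f(x_j) ~ sum_i c_i psi_i(x_j)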

    def getMultivariatePolynomial(self, stackOfPoints):

        # "Unpack" parameters from "self"
        stackOfParameters = self.uq_parameters
        isets = self.index_sets
        index_set = isets.getIndexSet()
        dimensions = len(stackOfParameters)
        p = {}
        d = {}

        # Save time by returning if univariate!
        if (dimensions == 1):
            poly, derivatives = stackOfParameters[0].getOrthoPoly(
                stackOfPoints)
            return poly, derivatives
        else:
            for i in range(0, dimensions):
                G, D = stackOfParameters[i].getOrthoPoly(
                    stackOfPoints[:, i], int(np.max(index_set[:, i] + 1)))
                p[i] = G
                d[i] = D

        # Now we multiply components according to the index set
        no_of_points = len(stackOfPoints)
        polynomial = np.zeros((len(index_set), no_of_points))
        derivatives = np.zeros((len(index_set), no_of_points, dimensions))

        # One loop for polynomials
        for i in range(0, len(index_set)):
            temp = np.ones((1, no_of_points))
            for k in range(0, dimensions):
                polynomial[i, :] = p[k][0][int(index_set[i, k])] * temp
                temp = polynomial[i, :]

        # Second loop for derivatives!
        if stackOfParameters[0].derivative_flag == 1:
            print('WIP')

        return polynomial, derivatives
def getPseudospectralCoefficients(self, function, override_orders=None):

    stackOfParameters = self.uq_parameters
    dimensions = len(stackOfParameters)
    q0 = [1]
    Q = []
    orders = []

    # If additional orders are provided, then use those!
    if override_orders is None:
        for i in range(0, dimensions):
            orders.append(stackOfParameters[i].order)
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors()
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    else:
        for i in range(0, dimensions):
            orders.append(override_orders[i])
            Qmatrix = stackOfParameters[i].getJacobiEigenvectors(orders[i])
            Q.append(Qmatrix)

            if orders[i] == 1:
                q0 = np.kron(q0, Qmatrix)
            else:
                q0 = np.kron(q0, Qmatrix[0, :])

    # Compute multivariate Gauss points and weights!
    if override_orders is None:
        p, w = self.getPointsAndWeights()
    else:
        p, w = self.getPointsAndWeights(override_orders)

    # Evaluate the first point to get the size of the system
    fun_value_first_point = function(p[0, :])
    u0 = q0[0, 0] * fun_value_first_point
    N = 1
    gn = int(np.prod(orders))
    Uc = np.zeros((N, gn))
    Uc[0, 0] = u0

    function_values = np.zeros((1, gn))
    for i in range(0, gn):
        function_values[0, i] = function(p[i, :])

    # Now we evaluate the solution at all the points
    for j in range(0, gn):  # 0
        Uc[0, j] = q0[0, j] * function_values[0, j]

    # Compute the corresponding tensor grid index set:
    order_correction = []
    for i in range(0, len(orders)):
        temp = orders[i] - 1
        order_correction.append(temp)

    tensor_grid_basis = IndexSet('Tensor grid', order_correction)
    tensor_set = tensor_grid_basis.getIndexSet()

    # Now we use kronmult to apply the chain of eigenvector factors
    K = efficient_kron_mult(Q, Uc)
    K = np.column_stack(K)
    return K, tensor_set, p
Example #9
File: adapt.py  Project: rdwight/smobol
def sobol_adaptive(f, dim, S_cutoff=0.95, max_samples=200, max_iters=10,
                   max_level=10, max_k=10,
                   fval=None, plotting=False, labels=None):
    """
    Sobol-based dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      S_cutoff    - Sobol index cutoff, adapt dimensions with Sobol indices
                    adding up to the cutoff
      max_samples - termination criteria, maximum allowed samples of f
      max_iters   - termination criteria, maximum adaptation iterations
      max_level   - maximum level allowed in any single variable, ie. don't allow
                    very-high resolution in any direction. Enforce
                    max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, ie. constrain
                    to simplex-rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes, pass
                    dictionary containing values - these will be used first
    The iteration will also terminate if the grid is unchanged after an 
    iteration.
    """
                                        # Initialize with simplex level 2, ie.
                                        # one-factor, 3 points in each direction
    K = IndexSet(dim=dim, level=2, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter = 1
    fval = {} if fval is None else fval # Dictionary of function values
                                        # Main adaptation loop 
    while sp.n_nodes() <= max_samples and iter <= max_iters:
                                        # Sampling call, don't recompute already
                                        # known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/sobol_adapt_iter%d.pdf'%iter, labels=labels)
                                        # 2. Compute Sobol indices, up to maximum
                                        # interaction defined by the current K
        D, mu, var = sp.compute_sobol_variances(fval, 
                                                cardinality=K.max_interaction(), 
                                                levelrefine=2)
        del D[()]                       # Remove variance (==var) 

        print('# %6d %12.6e %12.6e' % (sp.n_nodes(), mu, var))
        ### ------------------------------------------- RESULT <==
                                        # 3. Interaction selection
                                        # Sort according to variance large->small
        print('D =', D)
        Dsort = sorted(D.items(), key=lambda kv: -kv[1])  # Python 3: items(), no tuple-unpacking lambda
        print('Dsort =', Dsort)
        print('var =', var)
        sobol_total,i,U = 0.,0,set([])  # Select most important interactions
        while sobol_total < S_cutoff and i < len(Dsort):
            sobol_total += Dsort[i][1] / var
            U |= set([Dsort[i][0]])
            i += 1
        print('U =', U)
                                        # 4. Interaction augmentation
                                        # Find set of potential *new*
                                        # interactions present in active set
        A = K.activeset()
        potential_interactions = set([interaction(a) for a in A]) - \
                                 K.interactions()
        print('A = ', A)
        print('potential_interactions =', potential_interactions)
                                        # Select potential new interactions 
                                        # satisfying 
        Uplus = set([])
        for interac in potential_interactions:
            all_subsets = set([])
            for r in range(1, len(interac)):
                all_subsets |= set(itertools.combinations(interac, r))
            if all_subsets <= U:
                Uplus |= set([interac])
        U |= Uplus
        print('Uplus =', Uplus)
        print('new U =', U)
                                        # 5. Indexset extension - new sparse grid
        unchanged = True
        for a in A:
            if np.sum(a) > max_k+dim-1: # Enforce simplex-constraint on indexset
                continue
            if np.max(a) > max_level:   # Enforce maximum level constraint
                continue
            if interaction(a) in U:
                unchanged = False
                K.I |= set([a])
        if unchanged:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        print('K.I =', K.I)
        sp.set_indexset(K)
        iter += 1
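
The interaction() helper used above is imported from elsewhere in smobol. A plausible, explicitly hypothetical reconstruction, assuming levels start at 1 and a variable joins an interaction once its entry exceeds 1:

def interaction(multiindex):
    # Hypothetical reconstruction: smobol's actual helper may differ
    return tuple(k for k, level in enumerate(multiindex) if level > 1)

print(interaction((1, 3, 1, 2)))  # (1, 3): variables 1 and 3 interact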
Example #10
File: adapt.py  Project: rdwight/smobol
def gerstnerandgriebel_adaptive(f, dim, max_samples=200, max_iters=10,
                                min_error=1.e-16, max_level=10, max_k=10,
                                fval=None, plotting=False, labels=None):
    """
    Gerstner+Griebel style dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      max_samples - termination criteria, maximum allowed samples of f
      max_iters   - termination criteria, maximum adaptation iterations
      max_level   - maximum level allowed in any single variable, ie. don't allow
                    very-high resolution in any direction. Enforce
                    max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, ie. constrain
                    to simplex-rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes, pass
                    dictionary containing values - these will be used first
    """
                                        # Initialize with simplex level 1
    K = IndexSet(dim=dim, level=1, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter, eta = 1, 1e100
    fval = {} if fval is None else fval # Dictionary of function values
                                        # Main adaptation loop 
    while sp.n_nodes() <= max_samples and iter <= max_iters and eta > min_error:
                                        # Sampling call, don't recompute already
                                        # known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/GandG_adapt_iter%d.png'%iter, labels=labels)

        r = sp.integrate(fval)
        print('r =', r)
                                        # For each member of the active set,
                                        # compute the difference between the 
                                        # objective, with and without that member
        A = K.activeset()
        g = {}
        for a in A:
            if np.sum(a) > max_k+dim-1: # Enforce simplex-constraint on indexset
                continue
            if np.max(a) > max_level:   # Enforce maximum level constraint
                continue
            Kmod = copy.deepcopy(K)
            Kmod.I |= set([a])
            sp.set_indexset(Kmod)
            sp.sample_fn(f, fargs=(), fval=fval)
            rmod = sp.integrate(fval)
            g[a] = abs(rmod - r)            
        if len(g) == 0:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        a_adapt = max(g, key=g.get)
        eta = sum(g.values())
        print('eta =',eta)
        K.I |= set([a_adapt])
        sp.set_indexset(K)
        iter += 1

    return r
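
A hedged usage sketch; it assumes f receives a single point as an array-like, which this listing does not confirm:

import numpy as np

def f(x):
    # Smooth test integrand; any callable with the same signature works
    return float(np.exp(-np.sum(np.asarray(x)**2)))

r = gerstnerandgriebel_adaptive(f, dim=3, max_samples=500, max_iters=20)
print('Adaptive estimate of the integral:', r)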