def levelset_adapt():
    """Build an adapted 2-d level-set by hand and plot the resulting CC sparse grid."""
    ls = sparse.LevelSet(2, 1, fill='none')
    ls.I.append(np.array((2, 1)))
    ls.I.append(np.array((3, 1)))
    ls.I.append(np.array((4, 1)))
    ls.I.append(np.array((5, 1)))
    ls.I.append(np.array((1, 2)))
    ls.I.append(np.array((1, 3)))
    ls.I.append(np.array((2, 2)))
    ls.O = set(range(8))
    ls.A = set([])
    sp = sparse.SparseGrid(2, QuadratureCC(), levelset=ls)
    sp.plot('sparse_CC_adapt.pdf')
def sparse_nodes(dim, level, fill='simplex', quadrature=QuadratureCC()):
    """
    Extract nodes and weights of a sparse rule of given dimension (dim),
    level and fill type.
    Return:
      sp   - the SparseGrid object
      midx - list of node multi-indices, needed for constructing the
             dictionary of function values passed to SparseGrid.integrate()
      x    - list of node locations
      w    - list of weights
    """
    sp = SparseGrid(dim, quadrature, level=level, fill=fill)
    xval, wval = sp.get_nodes(), sp.get_weights()
    # Python docs: "If keys(), values() are called with no intervening
    # modifications to the dictionary, the lists will directly
    # correspond."  This is relied upon here.
    midx, x = xval.keys(), xval.values()
    # Guarantee weights are in the same order as the nodes and
    # multi-indices.
    w = np.zeros(len(midx))
    for i, mi in enumerate(midx):
        w[i] = wval[mi]
    return sp, midx, x, w
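# A minimal usage sketch for sparse_nodes() (not part of the original
# module): the node list drives an external sampler, and the samples are
# fed back through the multi-indices.  The function f here is a placeholder.
def _sparse_nodes_example():
    def f(x):
        return np.prod(np.cos(x))
    sp, midx, x, w = sparse_nodes(dim=2, level=3)
    fval = {mi: f(xi) for mi, xi in zip(midx, x)}
    # The direct weighted sum and SparseGrid.integrate() should agree.
    print(np.dot(w, [fval[mi] for mi in midx]), sp.integrate(fval))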
def sobol_adaptive(f, dim, S_cutoff=0.95, max_samples=200, max_iters=10,
                   max_level=10, max_k=10, fval=None, plotting=False,
                   labels=None):
    """
    Sobol-based dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      S_cutoff    - Sobol index cutoff, adapt dimensions with Sobol
                    indices adding up to the cutoff
      max_samples - termination criterion, maximum allowed samples of f
      max_iters   - termination criterion, maximum adaptation iterations
      max_level   - maximum level allowed in any single variable, i.e.
                    don't allow very high resolution in any direction.
                    Enforce max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, i.e.
                    constrain to a simplex rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes,
                    pass a dictionary containing the values - these will
                    be used first
      plotting    - if True, plot the grid at each iteration
      labels      - axis labels passed to SparseGrid.plot()
    The iteration will also terminate if the grid is unchanged after an
    iteration.
    """
    # Initialize with simplex level 2, i.e. one-factor, 3 points in each
    # direction.
    K = IndexSet(dim=dim, level=2, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter = 1
    fval = {} if fval is None else fval   # Dictionary of function values
    # Main adaptation loop
    while sp.n_nodes() <= max_samples and iter <= max_iters:
        # 1. Sampling call, don't recompute already known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/sobol_adapt_iter%d.pdf' % iter, labels=labels)
        # 2. Compute Sobol indices, up to the maximum interaction defined
        #    by the current K
        D, mu, var = sp.compute_sobol_variances(fval,
                                                cardinality=K.max_interaction(),
                                                levelrefine=2)
        del D[()]   # Remove the total variance (== var)
        print('# %6d %12.6e %12.6e' % (sp.n_nodes(), mu, var))   ### RESULT <==
        # 3. Interaction selection - sort according to variance,
        #    large -> small
        print('D =', D)
        Dsort = sorted(D.items(), key=lambda kv: -kv[1])
        print('Dsort =', Dsort)
        print('var =', var)
        sobol_total, i, U = 0., 0, set([])
        # Select the most important interactions
        while sobol_total < S_cutoff and i < len(Dsort):
            sobol_total += Dsort[i][1] / var
            U |= set([Dsort[i][0]])
            i += 1
        print('U =', U)
        # 4. Interaction augmentation - find the set of potential *new*
        #    interactions present in the active set
        A = K.activeset()
        potential_interactions = set([interaction(a) for a in A]) - \
                                 K.interactions()
        print('A =', A)
        print('potential_interactions =', potential_interactions)
        # Select potential new interactions all of whose proper
        # sub-interactions are already selected
        Uplus = set([])
        for interac in potential_interactions:
            all_subsets = set([])
            for r in range(1, len(interac)):
                all_subsets |= set(itertools.combinations(interac, r))
            if all_subsets <= U:
                Uplus |= set([interac])
        U |= Uplus
        print('Uplus =', Uplus)
        print('new U =', U)
        # 5. Indexset extension - new sparse grid
        unchanged = True
        for a in A:
            if np.sum(a) > max_k + dim - 1:
                # Enforce simplex constraint on the indexset
                continue
            if np.max(a) > max_level:
                # Enforce maximum-level constraint
                continue
            if interaction(a) in U:
                unchanged = False
                K.I |= set([a])
        if unchanged:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        print('K.I =', K.I)
        sp.set_indexset(K)
        iter += 1
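# Usage sketch for sobol_adaptive() (not part of the original module).
# As elsewhere in this file, f is assumed to be defined on the hypercube
# sampled by QuadratureCC; the anisotropic test function is a placeholder.
def _sobol_adaptive_example():
    def f(x):
        # x[0] dominates, x[2] barely matters - the adaptation should
        # refine dimension 0 first.
        return np.exp(-2. * x[0]) + 0.2 * x[1] + 0.01 * x[1] * x[2]
    sobol_adaptive(f, dim=3, S_cutoff=0.95, max_samples=300, max_iters=8)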
def gerstnerandgriebel_adaptive(f, dim, max_samples=200, max_iters=10,
                                min_error=1.e-16, max_level=10, max_k=10,
                                fval=None, plotting=False, labels=None):
    """
    Gerstner+Griebel style dimension-adaptive sparse grids.
      f           - function to sample
      dim         - number of input variables/dimensions
      max_samples - termination criterion, maximum allowed samples of f
      max_iters   - termination criterion, maximum adaptation iterations
      min_error   - termination criterion, stop when the error indicator
                    eta drops below this value
      max_level   - maximum level allowed in any single variable, i.e.
                    don't allow very high resolution in any direction.
                    Enforce max(multiindex) <= max_level
      max_k       - enforce |multiindex|_1 <= dim + max_k - 1, i.e.
                    constrain to a simplex rule of level max_k
      fval        - if samples of f already exist at sparse-grid nodes,
                    pass a dictionary containing the values - these will
                    be used first
      plotting    - if True, plot the grid at each iteration
      labels      - axis labels passed to SparseGrid.plot()
    """
    # Initialize with simplex level 1
    K = IndexSet(dim=dim, level=1, fill='simplex')
    quad = QuadratureCC()
    sp = SparseGrid(dim, quad, indexset=K)
    iter, eta = 1, 1e100
    fval = {} if fval is None else fval   # Dictionary of function values
    # Main adaptation loop
    while sp.n_nodes() <= max_samples and iter <= max_iters and eta > min_error:
        # Sampling call, don't recompute already known values
        sp.sample_fn(f, fargs=(), fval=fval)
        print('Iter', iter, '='*100)
        print('sp.n_nodes() =', sp.n_nodes())
        if plotting:
            sp.plot(outfile='tmp/GandG_adapt_iter%d.png' % iter, labels=labels)
        r = sp.integrate(fval)
        print('r =', r)
        # For each member of the active set, compute the difference in
        # the objective with and without that member
        A = K.activeset()
        g = {}
        for a in A:
            if np.sum(a) > max_k + dim - 1:
                # Enforce simplex constraint on the indexset
                continue
            if np.max(a) > max_level:
                # Enforce maximum-level constraint
                continue
            Kmod = copy.deepcopy(K)
            Kmod.I |= set([a])
            sp.set_indexset(Kmod)
            sp.sample_fn(f, fargs=(), fval=fval)
            rmod = sp.integrate(fval)
            g[a] = abs(rmod - r)
        if len(g) == 0:
            print('No new multi-indices added to index-set, terminate adaptation')
            break
        # Adapt in the direction with the largest error indicator; eta is
        # the global error estimate used for termination.
        a_adapt = max(g, key=g.get)
        eta = sum(g.values())
        print('eta =', eta)
        K.I |= set([a_adapt])
        sp.set_indexset(K)
        iter += 1
    return r
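# Usage sketch for gerstnerandgriebel_adaptive() (not part of the
# original module); the same placeholder test function and hypercube
# assumption as in the sketch above apply.
def _gg_adaptive_example():
    def f(x):
        return np.exp(-2. * x[0]) + 0.2 * x[1] + 0.01 * x[1] * x[2]
    r = gerstnerandgriebel_adaptive(f, dim=3, max_samples=300, max_iters=8)
    print('Adaptive integral estimate:', r)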
def heavygas_barrier():
    """
    TNO heavy-gas (propane) transport with barrier wall.  Stijn's case.
    Inputs:  U_{ABL}[m/s], U_{rel}[m/s], T_{rel}[K]
    Output:  Effect-distance[m]
    """
    ninput = 3
    # First 3 columns inputs, last column output
    data = np.array([
        [5., 20., 290., 180.04],
        [3., 20., 290., 226.67],
        [7., 20., 290., 161.04],
        [5., 18., 290., 166.23],
        [5., 22., 290., 193.1],
        [5., 20., 270., 175.09],
        [5., 20., 310., 186.11],
        [3.585786, 20., 290., 198.5],
        [6.414214, 20., 290., 167.14],
        [3., 18., 290., 204.35],
        [7., 18., 290., 149.08],
        [3., 22., 290., 250.17],
        [7., 22., 290., 172.94],
        [5., 18.58579, 290., 170.28],
        [5., 21.41421, 290., 189.29],
        [3., 20., 270., 262.36],
        [7., 20., 270., 162.29],
        [3., 20., 310., 215.01],
        [7., 20., 310., 159.2],
        [5., 18., 270., 160.9],
        [5., 22., 270., 188.37],
        [5., 18., 310., 172.13],
        [5., 22., 310., 199.47],
        [5., 20., 275.8579, 176.06],
        [5., 20., 304.1421, 184.59],
        [3.152241, 20., 290., 217.83],
        [4.234633, 20., 290., 184.57],
        [5.765367, 20., 290., 173.81],
        [6.847759, 20., 290., 162.67],
        [3.585786, 18., 290., 179.47],
        [6.414214, 18., 290., 154.59],
        [3.585786, 22., 290., 215.46],
        [6.414214, 22., 290., 179.48],
        [3., 18.58579, 290., 211.5],
        [7., 18.58579, 290., 152.78],
        [3., 21.41421, 290., 243.35],
        [7., 21.41421, 290., 169.32],
        [5., 18.15224, 290., 167.26],
        [5., 19.23463, 290., 174.91],
        [5., 20.76537, 290., 185.23],
        [5., 21.84776, 290., 192.1],
        [3.585786, 20., 270., 206.16],
        [6.414214, 20., 270., 167.],
        [3.585786, 20., 310., 200.23],
        [6.414214, 20., 310., 166.62],
        [3., 18., 270., 230.94],
        [7., 18., 270., 149.6],
        [3., 22., 270., 294.59],
        [7., 22., 270., 174.77],
        [3., 18., 310., 197.47],
        [7., 18., 310., 148.36],
        [3., 22., 310., 233.67],
        [7., 22., 310., 186.56],
        [5., 18.58579, 270., 165.05],
        [5., 21.41421, 270., 184.48],
        [5., 18.58579, 310., 176.28],
        [5., 21.41421, 310., 195.67],
        [3., 20., 275.8579, 249.58],
        [7., 20., 275.8579, 161.98],
        [3., 20., 304.1421, 217.17],
        [7., 20., 304.1421, 159.8],
        [5., 18., 275.8579, 161.58],
        [5., 22., 275.8579, 189.49],
        [5., 18., 304.1421, 170.56],
        [5., 22., 304.1421, 197.64],
        [5., 20., 271.5224, 175.36],
        [5., 20., 282.3463, 177.45],
        [5., 20., 297.6537, 182.6],
        [5., 20., 308.4776, 185.72],
    ])
    varmin, varmax = data.min(0), data.max(0)
    for i in range(ninput):
        # Transform inputs to [0,1]^{ninput}
        data[:, i] = (data[:, i] - varmin[i]) / (varmax[i] - varmin[i])
    # Setup sparse grid - we know its form for this data
    # (level 4, simplex, CC)
    dim, level = 3, 4
    sp = SparseGrid(dim, QuadratureCC(), level=level, fill='simplex')
    xval = sp.get_nodes()
    fval = {}
    for k, x in xval.items():
        for xref in data:
            if np.sqrt(np.sum((x - xref[:-1]) ** 2)) < 0.001:
                fval[k] = xref[-1]
    # Check
    # print(sp.integrate(fval))   # Agrees with Stijn Desmedt MSc Table 4.1
    # print(sp.compute_sobol_variances(fval, cardinality=3, levelrefine=2))
    return sp, fval
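# Usage sketch for heavygas_barrier() (not part of the original module):
# post-process the returned grid and samples for the mean effect-distance
# and Sobol variance contributions, reusing the cardinality/levelrefine
# settings from the commented-out checks above.
def _heavygas_example():
    sp, fval = heavygas_barrier()
    print('Mean effect-distance [m]:', sp.integrate(fval))
    D, mu, var = sp.compute_sobol_variances(fval, cardinality=3, levelrefine=2)
    print('Sobol variance contributions:', D)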
    for level in levels:
        sys.stdout.write('%10d' % level)
    sys.stdout.write('\n ' + '-' * 6 * 11)
    for dim in dims:
        sys.stdout.write('\n %s %6d |' % ('dim' if dim == 8 else ' ', dim))
        for level in levels:
            sp = SparseGrid(dim, quadrature, level=level, fill=fill)
            sys.stdout.write('%10d' % sp.n_nodes())
            sys.stdout.flush()
    sys.stdout.write('\n')


if __name__ == '__main__':

    if True:    ### Test: count support-points
        unittest_nodecount(fill='simplex', quadrature=QuadratureCC())
    if False:   ### Test: integration
        unittest_integration(fill='simplex', quadrature=QuadraturePatterson())
    if False:   ### Test: Sobol indices
        unittest_sobol(fill='simplex', quadrature=QuadratureCC())
    if False:   ### Test: interpolation
        unittest_interpolation(fill='simplex', quadrature=QuadratureCC())
    if False:   ### Example: typical use with cheap fn
        dim, level = 4, 3
        def fcheap(x):
            return np.sin(x[0]) * x[1] + x[2]**2 * x[3]
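# The cheap-function example above is truncated in this listing; a
# minimal sketch of the workflow it introduces, using only calls that
# appear elsewhere in this file, might look like:
def _cheap_fn_sketch():
    dim, level = 4, 3
    def fcheap(x):
        return np.sin(x[0]) * x[1] + x[2]**2 * x[3]
    sp = SparseGrid(dim, QuadratureCC(), level=level, fill='simplex')
    fval = sp.sample_fn(fcheap)
    print('integral =', sp.integrate(fval))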
def levelset_plot_2d(highlight):
    # highlight: a 2-tuple multi-index, used in the output filename and
    # passed on to SparseGrid.plot().
    dim = 2
    l = 5
    sp = sparse.SparseGrid(dim, QuadratureCC(), level=l, fill='simplex')
    sp.plot('sparse_CC_%d_%d.pdf' % highlight, highlight)
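# A minimal usage sketch (not part of the original module): highlight is
# assumed to be a multi-index of the 2-d, level-5 simplex rule.
def _levelset_plot_example():
    for highlight in [(1, 1), (3, 2), (5, 1)]:
        levelset_plot_2d(highlight)   # writes sparse_CC_<i>_<j>.pdf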
import numpy as np
import pickle

from sparse import SparseGrid
import test_genz as genz
from quad_cc import QuadratureCC

if __name__ == '__main__':

    dim = 5
    Nrepeat = 100
    quadrature = QuadratureCC()
    results = {}
    for iter in range(Nrepeat):
        # Random variation - Genz coefficients
        genz.w = np.random.random(5)
        # According to Laurent
        genz.c = np.random.random(5)
        genz.c *= 2.5 / np.linalg.norm(genz.c)
        exact = genz.f1_exact(dim)
        for lmax, fill in zip([10, 4], ['simplex', 'full_factor']):
            if fill not in results:
                results[fill] = np.zeros((lmax + 1, 5))
            print('%s Dimension = %d %s' % ('-' * 20, dim, '-' * 20))
            print('%10s %10s %16s %12s' %
                  ('Level', '#nodes', 'Integral', 'Rel error'))
            for l in range(1, lmax + 1):
                sp = SparseGrid(dim, quadrature, level=l, fill=fill)
                fval = sp.sample_fn(genz.f1)
                approx = sp.integrate(fval)