def SchemeUpwind(u, A, omega, D, rhs, bc):
    r"""
    Discretization of -Tr(A(x) hess u(x)) + \| grad u(x) - omega(x) \|_D(x)^2 - rhs,
    with Dirichlet boundary conditions, using upwind finite differences
    for the first order part.
    The scheme is degenerate elliptic if A and D are positive definite.
    """
    # Compute the decompositions (here offset_e = offset_f)
    nothing = (np.full((0,), 0.), np.full((2, 0), 0))  # empty coefs and offsets
    mu, offset_e = nothing if A is None else Selling.Decomposition(A)
    nu, offset_f = nothing if D is None else Selling.Decomposition(D)
    omega_f = lp.dot_VA(omega, offset_f.astype(float))

    # First and second order finite differences
    maxi = np.maximum
    mu, nu, omega_f = (bc.as_field(e) for e in (mu, nu, omega_f))

    dup = bc.DiffUpwind(u, offset_f)
    dum = bc.DiffUpwind(u, -offset_f)
    dup[..., bc.not_interior] = 0.  # Placeholder values to silence NaN warnings
    dum[..., bc.not_interior] = 0.
    d2u = bc.Diff2(u, offset_e)

    # Scheme in the interior
    du = maxi(0., maxi(omega_f - dup, -omega_f - dum))
    residue = -lp.dot_VV(mu, d2u) + lp.dot_VV(nu, du ** 2) - rhs

    # Placeholders outside domain
    return np.where(bc.interior, residue, u - bc.grid_values)

def SchemeCentered(u, cst, mult, omega, diff, bc, ret_hmax=False):
    """Discretization of the linear non-divergence form second order PDE
         cst + mult u + <omega, grad u> - tr(diff hess(u)) = 0.
    Second order accurate: centered, yet monotone, finite differences
    are used for <omega, grad u>.
    - bc : boundary conditions.
    - ret_hmax : return the largest grid scale for which monotonicity holds.
    """
    # Decompose the tensor field
    coefs2, offsets = Selling.Decomposition(diff)

    # Decompose the vector field
    scals = lp.dot_VA(lp.solve_AV(diff, omega), offsets.astype(float))
    coefs1 = coefs2 * scals
    if ret_hmax:
        return 2. / norm(scals, ord=np.inf)

    # Compute the first and second order finite differences
    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)

    # In interior : cst + mult u + <omega, grad u> - tr(diff hess(u)) = 0
    coefs1, coefs2 = (bc.as_field(e) for e in (coefs1, coefs2))
    residue = cst + mult * u + lp.dot_VV(coefs1, du) - lp.dot_VV(coefs2, d2u)

    # On boundary : u - bc = 0
    return np.where(bc.interior, residue, u - bc.grid_values)

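# A usage sketch for SchemeCentered, solved with Newton's method. This is an
# illustration, not part of the original notebook: the grid, the constant
# coefficients, and the Domain.Dirichlet / newton_root signatures are
# assumptions based on the agd library.
def demo_SchemeCentered():
    from agd import Domain
    from agd.AutomaticDifferentiation.Optimization import newton_root

    # Cartesian grid on the unit square
    gridscale = 0.02
    aX = np.arange(0., 1. + gridscale, gridscale)
    X = np.stack(np.meshgrid(aX, aX, indexing='ij'))
    bc = Domain.Dirichlet(Domain.Box([[0., 1.], [0., 1.]]), 0., X)

    cst, mult = 1., 0.          # Constant source term, no zeroth order term
    omega = np.array([1., 0.])  # Constant drift
    diff = np.eye(2)            # Isotropic diffusion tensor

    # The scheme is linear in u, so Newton's method converges in one iteration
    params = (cst, mult, omega, diff, bc)
    return newton_root(SchemeCentered, np.zeros(X.shape[1:]), params)
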
def MinimizeTrace_Opt(u, alpha, bc, oracle=None):
    if oracle is None:
        return MinimizeTrace(u, alpha, bc)

    # The oracle contains the optimal angles
    diffs = Diff(alpha, oracle.squeeze(axis=0))
    coefs, sb = Selling.Decomposition(diffs)
    value = lp.dot_VV(coefs, bc.Diff2(u, sb))
    return value, oracle

def Gradient(u, A, bc, decomp=None):
    """Approximates grad u(x), using finite differences along the axes of A."""
    coefs, offsets = Selling.Decomposition(A) if decomp is None else decomp
    du = bc.DiffCentered(u, offsets)
    AGrad = lp.dot_AV(offsets.astype(float), coefs * du)  # Approximates A * grad u
    return lp.solve_AV(A, AGrad)  # Approximates A^{-1} (A * grad u) = grad u

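# A sanity check behind Gradient (illustrative, not from the original notebook):
# Selling's decomposition writes A = sum_i coefs_i e_i e_i^T, hence
# sum_i coefs_i <grad u, e_i> e_i approximates A grad u, which Gradient then
# multiplies by A^{-1}. Below, lp.outer_self is assumed to compute e_i e_i^T.
def demo_Gradient_consistency():
    A = np.array([[2., 1.], [1., 3.]])  # Any symmetric positive definite matrix
    coefs, offsets = Selling.Decomposition(A)
    # Reconstruct A from its decomposition, summing over the offsets axis
    A_rec = (coefs * lp.outer_self(offsets.astype(float))).sum(axis=2)
    assert np.allclose(A, A_rec)
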
def SchemeSampling(u, diffs, beta, bc):
    # Tensor decomposition
    coefs, offsets = Selling.Decomposition(diffs)

    # Numerical scheme
    coefs = bc.as_field(coefs)
    residue = beta - (coefs * bc.Diff2(u, offsets)).sum(0).min(0)

    # Boundary conditions
    return np.where(bc.interior, residue, u - bc.grid_values)

def SchemeNonlinear(u, x, f, bc):
    # Decompose the diffusion tensor field
    coef, offsets = Selling.Decomposition(D(x))

    # First and second order finite differences
    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)

    # p approximates grad u(x) = D(x)^{-1} sum_i coef_i <grad u, e_i> e_i
    p = lp.dot_AV(lp.inverse(D(x)), np.sum(coef * du * offsets, axis=1))

    return np.where(
        bc.interior,
        -1 / 2 * lp.dot_VV(omega(x), p) ** 2 - lp.dot_VV(coef, d2u) - f,
        u - bc.grid_values,
    )

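# SchemeNonlinear above, and SchemeLinear below, rely on globally defined
# coefficient fields D(x) and omega(x). A minimal sketch of such definitions
# (an assumption, chosen purely for illustration):
def D(x):
    """Uniformly positive definite diffusion tensor field; x has shape (2, ...)."""
    identity = np.eye(2).reshape((2, 2) + (1,) * (x.ndim - 1))
    return identity + 0.5 * lp.outer_self(x)

def omega(x):
    """Rotational drift vector field."""
    return np.stack([-x[1], x[0]])
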
def AnglesAndSuperbases(D, maxiter=200):
    sb = Selling.CanonicalSuperbase(2).astype(int)
    thetas = []
    superbases = []
    theta = 0
    for i in range(maxiter):
        thetas.append(theta)
        if theta >= np.pi:
            break
        superbases.append(sb)
        theta, sb = NextAngleAndSuperbase(theta, sb, D)
    return np.array(thetas), np.stack(superbases, axis=2)

def StencilForConditioning(cond):
    V3 = Selling.SuperbasesForConditioning(cond)
    offsets = V3.reshape((2, -1))

    # Make the offsets positive for the lexicographic order, reversing their sign if needed.
    offsets[:, offsets[0] < 0] *= -1
    offsets[:, np.logical_and(offsets[0] == 0, offsets[1] < 0)] *= -1

    V1, indices = np.unique(offsets, axis=1, return_inverse=True)
    V3_indices = indices.reshape(V3.shape[1:])
    V2_indices = np.unique(
        np.sort(
            np.concatenate(
                (V3_indices[[0, 1]], V3_indices[[0, 2]], V3_indices[[1, 2]]),
                axis=1),
            axis=0),
        axis=1)
    V2 = V1[:, V2_indices]

    Q = np.zeros((3, 3, V3.shape[2]))
    w = np.zeros((3, V3.shape[2]))
    for i in range(3):
        Q[i, i] = (lp.dot_VV(V3[:, (i + 1) % 3], V3[:, (i + 1) % 3])
                   * lp.dot_VV(V3[:, (i + 2) % 3], V3[:, (i + 2) % 3]) / 4)
        Q[i, (i + 1) % 3] = (lp.dot_VV(V3[:, i], V3[:, (i + 1) % 3])
                             * lp.dot_VV(V3[:, (i + 2) % 3], V3[:, (i + 2) % 3]) / 4)
        Q[i, (i + 2) % 3] = (lp.dot_VV(V3[:, i], V3[:, (i + 2) % 3])
                             * lp.dot_VV(V3[:, (i + 1) % 3], V3[:, (i + 1) % 3]) / 4)
        w[i] = -lp.dot_VV(V3[:, (i + 1) % 3], V3[:, (i + 2) % 3]) / 2

    omega0 = 1 / (lp.dot_VV(V2[:, 0], V2[:, 0]) * lp.dot_VV(V2[:, 1], V2[:, 1]))
    omega1 = 1 / (2 * np.stack([lp.dot_VV(V2[:, 0], V2[:, 0]),
                                -lp.dot_VV(V2[:, 1], V2[:, 1])]))
    omega2 = 1 / (2 * np.stack([lp.dot_VV(V2[:, 0], V2[:, 0]),
                                lp.dot_VV(V2[:, 1], V2[:, 1])]))

    return Stencil(V1, V2, V2_indices, V3, V3_indices, Q, w,
                   omega0, omega1, omega2)

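# StencilForConditioning packs its outputs in a Stencil record, whose definition
# is not shown in this section. A sketch consistent with the constructor call
# above (the field names are inferred from that call, hence an assumption):
from collections import namedtuple
Stencil = namedtuple("Stencil", ("V1", "V2", "V2_indices", "V3", "V3_indices",
                                 "Q", "w", "omega0", "omega1", "omega2"))
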
def SchemeSampling_OptInner(u, diffs, bc, oracle=None):
    # Select the active tensors, if they are known
    if oracle is not None:
        diffs = np.take_along_axis(
            diffs,
            np.broadcast_to(oracle, diffs.shape[:2] + (1,) + oracle.shape),
            axis=2)

    print("Has AD information :", ad.is_ad(u),
          ". Number of active tensors per point :", diffs.shape[2])

    # Tensor decomposition
    coefs, offsets = Selling.Decomposition(diffs)

    # Return the minimal value, and the minimizing index
    return ad.min_argmin(lp.dot_VV(coefs, bc.Diff2(u, offsets)), axis=0)

def SchemeLinear(u, x, f, bc):
    coef, offsets = Selling.Decomposition(D(x))
    # Diagnostics on the decomposition, kept for reference:
    # coef_min = np.min(coef)
    # offsets_norm2 = lp.dot_VV(offsets, offsets)
    # offsets_max2 = np.max(np.where(coef < 1e-13, 0, offsets_norm2))
    # print(f"h: {bc.gridscale}, c: {coef_min}, e2: {offsets_max2}")

    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)

    return np.where(
        bc.interior,
        -lp.dot_VAV(omega(x), lp.inverse(D(x)),
                    np.sum(coef * du * offsets, axis=1))
        - lp.dot_VV(coef, d2u) - f,
        u - bc.grid_values,
    )

def SchemeLaxFriedrichs(u, A, F, bc):
    """
    Discretization of -Tr(A(x) hess u(x)) + F(grad u(x)) - 1 = 0,
    with Dirichlet boundary conditions. The scheme is second order,
    and degenerate elliptic under suitable assumptions.
    """
    # Compute the tensor decomposition
    coefs, offsets = Selling.Decomposition(A)
    A, coefs, offsets = (bc.as_field(e) for e in (A, coefs, offsets))

    # Obtain the first and second order finite differences
    grad = Gradient(u, A, bc, decomp=(coefs, offsets))
    d2u = bc.Diff2(u, offsets)

    # Numerical scheme in interior
    residue = -lp.dot_VV(coefs, d2u) + F(grad) - 1.

    # Placeholders outside domain
    return ad.where(bc.interior, residue, u - bc.grid_values)

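# A usage sketch for SchemeLaxFriedrichs with a quadratic Hamiltonian. As above,
# this is an illustration: the grid, the relaxation tensor A, and the choice
# F(p) = |p|^2 are assumptions, not taken from the original notebook.
def demo_SchemeLaxFriedrichs():
    from agd import Domain
    from agd.AutomaticDifferentiation.Optimization import newton_root

    gridscale = 0.02
    aX = np.arange(0., 1. + gridscale, gridscale)
    X = np.stack(np.meshgrid(aX, aX, indexing='ij'))
    bc = Domain.Dirichlet(Domain.Box([[0., 1.], [0., 1.]]), 0., X)

    A = np.eye(2)                  # Relaxation tensor; enlarging it favors monotonicity
    F = lambda g: lp.dot_VV(g, g)  # Hamiltonian |grad u|^2
    return newton_root(SchemeLaxFriedrichs, np.zeros(X.shape[1:]), (A, F, bc))
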
def SchemeUpwind(u, cst, mult, omega, diff, bc):
    """Discretization of the linear non-divergence form second order PDE
         cst + mult u + <omega, grad u> - tr(diff hess(u)) = 0.
    First order accurate: upwind finite differences are used for <omega, grad u>.
    - bc : boundary conditions.
    """
    # Decompose the tensor field
    coefs2, offsets2 = Selling.Decomposition(diff)
    omega, coefs2 = (bc.as_field(e) for e in (omega, coefs2))

    # Decompose the vector field
    coefs1 = -np.abs(omega)
    basis = bc.as_field(np.eye(len(omega)))
    offsets1 = -np.sign(omega) * basis

    # Compute the first and second order finite differences
    du = bc.DiffUpwind(u, offsets1.astype(int))
    d2u = bc.Diff2(u, offsets2)

    # In interior : cst + mult u + <omega, grad u> - tr(diff hess(u)) = 0
    residue = cst + mult * u + lp.dot_VV(coefs1, du) - lp.dot_VV(coefs2, d2u)

    # On boundary : u - bc = 0
    return np.where(bc.interior, residue, u - bc.grid_values)

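# Since this SchemeUpwind is linear in u, it can be solved in a single sparse
# linear solve instead of a Newton iteration. A sketch, assuming agd's
# ad.Sparse.identity and the .solve() method of sparse AD residues:
def demo_SchemeUpwind_solve():
    from agd import Domain

    gridscale = 0.02
    aX = np.arange(0., 1. + gridscale, gridscale)
    X = np.stack(np.meshgrid(aX, aX, indexing='ij'))
    bc = Domain.Dirichlet(Domain.Box([[0., 1.], [0., 1.]]), 0., X)

    cst, mult = 1., 0.  # Constant source term, no zeroth order term
    drift, diff = np.array([1., 0.]), np.eye(2)

    u = ad.Sparse.identity(constant=np.zeros(X.shape[1:]))  # Perturbation of u = 0
    residue = SchemeUpwind(u, cst, mult, drift, diff, bc)
    return residue.solve()  # Solves the linear system residue(u) = 0
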