Example #1
def SchemeCentered(u, cst, mult, omega, diff, bc, ret_hmax=False):
    """Discretization of a linear non-divergence form second order PDE
        cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
        Second order accurate, centered yet monotone finite differences are used for <omega,grad u>
        - bc : boundary conditions. 
        - ret_hmax : return the largest grid scale for which monotony holds
    """
    # Decompose the tensor field
    coefs2, offsets = Selling.Decomposition(diff)

    # Decompose the vector field
    scals = lp.dot_VA(lp.solve_AV(diff, omega), offsets.astype(float))
    coefs1 = coefs2 * scals
    if ret_hmax: return 2. / norm(scals, ord=np.inf)

    # Compute the first and second order finite differences
    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)

    # In interior : cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
    coefs1, coefs2 = (bc.as_field(e) for e in (coefs1, coefs2))
    residue = cst + mult * u + lp.dot_VV(coefs1, du) - lp.dot_VV(coefs2, d2u)

    # On boundary : u-bc = 0
    return np.where(bc.interior, residue, u - bc.grid_values)
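
All of these schemes rely on Selling.Decomposition, which writes a positive definite matrix D as a nonnegative sum of rank-one terms D = sum_i rho_i e_i e_i^T with integer offsets e_i. As a point of reference, here is a minimal self-contained 2D sketch of Selling's algorithm; `selling_2d` is a hypothetical name, and the library version is vectorized over whole tensor fields.

import numpy as np

def selling_2d(D, maxiter=100):
    # Sketch of Selling's algorithm for a single 2x2 symmetric positive definite matrix
    b = np.array([[1., 0.], [0., 1.], [-1., -1.]])  # canonical superbase: b0 + b1 + b2 = 0
    for _ in range(maxiter):
        for i, j, k in ((0, 1, 2), (1, 2, 0), (2, 0, 1)):
            if b[i] @ D @ b[j] > 1e-12:   # pair not obtuse: flip b_i, update third vector
                b[i], b[k] = -b[i], b[i] - b[j]
                break
        else:
            break  # superbase is D-obtuse: decomposition found
    # Coefficients rho_k = -<b_i, D b_j>; offsets are the b_k rotated by 90 degrees
    rho = np.array([-b[1] @ D @ b[2], -b[2] @ D @ b[0], -b[0] @ D @ b[1]])
    e = np.array([[-b[k][1], b[k][0]] for k in range(3)])
    return rho, e

D = np.array([[2., 1.], [1., 2.]])
rho, e = selling_2d(D)
print(np.allclose(sum(r * np.outer(v, v) for r, v in zip(rho, e)), D))  # True
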
Example #2
def SchemeUpwind(u, A, omega, D, rhs, bc):
    """
    Discretization of -Tr(A(x) hess u(x)) + |grad u(x) - omega(x)|_D(x)^2 - rhs,
    with Dirichlet boundary conditions, using upwind finite differences for the first order part.
    The scheme is degenerate elliptic if A and D are positive definite.
    """
    # Compute the decompositions (here offset_e = offset_f)
    nothing = (np.full((0,), 0.), np.full((2, 0), 0))  # empty coefs and offsets
    mu, offset_e = nothing if A is None else Selling.Decomposition(A)
    nu, offset_f = nothing if D is None else Selling.Decomposition(D)
    omega_f = lp.dot_VA(omega, offset_f.astype(float))

    # First and second order finite differences
    maxi = np.maximum
    mu, nu, omega_f = (bc.as_field(e) for e in (mu, nu, omega_f))

    dup = bc.DiffUpwind(u, offset_f)
    dum = bc.DiffUpwind(u, -offset_f)
    dup[..., bc.not_interior] = 0.  # Placeholder values to silence NaN warnings
    dum[..., bc.not_interior] = 0.

    d2u = bc.Diff2(u, offset_e)

    # Scheme in the interior
    du = maxi(0., maxi(omega_f - dup, -omega_f - dum))
    residue = -lp.dot_VV(mu, d2u) + lp.dot_VV(nu, du**2) - rhs

    # Placeholders outside domain
    return np.where(bc.interior, residue, u - bc.grid_values)
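
The first order term above combines the two upwind differences dup ≈ <grad u, e> and dum ≈ -<grad u, e>: near the exact solution, max(0, omega_f - dup, -omega_f - dum) approximates |<grad u, e> - omega_f| while remaining monotone. A quick 1D sanity check of that identity, in plain NumPy with made-up values:

import numpy as np

h, omega = 1e-4, 0.3
u = np.sin                          # smooth test function
x = 0.8
dup = (u(x + h) - u(x)) / h         # upwind difference along e = +1
dum = (u(x - h) - u(x)) / h         # upwind difference along e = -1
approx = max(0., omega - dup, -omega - dum)
print(approx, abs(np.cos(x) - omega))  # both close to |u'(x) - omega|
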
Example #3
def Gradient(u, A, bc, decomp=None):
    """
    Approximates grad u(x), using centered finite differences along the offsets of a decomposition of A.
    """
    coefs, offsets = Selling.Decomposition(A) if decomp is None else decomp
    du = bc.DiffCentered(u, offsets)
    AGrad = lp.dot_AV(offsets.astype(float), coefs * du)  # Approximates A * grad u
    return lp.solve_AV(A, AGrad)  # Approximates A^{-1} (A * grad u) = grad u
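
This works because A = sum_i rho_i e_i e_i^T implies sum_i rho_i <grad u, e_i> e_i = A grad u, which the final solve then inverts. A tiny self-contained check with a hand-computed decomposition of a sample matrix (plain NumPy, illustrative values):

import numpy as np

A = np.array([[2., 1.], [1., 2.]])
# A decomposition A = sum_i rho_i e_i e_i^T, e.g. produced by Selling's algorithm
rho = np.array([1., 1., 1.])
e = np.array([[0., -1.], [-1., 0.], [1., 1.]])
g = np.array([0.3, -0.7])  # stand-in for grad u; du_i would approximate <g, e_i>
AGrad = sum(r * (g @ v) * v for r, v in zip(rho, e))  # approximates A * grad u
print(np.allclose(np.linalg.solve(A, AGrad), g))      # True: the solve recovers g
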
Example #4
def SchemeNonlinear(u, x, f, bc):
    # Discretizes -(1/2) <omega(x), grad u(x)>^2 - tr(D(x) hess(u)) - f = 0 with
    # Dirichlet boundary conditions; the fields D and omega are defined globally.
    coef, offsets = Selling.Decomposition(D(x))
    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)
    # p approximates grad u: sum_i coef_i*du_i*offset_i ~ D grad u, then apply D^{-1}
    p = lp.dot_AV(lp.inverse(D(x)), np.sum(coef * du * offsets, axis=1))
    return np.where(
        bc.interior,
        -1 / 2 * lp.dot_VV(omega(x), p)**2 - lp.dot_VV(coef, d2u) - f,
        u - bc.grid_values,
    )
Example #5
def SchemeUniform_OptInner(u, SB, f, bc, oracle=None):
    # Use the oracle, if available, to select the active superbases only
    if oracle is not None:
        SB = np.take_along_axis(
            SB, np.broadcast_to(oracle, SB.shape[:2] + (1,) + oracle.shape), axis=2)

    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Placeholder value to silence NaN warnings

    # Generate the parameters for the low dimensional optimization problem
    Q = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
    l = -d2u
    m = lp.dot_VV(SB, SB)

    m = bc.as_field(m)
    from agd.FiniteDifferences import as_field
    Q = as_field(Q, m.shape[1:])

    dim = 2
    alpha = dim * f**(1 / dim)
    mask = (alpha == 0)

    Q = Q * np.where(mask, 1., alpha**2)
    # Evaluate the non-linear functional using dense-sparse composition
    residue = ad.apply(ConstrainedMaximize, Q, l, m,
                       shape_bound=u.shape).copy()
    residue[:, mask] = np.max(l / m, axis=0)[:, mask]

    return ad.max_argmax(residue, axis=0)
Example #6
def SchemeUniform(u, SB, f, bc):
    # Compute the finite differences along the superbase directions
    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Placeholder value to silence NaN warnings

    # Generate the parameters for the low dimensional optimization problem
    Q = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
    l = -d2u
    m = lp.dot_VV(SB, SB)

    # Evaluate the numerical scheme
    m = bc.as_field(m)
    from agd.FiniteDifferences import as_field
    Q = as_field(Q, m.shape[1:])

    dim = 2
    alpha = dim * f**(1 / dim)
    mask = (alpha == 0)

    Q = Q * np.where(mask, 1., alpha**2)
    residue = ConstrainedMaximize(Q, l, m).max(axis=0)
    residue[mask] = np.max(l / m, axis=0).max(axis=0)[mask]

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #7
def SolveNonlinear(x, f, bc):
    dde = True

    def Solver(residue):
        nonlocal dde

        triplets, rhs = residue.solve(raw=True)
        mat = tocsr(triplets)

        # Check that the off-diagonal entries are (approximately) non-positive,
        # as expected from a degenerate elliptic discretization.
        dde = (diags(mat.diagonal()) - mat).min() > -1e-8

        # Jacobi (diagonal) preconditioning of the linear system
        precond = diags(1 / mat.diagonal())
        matprecond = precond @ mat
        rhsprecond = precond @ rhs

        return spsolve(matprecond, rhsprecond).reshape(x.shape[1:])

    result = newton_root(SchemeNonlinear,
                         0.0001 * lp.dot_VV(x, x),
                         params=(x, f, bc),
                         solver=Solver)

    return result, dde
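
The Solver above left-multiplies the linearized system by the inverse of its diagonal (Jacobi preconditioning) before the direct solve. The same transformation in isolation, on a made-up sparse matrix:

import numpy as np
from scipy.sparse import csr_matrix, diags
from scipy.sparse.linalg import spsolve

mat = csr_matrix(np.array([[4., -1., 0.], [-1., 4., -1.], [0., -1., 4.]]))
rhs = np.array([1., 2., 3.])

precond = diags(1 / mat.diagonal())       # inverse of the diagonal
x = spsolve(precond @ mat, precond @ rhs)
print(np.allclose(mat @ x, rhs))          # True: same solution, rescaled system
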
Example #8
def MinimizeTrace(u, alpha, bc, sqrt_relax=1e-16):
    # Compute the tensor decompositions
    D = MakeD(alpha)
    theta, sb = AnglesAndSuperbases(D)
    theta = np.array([theta[:-1], theta[1:]])

    # Compute the second order differences in the direction orthogonal to the superbase
    sb_rotated = np.array([-sb[1], sb[0]])
    d2u = bc.Diff2(u, sb_rotated)
    d2u[..., bc.not_interior] = 0.  # Placeholder values to silence NaN warnings

    # Compute the coefficients of the tensor decompositions
    sb1, sb2 = np.roll(sb, 1, axis=1), np.roll(sb, 2, axis=1)
    sb1, sb2 = (e.reshape((2, 3, 1) + sb.shape[2:]) for e in (sb1, sb2))
    D = D.reshape((2, 2, 1, 3, 1) + D.shape[3:])
    # Axes of D are space,space,index of superbase element, index of D, index of superbase, and possibly shape of u
    scals = lp.dot_VAV(sb1, D, sb2)

    # Compute the coefficients of the trigonometric polynomial
    scals, theta = (bc.as_field(e) for e in (scals, theta))
    coefs = -lp.dot_VV(scals, np.expand_dims(d2u, axis=1))

    # Optimality condition for the trigonometric polynomial in the interior
    value = coefs[0] - np.sqrt(
        np.maximum(coefs[1]**2 + coefs[2]**2, sqrt_relax))
    coefs_ = np.array(coefs)  # removed AD information
    angle = np.arctan2(-coefs_[2], -coefs_[1]) / 2.
    angle[angle < 0] += np.pi

    # Boundary conditions for the trigonometric polynomial minimization
    mask = np.logical_not(np.logical_and(theta[0] <= angle, angle <= theta[1]))
    t, c = theta[:, mask], coefs[:, mask]
    value[mask], amin_t = ad.min_argmin(c[0] + c[1] * np.cos(2 * t) +
                                        c[2] * np.sin(2 * t),
                                        axis=0)

    # Minimize over superbases
    value, amin_sb = ad.min_argmin(value, axis=0)

    # Record the optimal angles for future use
    angle[mask] = np.take_along_axis(t, np.expand_dims(amin_t, axis=0),
                                     axis=0).squeeze(axis=0)  # Min over the interval endpoints
    angle = np.take_along_axis(angle, np.expand_dims(amin_sb, axis=0),
                               axis=0)  # Min over superbases

    return value, angle
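
The interior branch relies on the closed form min_theta c0 + c1*cos(2 theta) + c2*sin(2 theta) = c0 - sqrt(c1^2 + c2^2), attained at theta = arctan2(-c2, -c1)/2 modulo pi. A quick numerical confirmation with illustrative coefficients:

import numpy as np

c0, c1, c2 = 1.0, -0.3, 0.8            # illustrative coefficients
theta = np.linspace(0., np.pi, 100001)
vals = c0 + c1 * np.cos(2 * theta) + c2 * np.sin(2 * theta)

angle = np.arctan2(-c2, -c1) / 2.
if angle < 0: angle += np.pi
print(vals.min(), c0 - np.sqrt(c1**2 + c2**2))  # agree
print(theta[vals.argmin()], angle)              # same minimizing angle
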
Example #9
def MinimizeTrace_Opt(u, alpha, bc, oracle=None):
    if oracle is None: return MinimizeTrace(u, alpha, bc)

    # The oracle contains the optimal angles
    diffs = Diff(alpha, oracle.squeeze(axis=0))
    coefs, sb = Selling.Decomposition(diffs)
    value = lp.dot_VV(coefs, bc.Diff2(u, sb))
    return value, oracle
Example #10
def SchemeLinear(u, x, f, bc):
    # Discretizes -<omega(x), grad u(x)> - tr(D(x) hess(u)) - f = 0 with Dirichlet
    # boundary conditions; the fields D and omega are defined globally.
    coef, offsets = Selling.Decomposition(D(x))


    du = bc.DiffCentered(u, offsets)
    d2u = bc.Diff2(u, offsets)
    return np.where(
        bc.interior,
        -lp.dot_VAV(omega(x), lp.inverse(D(x)),
                    np.sum(coef * du * offsets, axis=1)) -
        lp.dot_VV(coef, d2u) - f,
        u - bc.grid_values,
    )
Example #11
def ConstrainedMaximize(Q, l, m):
    """Maximizes <l,x> + sqrt(<x,Q x>) subject to x >= 0 and <m,x> = 1."""
    dim = l.shape[0]
    if dim == 1:
        return (l[0] + np.sqrt(Q[0, 0])) / m[0]

    # Discard infinite values, handled afterwards
    pos_bad = l.min(axis=0) == -np.inf
    L = l.copy()
    L[:, pos_bad] = 0

    # Solve the quadratic equation
    A = lp.inverse(Q)
    lAl = lp.dot_VAV(L, A, L)
    lAm = lp.dot_VAV(L, A, m)
    mAm = lp.dot_VAV(m, A, m)

    delta = lAm**2 - (lAl - 1.) * mAm
    pos_bad = np.logical_or(pos_bad, delta <= 0)
    delta[pos_bad] = 1.

    mu = (lAm + np.sqrt(delta)) / mAm

    # Check positivity of the candidate maximizer (rm_ad strips AD information)
    rm_ad = np.array
    v = lp.dot_AV(rm_ad(A), rm_ad(mu) * rm_ad(m) - rm_ad(L))
    pos_bad = np.logical_or(pos_bad, np.any(v < 0, axis=0))

    result = mu
    result[pos_bad] = -np.inf

    # Solve the lower dimensional sub-problems
    # We could restrict to the bad positions, and avoid repeating computations
    for i in range(dim):
        axes = np.full(dim, True)
        axes[i] = False
        res = ConstrainedMaximize(Q[axes][:, axes], l[axes], m[axes])
        result = np.maximum(result, res)
    return result
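
As I read it (and as the docstring now states), ConstrainedMaximize(Q, l, m) returns the maximum of <l, x> + sqrt(<x, Q x>) over x >= 0 with <m, x> = 1; the recursion over sub-problems handles maximizers on the boundary of the positive orthant. A brute-force cross-check in dimension 2, with scalar illustrative inputs in plain NumPy:

import numpy as np

Q = np.array([[2., 0.5], [0.5, 1.]])
l = np.array([0.3, -0.2])
m = np.array([1., 2.])

# Sample the constraint segment {x >= 0, <m,x> = 1} and maximize directly
t = np.linspace(0., 1. / m[0], 100001)
x = np.stack([t, (1. - m[0] * t) / m[1]])
vals = l @ x + np.sqrt(np.einsum('id,ij,jd->d', x, Q, x))
print(vals.max())  # should agree with ConstrainedMaximize(Q, l, m) up to sampling error
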
Example #12
def SchemeUpwind(u, cst, mult, omega, diff, bc):
    """Discretization of a linear non-divergence form second order PDE
        cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
        First order accurate, upwind finite differences are used for <omega,grad u>
        - bc : boundary conditions. 
    """
    # Decompose the tensor field
    coefs2, offsets2 = Selling.Decomposition(diff)
    omega, coefs2 = (bc.as_field(e) for e in (omega, coefs2))

    # Decompose the vector field
    coefs1 = -np.abs(omega)
    basis = bc.as_field(np.eye(len(omega)))
    offsets1 = -np.sign(omega) * basis

    # Compute the first and second order finite differences
    du = bc.DiffUpwind(u, offsets1.astype(int))
    d2u = bc.Diff2(u, offsets2)

    # In interior : cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
    residue = cst + mult * u + lp.dot_VV(coefs1, du) - lp.dot_VV(coefs2, d2u)

    # On boundary : u-bc = 0
    return np.where(bc.interior, residue, u - bc.grid_values)
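
With coefs1 = -|omega_i| and offsets -sign(omega_i) e_i, the first order term sums to sum_i |omega_i| sign(omega_i) d_i u ≈ <omega, grad u>, while the upwind choice of direction keeps the scheme degenerate elliptic. A pointwise check with made-up values:

import numpy as np

h = 1e-4
omega = np.array([0.7, -1.3])
x = np.array([0.4, 0.2])
u = lambda p: np.sin(p[0]) * np.cos(p[1])
grad = np.array([np.cos(x[0]) * np.cos(x[1]), -np.sin(x[0]) * np.sin(x[1])])

acc = 0.
for i, w in enumerate(omega):
    e = np.zeros(2); e[i] = -np.sign(w)  # upwind offset -sign(omega_i) e_i
    du = (u(x + h * e) - u(x)) / h       # DiffUpwind along that offset
    acc += -abs(w) * du                  # coefficient -|omega_i|
print(acc, omega @ grad)                 # both close to <omega, grad u>
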
Example #13
def Scheme(a, b, d2u, stencil):
    delta = d2u - lp.dot_VAV(
        np.expand_dims(stencil.V1, (2, 3)),
        np.expand_dims(a, 2),
        np.expand_dims(stencil.V1, (2, 3)),
    )

    spad_sum(b)
    spad_sum(delta)

    # For now, replace `b` with one when it is zero, to prevent errors during automatic
    # differentiation.
    b_zero = b == 0
    b = np.where(b_zero, 1, b)

    residue = -np.inf

    for i in range(stencil.V3.shape[2]):
        residue = np.maximum(
            residue,
            H3(
                stencil.Q[:, :, i, np.newaxis, np.newaxis],
                stencil.w[:, i, np.newaxis, np.newaxis],
                b,
                delta[stencil.V3_indices[:, i]],
            ),
        )

    for i in range(stencil.V2.shape[2]):
        residue = np.maximum(
            residue,
            H2(
                stencil.omega0[i, np.newaxis, np.newaxis],
                stencil.omega1[:, i, np.newaxis, np.newaxis],
                stencil.omega2[:, i, np.newaxis, np.newaxis],
                b,
                delta[stencil.V2_indices[:, i]],
            ),
        )

    # Reset residue to minus infinity where `b` should have been zero.
    residue = np.where(b_zero, -np.inf, residue)

    for i in range(stencil.V1.shape[1]):
        residue = np.maximum(
            residue, H1(stencil.V1[:, i, np.newaxis, np.newaxis], delta[i]))

    return residue
Example #14
def NextAngleAndSuperbase(theta, sb, D):
    pairs = np.stack([(1, 2), (2, 0), (0, 1)], axis=1)
    scals = lp.dot_VAV(np.expand_dims(sb[:, pairs[0]], axis=1),
                       np.expand_dims(D, axis=-1),
                       np.expand_dims(sb[:, pairs[1]], axis=1))
    phi = np.arctan2(scals[2], scals[1])
    cst = -scals[0] / np.sqrt(scals[1]**2 + scals[2]**2)
    theta_max = np.pi * np.ones(3)
    mask = cst < 1
    theta_max[mask] = (phi[mask] - np.arccos(cst[mask])) / 2
    theta_max[theta_max <= 0] += np.pi
    theta_max[theta_max <= theta] = np.pi
    k = np.argmin(theta_max)
    i, j = (k + 1) % 3, (k + 2) % 3
    return (theta_max[k],
            np.stack([sb[:, i], -sb[:, j], sb[:, j] - sb[:, i]], axis=1))
Example #15
def SchemeSampling_OptInner(u, diffs, bc, oracle=None):
    # Select the active tensors, if they are known
    if oracle is not None:
        diffs = np.take_along_axis(
            diffs, np.broadcast_to(oracle, diffs.shape[:2] + (1,) + oracle.shape), axis=2)

    print("Has AD information :", ad.is_ad(u),
          ". Number active tensors per point :", diffs.shape[2])

    # Tensor decomposition
    coefs, offsets = Selling.Decomposition(diffs)

    # Return the minimal value, and the minimizing index
    return ad.min_argmin(lp.dot_VV(coefs, bc.Diff2(u, offsets)), axis=0)
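
The oracle pattern reuses, in a second pass, the per-point argmin returned by ad.min_argmin in a first pass: np.broadcast_to expands the index array over the tensor axes, and np.take_along_axis keeps only the active tensor at each grid point. The same selection in miniature, with made-up shapes:

import numpy as np

diffs = np.random.rand(2, 2, 5, 10)        # 5 candidate 2x2 tensors at 10 grid points
oracle = np.random.randint(5, size=(10,))  # index of the active tensor at each point

idx = np.broadcast_to(oracle, diffs.shape[:2] + (1,) + oracle.shape)
active = np.take_along_axis(diffs, idx, axis=2)
print(active.shape)                                                   # (2, 2, 1, 10)
print(np.all(active[:, :, 0] == diffs[:, :, oracle, np.arange(10)]))  # True
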
Example #16
def SchemeLaxFriedrichs(u, A, F, bc):
    """
    Discretization of - Tr(A(x) hess u(x)) + F(grad u(x)) - 1 = 0,
    with Dirichlet boundary conditions. The scheme is second order,
    and degenerate elliptic under suitable assumptions.
    """
    # Compute the tensor decomposition
    coefs, offsets = Selling.Decomposition(A)
    A, coefs, offsets = (bc.as_field(e) for e in (A, coefs, offsets))

    # Obtain the first and second order finite differences
    grad = Gradient(u, A, bc, decomp=(coefs, offsets))
    d2u = bc.Diff2(u, offsets)

    # Numerical scheme in interior
    residue = -lp.dot_VV(coefs, d2u) + F(grad) - 1.

    # Placeholders outside domain
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #17
def Diff(alpha, theta):
    # Tensor with eigenvalues (alpha, 1), whose alpha-eigenvector makes angle theta with the horizontal
    e0 = np.array((np.cos(theta), np.sin(theta)))
    e1 = np.array((-np.sin(theta), np.cos(theta)))
    if isinstance(alpha, np.ndarray):
        e0, e1 = (as_field(e, alpha.shape) for e in (e0, e1))
    return alpha * lp.outer_self(e0) + lp.outer_self(e1)
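
Diff builds the anisotropic tensor alpha*e0 e0^T + e1 e1^T, whose eigenvalues are (alpha, 1) with the alpha-eigenvector at angle theta from the horizontal. A quick check with scalar illustrative arguments:

import numpy as np

alpha, theta = 0.1, 0.7
e0 = np.array([np.cos(theta), np.sin(theta)])
e1 = np.array([-np.sin(theta), np.cos(theta)])
D = alpha * np.outer(e0, e0) + np.outer(e1, e1)

w, v = np.linalg.eigh(D)
print(w)                                         # [alpha, 1.]
print(np.allclose(np.abs(v[:, 0]), np.abs(e0)))  # eigenvector aligned with e0
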
Example #18
def B_quartic(x, r, p):
    # Right-hand side for the quartic test case: in dimension 2, det(hess(|x|^4)) = 48 |x|^4
    return 48 * lp.dot_VV(x, x)**2
Example #19
def ExactQuartic(x):
    return lp.dot_VV(x, x)**2  # u(x) = |x|^4
Example #20
def MongeAmpere_ad(u, x):
    # Monge-Ampere operator det(hess u), with the Hessian obtained by automatic differentiation
    return lp.det(Hessian_ad(u, x))
Example #21
def Y_reflector(x, r, p):
    tmp = 1 + np.sqrt(1 - lp.dot_VV(p, p) / r**4)
    return x + 1 / (r**3 * tmp) * p
Example #22
def Z_reflector(x, r, p):
    tmp = 1 + np.sqrt(1 - lp.dot_VV(p, p) / r**4)
    return (1 - 1 / tmp) / r
Example #23
def H1(v, delta):
    return -delta / lp.dot_VV(v, v)
Example #24
def A_reflector(x, r, p):
    tmp = 1 + np.sqrt(1 - lp.dot_VV(p, p) / r**4)
    return (2 + tmp) / r * lp.outer(p, p) - r**3 * tmp * lp.identity(
        x.shape[1:])
Example #25
def sigma_reflector(x, r, e):
    tmp = (2 * (r**3 + r**5 + r**5 + lp.dot_VV(x, x)) * e -
           4 * r**5 * lp.dot_VV(e, x) * x)
    y = (4 * r**5 * lp.dot_VV(lp.perp(e), x) * lp.perp(tmp) + np.sqrt(
        lp.dot_VV(tmp, tmp) - 16 * r**10 * lp.dot_VV(lp.perp(e), x)**2) *
         tmp) / np.stack([lp.dot_VV(tmp, tmp),
                          lp.dot_VV(tmp, tmp)])
    return 2 * r**3 * lp.dot_VV(e,
                                y - x) / (1 + r**2 * lp.dot_VV(x - y, x - y))
Example #26
def B_reflector(x, r, p):
    tmp = 1 + np.sqrt(1 - lp.dot_VV(p, p) / r**4)
    return r**6 * (tmp**3 - tmp**2) * f(x)
Example #27
def f2(x):
    return (4 * alpha**2 * (1 + alpha**2 * lp.dot_VV(x, x)) /
            (1 - alpha**2 * lp.dot_VV(x, x))**3) * f(
                2 * alpha * x / (1 - alpha**2 * lp.dot_VV(x, x)))
Example #28
def sigma_reflector2(x, r, e):
    return alpha * np.sqrt(lp.dot_VV(e, e))
Example #29
def F(g):
    return lp.dot_VAV(g - omega, D, g - omega)
Example #30
def Z_reflector2(x, r, p):
    return lp.dot_VV(x, p) - r