Example #1
def SchemeUniform_OptInner(u, SB, f, bc, oracle=None):
    # Use the oracle, if available, to select the active superbases only
    if not (oracle is None):
        SB = np.take_along_axis(SB,
                                np.broadcast_to(
                                    oracle,
                                    SB.shape[:2] + (1, ) + oracle.shape),
                                axis=2)

    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Placeholder value to silence NaN warnings

    # Generate the parameters for the low dimensional optimization problem
    Q = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
    l = -d2u
    m = lp.dot_VV(SB, SB)

    m = bc.as_field(m)
    from agd.FiniteDifferences import as_field
    Q = as_field(Q, m.shape[1:])

    dim = 2
    alpha = dim * f**(1 / dim)
    mask = (alpha == 0)

    Q = Q * np.where(mask, 1., alpha**2)
    # Evaluate the non-linear functional using dense-sparse composition
    residue = ad.apply(ConstrainedMaximize, Q, l, m,
                       shape_bound=u.shape).copy()
    residue[:, mask] = np.max(l / m, axis=0)[:, mask]

    return ad.max_argmax(residue, axis=0)
Example #2
def SchemeSampling_Opt(u, diffs, beta, bc):
    # Evaluate the operator using the envelope theorem
    result, _ = ad.apply(SchemeSampling_OptInner,
                         u,
                         bc.as_field(diffs),
                         bc,
                         envelope=True)

    # Boundary conditions
    return ad.where(bc.interior, beta - result, u - bc.grid_values)
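
A note on the envelope=True option used here and below: it exploits the envelope theorem. If $R(u)=\min_\xi r(u,\xi)$ (or a max, as in SchemeUniform_Opt), where $\xi$ ranges over the sampled tensors or superbases, then at an optimizer $\xi^*(u)$ one has $R'(u)=\partial_u r(u,\xi^*(u))$: the derivative of the optimum is obtained by differentiating $r$ with the optimal index frozen. This is presumably why the *_OptInner functions accept an oracle argument, so that the optimal indices computed in an AD-free pass can be reused and the AD evaluation only visits the active terms.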
Example #3
def SchemeUniform_Opt(u, SB, f, bc):

    # Evaluate the maximum over the superbases using the envelope theorem
    residue, _ = ad.apply(SchemeUniform_OptInner,
                          u,
                          bc.as_field(SB),
                          f,
                          bc,
                          envelope=True)

    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #4
def SchemeMALBR_Opt(u, SB, f, bc):

    # Evaluate using the envelope theorem
    result, _ = ad.apply(SchemeMALBR_OptInner,
                         u,
                         bc.as_field(SB),
                         bc,
                         envelope=True)

    # Boundary conditions
    return ad.where(bc.interior, f - result, u - bc.grid_values)
Example #5
def SchemeMALBR_OptInner(u, SB, bc, oracle=None):
    # If the active superbases are known, then take only these
    if not (oracle is None):
        SB = np.take_along_axis(SB,
                                np.broadcast_to(
                                    oracle,
                                    SB.shape[:2] + (1, ) + oracle.shape),
                                axis=2)

    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Placeholder value to silence NaN warnings

    # Evaluate the complex non-linear function using dense-sparse composition
    result = ad.apply(MALBR_H, d2u, shape_bound=u.shape)

    return ad.min_argmin(result, axis=0)
Example #6
def MALBR_H(d2u):
    a, b, c = ad.sort(np.maximum(0., d2u), axis=0)

    # General formula, handling infinite values separately
    A, B, C = (ad.where(e == np.inf, 0., e) for e in (a, b, c))
    result = 0.5 * (A * B + B * C + C * A) - 0.25 * (A**2 + B**2 + C**2)

    pos_inf = np.logical_or.reduce(d2u == np.inf)
    result[pos_inf] = np.inf

    pos_ineq = a + b < c
    result[pos_ineq] = (A * B)[pos_ineq]

    return result
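
In formula form, with $(a,b,c)$ the sorted nonnegative parts of the second differences along the three superbase directions, the operator computed above is
$$H(a,b,c)=\begin{cases} a\,b, & a+b<c,\\[2pt] \tfrac12(ab+bc+ca)-\tfrac14(a^2+b^2+c^2), & \text{otherwise,}\end{cases}\qquad a\le b\le c,$$
and the separate handling of $+\infty$ in the code extends these two branches to the case where some of the second differences are infinite.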
Example #7
def SchemeUniform(u, SB, f, bc):
    # Compute the finite differences along the superbase directions
    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Placeholder value to silence NaN warnings

    # Generate the parameters for the low dimensional optimization problem
    Q = 0.5 * np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
    l = -d2u
    m = lp.dot_VV(SB, SB)

    # Evaluate the numerical scheme
    m = bc.as_field(m)
    from agd.FiniteDifferences import as_field
    Q = as_field(Q, m.shape[1:])

    dim = 2
    alpha = dim * f**(1 / dim)
    mask = (alpha == 0)

    Q = Q * np.where(mask, 1., alpha**2)
    residue = ConstrainedMaximize(Q, l, m).max(axis=0)
    residue[mask] = np.max(l / m, axis=0).max(axis=0)[mask]

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #8
def SchemeCentered(u,cst,mult,omega,diff,bc,ret_hmax=False):
    """Discretization of a linear non-divergence form second order PDE
        cst + mult u + <omega,grad u> - tr(diff hess(u)) = 0
        Second order accurate; centered yet monotone finite differences are used for <omega,grad u>.
        - bc : boundary conditions.
        - ret_hmax : return the largest grid scale for which monotonicity holds
    """
    # Decompose the tensor field
    coefs2,offsets = Selling.Decomposition(diff)
    
    # Decompose the vector field
    scals = lp.dot_VA(lp.solve_AV(diff,omega), offsets.astype(float))
    coefs1 = coefs2*scals
    if ret_hmax: return 2./norm(scals,ord=np.inf)
    
    # Compute the first and second order finite differences    
    du  = bc.DiffCentered(u,offsets)
    d2u = bc.Diff2(u,offsets)
    
    # In interior : cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
    coefs1,coefs2 = (bc.as_field(e) for e in (coefs1,coefs2))    
    residue = cst + mult*u +lp.dot_VV(coefs1,du) - lp.dot_VV(coefs2,d2u)
    
    # On boundary : u-bc = 0
    return ad.where(bc.interior,residue,u-bc.grid_values)
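
In the notation above, Selling's decomposition writes the diffusion tensor as $\mathrm{diff}=\sum_i \rho_i\, e_i e_i^{\mathsf T}$, with $\rho_i$ = coefs2[i] and $e_i$ = offsets[i], so that
$$\operatorname{tr}(\mathrm{diff}\,\nabla^2 u)=\sum_i\rho_i\,\langle e_i,\nabla^2u\,e_i\rangle\approx\sum_i\rho_i\,\Delta^h_{e_i}u, \qquad \langle\omega,\nabla u\rangle=\sum_i\rho_i s_i\,\langle e_i,\nabla u\rangle\approx\sum_i\rho_i s_i\,\delta^h_{e_i}u,$$
where $s_i=\langle \mathrm{diff}^{-1}\omega,\,e_i\rangle$ = scals[i], and $\Delta^h_e$, $\delta^h_e$ denote the centered second and first differences along the offset $e$. Collecting the coefficient of each neighbour $u(x\pm h e_i)$ shows that the scheme is degenerate elliptic exactly when $h\,|s_i|\le 2$ for all $i$, which is the bound returned when ret_hmax=True.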
Example #9
def MinimizeTrace(u, alpha, bc, sqrt_relax=1e-16):
    # Compute the tensor decompositions
    D = MakeD(alpha)
    theta, sb = AnglesAndSuperbases(D)
    theta = np.array([theta[:-1], theta[1:]])

    # Compute the second order differences in the direction orthogonal to the superbase
    sb_rotated = np.array([-sb[1], sb[0]])
    d2u = bc.Diff2(u, sb_rotated)
    d2u[..., bc.not_interior] = 0.  # Placeholder values to silence NaN warnings

    # Compute the coefficients of the tensor decompositions
    sb1, sb2 = np.roll(sb, 1, axis=1), np.roll(sb, 2, axis=1)
    sb1, sb2 = (e.reshape((2, 3, 1) + sb.shape[2:]) for e in (sb1, sb2))
    D = D.reshape((2, 2, 1, 3, 1) + D.shape[3:])
    # Axes of D are space,space,index of superbase element, index of D, index of superbase, and possibly shape of u
    scals = lp.dot_VAV(sb1, D, sb2)

    # Compute the coefficients of the trigonometric polynomial
    scals, theta = (bc.as_field(e) for e in (scals, theta))
    coefs = -lp.dot_VV(scals, np.expand_dims(d2u, axis=1))

    # Optimality condition for the trigonometric polynomial in the interior
    value = coefs[0] - np.sqrt(
        np.maximum(coefs[1]**2 + coefs[2]**2, sqrt_relax))
    coefs_ = np.array(coefs)  # removed AD information
    angle = np.arctan2(-coefs_[2], -coefs_[1]) / 2.
    angle[angle < 0] += np.pi

    # Boundary conditions for the trigonometric polynomial minimization
    mask = np.logical_not(np.logical_and(theta[0] <= angle, angle <= theta[1]))
    t, c = theta[:, mask], coefs[:, mask]
    value[mask], amin_t = ad.min_argmin(c[0] + c[1] * np.cos(2 * t) +
                                        c[2] * np.sin(2 * t),
                                        axis=0)

    # Minimize over superbases
    value, amin_sb = ad.min_argmin(value, axis=0)

    # Record the optimal angles for future use
    angle[mask] = np.take_along_axis(t, np.expand_dims(amin_t, axis=0),
                                     axis=0).squeeze(axis=0)  # Min over bc
    angle = np.take_along_axis(angle, np.expand_dims(amin_sb, axis=0),
                               axis=0)  # Min over superbases

    return value, angle
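
The interior branch uses the closed-form minimum of the trigonometric polynomial,
$$\min_{\theta}\; c_0+c_1\cos 2\theta+c_2\sin 2\theta \;=\; c_0-\sqrt{c_1^2+c_2^2},$$
attained at $2\theta=\operatorname{atan2}(-c_2,-c_1)$; sqrt_relax keeps the argument of the square root away from zero to tame its non-differentiability. When this unconstrained minimizer falls outside the admissible angular sector $[\theta_0,\theta_1]$ attached to a superbase, the minimum is taken over the two endpoints instead, which is the branch guarded by mask.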
Example #10
def SchemeSampling_OptInner(u, diffs, bc, oracle=None):
    # Select the active tensors, if they are known
    if not (oracle is None):
        diffs = np.take_along_axis(diffs,
                                   np.broadcast_to(
                                       oracle,
                                       diffs.shape[:2] + (1, ) + oracle.shape),
                                   axis=2)

    print("Has AD information :", ad.is_ad(u),
          ". Number active tensors per point :", diffs.shape[2])

    # Tensor decomposition
    coefs, offsets = Selling.Decomposition(diffs)

    # Return the minimal value, and the minimizing index
    return ad.min_argmin(lp.dot_VV(coefs, bc.Diff2(u, offsets)), axis=0)
Example #11
def SchemeSampling(u, diffs, beta, bc):
    # Tensor decomposition
    coefs, offsets = Selling.Decomposition(diffs)

    # Numerical scheme
    coefs = bc.as_field(coefs)
    residue = beta - (coefs * bc.Diff2(u, offsets)).sum(0).min(0)

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #12
def SchemeMALBR(u, SB, f, bc):
    # Compute the finite differences along the superbase directions
    d2u = bc.Diff2(u, SB)
    d2u[..., bc.not_interior] = 0.  # Replace NaNs with arbitrary values to silence warnings

    # Numerical scheme
    residue = f - MALBR_H(d2u).min(axis=0)

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #13
def SchemeNonMonotone(u, f, bc):
    # Compute the hessian matrix of u
    uxx = bc.Diff2(u, (1, 0))
    uyy = bc.Diff2(u, (0, 1))
    uxy = 0.25 * (bc.Diff2(u, (1, 1)) - bc.Diff2(u, (1, -1)))

    # Numerical scheme
    det = uxx * uyy - uxy**2
    residue = f - det

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
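
The mixed derivative is recovered from second differences along the diagonals: with $\Delta^h_e u$ denoting the centered second difference along the offset $e$ (which approximates $\langle e,\nabla^2u\,e\rangle$), one has
$$\Delta^h_{(1,1)}u-\Delta^h_{(1,-1)}u=(u_{xx}+2u_{xy}+u_{yy})-(u_{xx}-2u_{xy}+u_{yy})+O(h^2)=4\,u_{xy}+O(h^2),$$
which explains the factor $0.25$ above; the interior residue is then $f-\det\nabla^2u$ with $\det\nabla^2u\approx u_{xx}u_{yy}-u_{xy}^2$.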
Example #14
def SchemeLaxFriedrichs(u, A, F, bc):
    """
    Discretization of - Tr(A(x) hess u(x)) + F(grad u(x)) - 1 = 0,
    with Dirichlet boundary conditions. The scheme is second order,
    and degenerate elliptic under suitable assumptions.
    """
    # Compute the tensor decomposition
    coefs, offsets = Selling.Decomposition(A)
    A, coefs, offsets = (bc.as_field(e) for e in (A, coefs, offsets))

    # Obtain the first and second order finite differences
    grad = Gradient(u, A, bc, decomp=(coefs, offsets))
    d2u = bc.Diff2(u, offsets)

    # Numerical scheme in interior
    residue = -lp.dot_VV(coefs, d2u) + F(grad) - 1.

    # Placeholders outside domain
    return ad.where(bc.interior, residue, u - bc.grid_values)
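
In the interior, writing $A=\sum_i\rho_i\, e_ie_i^{\mathsf T}$ for Selling's decomposition, the scheme reads
$$-\sum_i\rho_i\,\Delta^h_{e_i}u \;+\; F(\nabla^h u)\;-\;1\;=\;0,$$
where $\nabla^h u$ is the gradient reconstructed by the Gradient helper (not shown here) from the same decomposition; the diffusive term is what makes the scheme degenerate elliptic for suitable $A$ and $F$, in the spirit of Lax-Friedrichs.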
Example #15
def SchemeNonMonotone(u, alpha, beta, bc, sqrt_relax=1e-6):
    # Compute the hessian matrix of u
    uxx = bc.Diff2(u, (1, 0))
    uyy = bc.Diff2(u, (0, 1))
    uxy = 0.25 * (bc.Diff2(u, (1, 1)) - bc.Diff2(u, (1, -1)))

    # Compute the eigenvalues
    # The relaxation is here to tame the non-differentiability of the square root.
    htr = (uxx + uyy) / 2.
    sdelta = np.sqrt(np.maximum(((uxx - uyy) / 2.)**2 + uxy**2, sqrt_relax))

    lambda_max = htr + sdelta
    lambda_min = htr - sdelta

    # Numerical scheme
    residue = beta - alpha * lambda_max - lambda_min

    # Boundary conditions
    return ad.where(bc.interior, residue, u - bc.grid_values)
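
The quantities above are the eigenvalues of the (approximate) Hessian,
$$\lambda_{\max,\min}=\frac{u_{xx}+u_{yy}}{2}\pm\sqrt{\Big(\frac{u_{xx}-u_{yy}}{2}\Big)^2+u_{xy}^2},$$
with $u_{xy}$ obtained from the same diagonal cross-difference as in Example #13, so that the interior residue is $\beta-\alpha\,\lambda_{\max}-\lambda_{\min}$; sqrt_relax again regularizes the square root where the two eigenvalues coalesce.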
Example #16
def SchemeUpwind(u,cst,mult,omega,diff,bc):
    """Discretization of a linear non-divergence form second order PDE
        cst + mult u + <omega,grad u> - tr(diff hess(u)) = 0
        First order accurate; upwind finite differences are used for <omega,grad u>.
        - bc : boundary conditions.
    """
    # Decompose the tensor field
    coefs2,offsets2 = Selling.Decomposition(diff)
    omega,coefs2 = (bc.as_field(e) for e in (omega,coefs2))    

    # Decompose the vector field
    coefs1 = -np.abs(omega)
    basis = bc.as_field(np.eye(len(omega)))
    offsets1 = -np.sign(omega)*basis
    
    # Compute the first and second order finite differences    
    du  = bc.DiffUpwind(u,offsets1.astype(int))
    d2u = bc.Diff2(u,offsets2)
    
    # In interior : cst + mult u + <omega,grad u>- tr(diff hess(u)) = 0
    residue = cst + mult*u +lp.dot_VV(coefs1,du) - lp.dot_VV(coefs2,d2u)
    
    # On boundary : u-bc = 0
    return ad.where(bc.interior,residue,u-bc.grid_values)
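
Assuming bc.DiffUpwind(u, e) is the forward difference $(u(x+he)-u(x))/h$, the choice of offsets $-\operatorname{sign}(\omega_i)\,e_i$ with coefficients $-|\omega_i|$ yields
$$\sum_i|\omega_i|\,\frac{u(x)-u\big(x-\operatorname{sign}(\omega_i)\,h\,e_i\big)}{h}\;\approx\;\langle\omega,\nabla u\rangle,$$
a first order accurate upwind approximation whose neighbour coefficients have the sign required for degenerate ellipticity.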
Example #17
def SchemeConsistent_Opt(u, alpha, beta, bc):
    value, _ = ad.apply(MinimizeTrace_Opt, u, alpha, bc, envelope=True)
    residue = beta - value
    return ad.where(bc.interior, residue, u - bc.grid_values)
Example #18
def SchemeConsistent(u, alpha, beta, bc):
    value, _ = MinimizeTrace(u, alpha, bc)
    residue = beta - value
    return ad.where(bc.interior, residue, u - bc.grid_values)
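
All of these schemes return a residual that vanishes at the discrete solution, so they are meant to be driven to zero with a Newton method built on the library's sparse automatic differentiation. Below is a minimal sketch, assuming a boundary-condition object bc and a grid shape grid_shape have already been set up (e.g. via agd.Domain), and assuming a Newton solver is available as agd.AutomaticDifferentiation.Optimization.newton_root with the call pattern shown; both assumptions should be checked against the installed version of agd.

import numpy as np
from agd.AutomaticDifferentiation.Optimization import newton_root  # assumed module path

# alpha, beta : PDE parameters; bc : Dirichlet boundary conditions (assumed built elsewhere)
guess = np.zeros(grid_shape)                      # grid_shape : shape of the grid (hypothetical)
solution = newton_root(SchemeConsistent, guess,   # assumed call pattern: scheme, initial guess,
                       (alpha, beta, bc))         # then the scheme's remaining arguments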