# Assumed imports, following the agd library conventions suggested by the
# identifiers below (Selling.Decomposition, lp.dot_AV, lp.solve_AV, ...).
import numpy as np
from agd import Selling
from agd import LinearParallel as lp

def Gradient(u, A, bc, decomp=None):
    """
    Approximates grad u(x), using finite differences along the axes of A.
    """
    coefs, offsets = Selling.Decomposition(A) if decomp is None else decomp
    du = bc.DiffCentered(u, offsets)
    AGrad = lp.dot_AV(offsets.astype(float), coefs * du)  # Approximates A * grad u
    return lp.solve_AV(A, AGrad)  # Approximates A^{-1} (A * grad u) = grad u
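For reference, the identity behind the last two lines, restated with $\rho_i, e_i$ denoting the coefficients and offsets of Selling's decomposition, and $\delta^h_{e_i}$ the centered finite difference computed by bc.DiffCentered:

$$A \nabla u = \sum_i \rho_i\, \langle e_i, \nabla u\rangle\, e_i \approx \sum_i \rho_i\, (\delta^h_{e_i} u)\, e_i, \qquad \text{since } A = \sum_i \rho_i\, e_i e_i^\top.$$

Applying $A^{-1}$ to this sum, via lp.solve_AV, then recovers the gradient.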
def SchemeNonlinear(u, x, f, bc):
    # Decompose the diffusion tensor D(x) using Selling's algorithm.
    # D and omega are problem data, assumed defined elsewhere.
    coef, offsets = Selling.Decomposition(D(x))
    du = bc.DiffCentered(u, offsets)  # Centered first order differences
    d2u = bc.Diff2(u, offsets)        # Second order differences
    # Approximate grad u, as in Gradient above
    p = lp.dot_AV(lp.inverse(D(x)), np.sum(coef * du * offsets, axis=1))
    # Residual of the PDE in the interior, Dirichlet data on the boundary
    return np.where(
        bc.interior,
        -0.5 * lp.dot_VV(omega(x), p) ** 2 - lp.dot_VV(coef, d2u) - f,
        u - bc.grid_values,
    )
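In formulas, and assuming that D(x) and omega(x) are the diffusion tensor and vector field of the problem, the scheme appears to discretize

$$-\frac{1}{2} \langle \omega(x), \nabla u(x)\rangle^2 - \mathrm{Tr}(D(x)\, \nabla^2 u(x)) = f(x)$$

in the interior, with Dirichlet data bc.grid_values on the boundary. The diffusion term uses the same decomposition $D = \sum_i \rho_i\, e_i e_i^\top$ as above, so that $\mathrm{Tr}(D \nabla^2 u) = \sum_i \rho_i\, \langle e_i, \nabla^2 u\, e_i\rangle$ is approximated by the weighted second differences $\sum_i \rho_i\, \Delta^h_{e_i} u$, while p reproduces the gradient approximation of Gradient inline.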
def ConstrainedMaximize(Q, l, m):
    dim = l.shape[0]
    # The one dimensional case is solved explicitly
    if dim == 1:
        return (l[0] + np.sqrt(Q[0, 0])) / m[0]

    # Discard infinite values, handled afterwards
    pos_bad = l.min(axis=0) == -np.inf
    L = l.copy()
    L[:, pos_bad] = 0

    # Solve the quadratic equation <mu m - L, A (mu m - L)> = 1, where A = Q^{-1}
    A = lp.inverse(Q)
    lAl = lp.dot_VAV(L, A, L)
    lAm = lp.dot_VAV(L, A, m)
    mAm = lp.dot_VAV(m, A, m)

    delta = lAm ** 2 - (lAl - 1.) * mAm
    pos_bad = np.logical_or(pos_bad, delta <= 0)
    delta[pos_bad] = 1.
    mu = (lAm + np.sqrt(delta)) / mAm

    # Check the positivity of v = dot_AV(A, mu*m - L).
    # The cast to plain numpy arrays presumably bypasses AD operator overloads.
    rm_ad = np.array
    v = lp.dot_AV(rm_ad(A), rm_ad(mu) * rm_ad(m) - rm_ad(L))
    pos_bad = np.logical_or(pos_bad, np.any(v < 0, axis=0))

    result = mu
    result[pos_bad] = -np.inf

    # Solve the lower dimensional sub-problems, where one coordinate vanishes.
    # We could restrict to the bad positions, and avoid repeating computations.
    for i in range(dim):
        axes = np.full(dim, True)
        axes[i] = False
        res = ConstrainedMaximize(Q[axes][:, axes], l[axes], m[axes])
        result = np.maximum(result, res)
    return result
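A note on the underlying optimization, as read off the code rather than stated in it: the recursion appears to compute

$$\max_{v \geq 0,\ v \neq 0}\ \frac{\langle l, v\rangle + \sqrt{\langle v, Q v\rangle}}{\langle m, v\rangle}.$$

The first order optimality condition for a maximizer interior to the positive orthant reads $l + Qv/\|v\|_Q = \mu m$, with $\mu$ the optimal value and $\|v\|_Q := \sqrt{\langle v, Qv\rangle}$. Equivalently $v/\|v\|_Q = A(\mu m - l)$ with $A = Q^{-1}$, and normalizing yields the scalar quadratic $\langle \mu m - l,\, A(\mu m - l)\rangle = 1$, i.e. $\mu^2\, mAm - 2\mu\, lAm + lAl = 1$, whose largest root is the $\mu$ computed above. When the associated $v = A(\mu m - l)$ has a negative component, the maximizer lies on a face of the orthant, hence the recursive calls with one coordinate removed. In dimension one the value reduces to $(l_0 + \sqrt{Q_{00}})/m_0$, matching the base case.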
def H3(Q, w, b, delta):
    Q_delta = lp.dot_AV(Q, delta)
    r = np.sqrt(b + lp.dot_VV(delta, Q_delta))
    # Value r - <w, delta>, subject to the componentwise check Q delta <= r w
    return np.where(np.all(Q_delta <= r * w, axis=0), r - lp.dot_VV(w, delta), -np.inf)
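In formulas, the function evaluates

$$H_3(Q, w, b, \delta) = \begin{cases} r - \langle w, \delta\rangle & \text{if } Q\delta \leq r\, w \text{ componentwise},\\ -\infty & \text{otherwise}, \end{cases} \qquad r = \sqrt{b + \langle \delta, Q\delta\rangle}.$$

Note that the componentwise condition amounts to $Q\delta/r \leq w$, i.e. the map $\delta' \mapsto \sqrt{b + \langle \delta', Q\delta'\rangle} - \langle w, \delta'\rangle$ has a non-positive gradient at $\delta$; it seems to play the same admissibility role as the positivity check in ConstrainedMaximize, flagging invalid branches with $-\infty$ so that an outer maximum discards them.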