def C_star(u, uh, phi):
    """Adjoint term for dC/du where C(u) = u.grad(u)."""
    if use_true_soln:
        # Linearize around the combined field u + uh.
        w = u + uh
        return 0.5 * (ufl.transpose(grad(w)) * phi - grad(phi) * w - div(w) * phi)
    # Linearize around uh only.
    return ufl.transpose(grad(uh)) * phi - grad(phi) * uh - div(uh) * phi
def stf3d2(rank2_2d):
    r"""Return the 3D symmetric and trace-free part of a 2D 2-tensor.

    .. warning:: Returns a 2-tensor with the same dimensions as the input
        tensor.

    For a :math:`2 \times 2` tensor :math:`A`, this computes the deviatoric
    part of its symmetrization:

    .. math::
        B = \operatorname{dev}(\operatorname{sym}(A))
          = \tfrac{1}{2}\left(A + A^\mathsf{T}\right)
          - \tfrac{1}{3}\operatorname{tr}\bigl(\operatorname{sym}(A)\bigr)
            I_{2 \times 2}
    """
    n = len(rank2_2d[:, 0])
    sym_part = 1 / 2 * (rank2_2d + ufl.transpose(rank2_2d))
    # Subtract 1/3 of the trace (3D deviator) while keeping the 2D shape.
    return sym_part - (1 / 3) * ufl.tr(sym_part) * ufl.Identity(n)
def nn(eps, u, p, v, q):
    """Neural-network closure term driven by eps * grad(p).

    Returns the weak-form pairing of the network output with grad(q)
    and the network's squared-magnitude form, both over dx.
    """

    def _vec_apply(vec, func=ufl.tanh):
        # Apply `func` componentwise and repackage as a UFL vector.
        return ufl.as_vector([func(vec[k]) for k in range(vec.ufl_shape[0])])

    # ELU-style activation: identity for positive entries, exp(x) - 1 otherwise.
    elu = lambda x: conditional(ufl.gt(x, 0), x, (ufl.exp(x) - 1))
    activate = lambda vec: _vec_apply(vec, func=elu)

    hidden = ufl.transpose(as_vector([W_1, W_2])) * eps * grad(p) + b_1
    net = dot(W_3, activate(hidden)) + b_2
    return inner(net, grad(q)) * dx, inner(net, net) * dx
def nn(u, p, v, q):
    """Neural-network source term evaluated on u.

    Returns (pairing with v over dx, squared-magnitude form over dx,
    the raw network expression).
    """

    def _vec_apply(vec, func=ufl.tanh):
        # Apply `func` componentwise and repackage as a UFL vector.
        return ufl.as_vector([func(vec[k]) for k in range(vec.ufl_shape[0])])

    # ELU-style activation: identity for positive entries, exp(x) - 1 otherwise.
    elu = lambda x: conditional(ufl.gt(x, 0), x, (ufl.exp(x) - 1))
    activate = lambda vec: _vec_apply(vec, func=elu)

    nn_p = dot(W_3, activate(ufl.transpose(as_vector([W_1, W_2])) * u + b_1)) + b_2
    return inner(nn_p, v) * dx, inner(nn_p, nn_p) * dx, nn_p
def assemble_operator(self, term):
    """Return the tuple of UFL form(s) or boundary conditions for `term`.

    Raises ValueError when `term` is not one of the known keys.
    """
    dx = self.dx
    if term == "a":
        # Symmetric-gradient viscous term.
        trial, test = self.du, self.v
        return (inner(grad(trial) + transpose(grad(trial)), grad(test)) * dx, )
    if term == "b":
        # Divergence constraint tested against q.
        return (-self.q * div(self.du) * dx, )
    if term == "bt":
        # Transposed divergence term (pressure against div of test velocity).
        return (-self.dp * div(self.v) * dx, )
    if term == "c":
        # Convective (nonlinear) term evaluated at the current solution.
        return (inner(grad(self.u) * self.u, self.v) * dx, )
    if term == "f":
        return (inner(self.f, self.v) * dx, )
    if term == "g":
        return (self.g * self.q * dx, )
    if term == "dirichlet_bc_u":
        # Inlet profile on boundary marker 1, no-slip on marker 2.
        bcs = [
            DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1),
            DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 2)
        ]
        return (bcs, )
    if term == "inner_product_u":
        # H1-seminorm inner product for velocity.
        return (inner(grad(self.du), grad(self.v)) * dx, )
    if term == "inner_product_p":
        # L2 inner product for pressure.
        return (inner(self.dp, self.q) * dx, )
    raise ValueError("Invalid term for assemble_operator().")
def __init__(self, angular_quad, L):
    """Wrap the precomputed angular quadrature tensors as UFL tensors.

    Parameters
    ----------
    angular_quad : angular quadrature object; must provide get_pw()
        (point weights) and whatever AngularTensors expects —
        TODO confirm exact type against callers.
    L : expansion order forwarded to AngularTensors.
    """
    # Local import: the compiled extension module is only needed here.
    from transport_data import angular_tensors_ext_module
    # Free UFL indices; only p, i, k1 are actually used below (for QtT).
    i,j,k1,k2,p,q = ufl.indices(6)
    tensors = angular_tensors_ext_module.AngularTensors(angular_quad, L)
    # Reshape the flat data arrays to their declared shapes and wrap as
    # UFL tensor literals.
    self.Y = ufl.as_tensor( numpy.reshape(tensors.Y(), tensors.shape_Y()) )
    self.Q = ufl.as_tensor( numpy.reshape(tensors.Q(), tensors.shape_Q()) )
    self.QT = ufl.transpose(self.Q)
    self.Qt = ufl.as_tensor( numpy.reshape(tensors.Qt(), tensors.shape_Qt()) )
    # Axis permutation of the rank-3 tensor Qt: (k1, p, i) -> (p, i, k1).
    self.QtT = ufl.as_tensor( self.Qt[k1,p,i], (p,i,k1) )
    self.G = ufl.as_tensor( numpy.reshape(tensors.G(), tensors.shape_G()) )
    self.T = ufl.as_tensor( numpy.reshape(tensors.T(), tensors.shape_T()) )
    # Quadrature point weights, as a vector and as a diagonal matrix.
    self.Wp = ufl.as_vector( angular_quad.get_pw() )
    self.W = ufl.diag(self.Wp)
def nn(u, v):
    """Facet neural-network term built from averages/jumps of u and the
    facet normal.

    Returns the pairing with jump(v) + avg(v) and the squared-magnitude
    form, both over the interior-facet measure dS.
    """
    features = as_vector([avg(u), jump(u), *grad(avg(u)), *grad(jump(u)), *n('+')])

    def _vec_apply(vec, func=ufl.tanh):
        # Apply `func` componentwise and repackage as a UFL vector.
        return ufl.as_vector([func(vec[k]) for k in range(vec.ufl_shape[0])])

    # ELU-style activation: identity for positive entries, exp(x) - 1 otherwise.
    elu = lambda x: conditional(ufl.gt(x, 0), x, (ufl.exp(x) - 1))
    activate = lambda vec: _vec_apply(vec, func=elu)

    net = dot(W_2, activate(ufl.transpose(as_vector(W_1)) * features + b_1)) + b_2
    return inner(net, jump(v) + avg(v)) * dS, inner(net, net) * dS
def NN(inputs, weights, sigma):
    """Assemble a dense feed-forward network as a single UFL expression.

    Parameters
    ----------
    inputs : sequence of UFL scalars forming the input vector.
    weights : list of per-layer dicts with key "coefficient" (iterable of
        weight coefficients) and optionally "bias".
    sigma : activation identifier forwarded to nonlin_function.

    The final layer is left linear (no activation). A 1-component result
    is unwrapped to a scalar expression.
    """
    r = as_vector(inputs)
    depth = len(weights)
    for i, weight in enumerate(weights):
        l = []
        for w in weight["coefficient"]:
            l.append(w)
        vec = as_vector(l)
        # NOTE(review): `w` here is the *last* coefficient from the loop
        # above; its leading dimension is presumably shared by every
        # coefficient in the layer. The transpose is applied when that
        # dimension does not line up with the current input `r` — confirm
        # this shape convention against the callers that build `weights`.
        if w.ufl_shape[0] != r.ufl_shape[0]:
            vec = ufl.transpose(vec)
        term = vec * r
        if "bias" in weight:
            term += weight["bias"]
        if i + 1 >= depth:
            # Last layer: linear output, no activation.
            r = term
        else:
            r = nonlin_function(term, func=sigma)
    if r.ufl_shape[0] == 1:
        # Unwrap single-component outputs to a plain scalar.
        return r[0]
    return r
v = [func(vec[i]) for i in range(vec.ufl_shape[0])] return ufl.as_vector(v) a = 1.0 relu = lambda vec: conditional(ufl.gt(vec, 0), vec, a * (ufl.exp(vec) - 1)) sigma = lambda vec: sigma_(vec, func=relu) U_ = Function(V) from pyadjoint.placeholder import Placeholder p = Placeholder(U_) a1 = inner( inner( W_4, sigma( ufl.transpose(as_vector([W_1, W_2, W_3])) * as_vector([U_, *X]) + b_1)) + b_2, v ) * dx + inner(grad(U), grad(v)) * dx - Constant( 1 ) * v * dx #+ inner(inner(W_4, sigma(as_vector(W_3, W_4)*X + b_3)) + b_4, v)*dx dt = 0.01 for i in range(100): a2 = (U - U_) / dt * v * dx + a1 solve(a2 == 0, U) U_.assign(U) a1 = inner( inner( W_4, sigma(
def eps(v):
    """Return grad(v) + grad(v)^T.

    NOTE: no 1/2 factor is applied, so this is twice the usual
    symmetric-gradient strain tensor.
    """
    g = ufl.grad(v)
    return g + ufl.transpose(g)
n = FacetNormal(mesh) # The mapping r = x + f fmap = as_vector((r, y)) F = grad(fmap) J = det(F) Grad = lambda arg: dot(grad(arg), inv(F)) Div = lambda arg: inner(grad(arg), inv(F)) a = inner(Grad(u), Grad(v))*J*r*dx + inner(u[0]/r, v[0]/r)*J*r*dx +\ inner(p, Div(v))*J*r*dx + inner(p, v[0]/r)*J*r*dx +\ inner(q, Div(u))*J*r*dx + inner(q, u[0]/r)*J*r*dx L = inner(Constant((0, 0)), v)*J*r*dx + \ h_top*inner(v, dot(transpose(inv(F)), n))*J*ds(top) +\ h_bottom*inner(v, dot(transpose(inv(F)), n))*J*ds(bottom) bcs = [ DirichletBC(W.sub(0), bdry_velocity, bdries, left), DirichletBC(W.sub(0), Constant((0, 0)), bdries, right) ] wh = Function(W) V = FunctionSpace(mesh, v_elm) Q = FunctionSpace(mesh, p_elm) uh, ph = Function(V), Function(Q) assigner = FunctionAssigner([V, Q], W)