def test_vector_assemble_matrix_interior():
    mesh = create_unit_square(MPI.COMM_WORLD, 3, 3)
    V = VectorFunctionSpace(mesh, ("Lagrange", 1))
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = form(ufl.inner(ufl.jump(u), ufl.jump(v)) * ufl.dS)
    A = assemble_matrix(a)
    A.assemble()
def xtest_tetrahedron_integral(space_type, space_order):
    domain = ufl.Mesh(ufl.VectorElement("Lagrange", "tetrahedron", 1))
    temp_points = np.array([[-1., 0., -1.], [0., 0., 0.], [1., 0., 1.],
                            [0., 1., 0.], [0., 0., 1.]])

    for repeat in range(10):
        order = [i for i, j in enumerate(temp_points)]
        shuffle(order)

        points = np.zeros(temp_points.shape)
        for i, j in enumerate(order):
            points[j] = temp_points[i]

        cells = []
        for cell in [[0, 1, 3, 4], [1, 2, 3, 4]]:
            # Randomly number the cell
            cell_order = list(range(4))
            shuffle(cell_order)
            cells.append([order[cell[i]] for i in cell_order])

        mesh = create_mesh(MPI.COMM_WORLD, cells, points, domain)
        V = FunctionSpace(mesh, (space_type, space_order))
        Vvec = VectorFunctionSpace(mesh, ("P", 1))
        dofs = [i for i in V.dofmap.cell_dofs(0) if i in V.dofmap.cell_dofs(1)]
        for d in dofs:
            v = Function(V)
            v.vector[:] = [1 if i == d else 0 for i in range(V.dim)]
            if space_type in ["RT", "BDM"]:
                # Hdiv
                def normal(x):
                    values = np.zeros((3, x.shape[1]))
                    values[0] = [1 for i in values[0]]
                    return values

                n = Function(Vvec)
                n.interpolate(normal)
                form = ufl.inner(ufl.jump(v), n) * ufl.dS
            elif space_type in ["N1curl", "N2curl"]:
                # Hcurl
                def tangent1(x):
                    values = np.zeros((3, x.shape[1]))
                    values[1] = [1 for i in values[1]]
                    return values

                def tangent2(x):
                    values = np.zeros((3, x.shape[1]))
                    values[2] = [1 for i in values[2]]
                    return values

                t1 = Function(Vvec)
                t1.interpolate(tangent1)
                t2 = Function(Vvec)
                t2.interpolate(tangent2)
                form = ufl.inner(ufl.jump(v), t1) * ufl.dS
                form += ufl.inner(ufl.jump(v), t2) * ufl.dS
            else:
                form = ufl.jump(v) * ufl.dS
            value = fem.assemble_scalar(form)
            assert np.isclose(value, 0)
def xtest_quadrilateral_integral(space_type, space_order): domain = ufl.Mesh(ufl.VectorElement("Lagrange", "quadrilateral", 1)) temp_points = np.array([[-1., -1.], [0., 0.], [1., 0.], [-1., 1.], [0., 1.], [2., 2.]]) for repeat in range(10): order = [i for i, j in enumerate(temp_points)] shuffle(order) points = np.zeros(temp_points.shape) for i, j in enumerate(order): points[j] = temp_points[i] connections = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]} cells = [] for cell in [[0, 1, 3, 4], [1, 2, 4, 5]]: # Randomly number the cell start = choice(range(4)) cell_order = [start] for i in range(2): diff = choice([ i for i in connections[start] if i not in cell_order ]) - cell_order[0] cell_order += [c + diff for c in cell_order] cells.append([order[cell[i]] for i in cell_order]) mesh = create_mesh(MPI.COMM_WORLD, cells, points, domain) V = FunctionSpace(mesh, (space_type, space_order)) Vvec = VectorFunctionSpace(mesh, ("P", 1)) dofs = [i for i in V.dofmap.cell_dofs(0) if i in V.dofmap.cell_dofs(1)] for d in dofs: v = Function(V) v.vector[:] = [1 if i == d else 0 for i in range(V.dim)] if space_type in ["RTCF"]: # Hdiv def normal(x): values = np.zeros((2, x.shape[1])) values[0] = [1 for i in values[0]] return values n = Function(Vvec) n.interpolate(normal) form = ufl.inner(ufl.jump(v), n) * ufl.dS elif space_type in ["RTCE"]: # Hcurl def tangent(x): values = np.zeros((2, x.shape[1])) values[1] = [1 for i in values[1]] return values t = Function(Vvec) t.interpolate(tangent) form = ufl.inner(ufl.jump(v), t) * ufl.dS else: form = ufl.jump(v) * ufl.dS value = fem.assemble_scalar(form) assert np.isclose(value, 0)
def test_vector_assemble_matrix_interior():
    mesh = dolfinx.UnitSquareMesh(MPI.COMM_WORLD, 3, 3)
    V = dolfinx.VectorFunctionSpace(mesh, ("CG", 1))
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = ufl.inner(ufl.jump(u), ufl.jump(v)) * ufl.dS
    A = dolfinx.fem.assemble_matrix(a)
    A.assemble()
def test_integral(cell_type, space_type, space_order):
    if cell_type == "hexahedron" and space_order >= 3:
        pytest.skip("Skipping expensive test on hexahedron")
    random.seed(4)
    for repeat in range(10):
        mesh = random_evaluation_mesh(cell_type)
        tdim = mesh.topology.dim
        V = FunctionSpace(mesh, (space_type, space_order))
        Vvec = VectorFunctionSpace(mesh, ("P", 1))
        dofs = [i for i in V.dofmap.cell_dofs(0) if i in V.dofmap.cell_dofs(1)]
        for d in dofs:
            v = Function(V)
            v.vector[:] = [1 if i == d else 0
                           for i, _ in enumerate(v.vector[:])]
            if space_type in ["RT", "BDM", "RTCF", "NCF", "BDMCF", "AAF"]:
                # Hdiv
                def normal(x):
                    values = np.zeros((tdim, x.shape[1]))
                    values[0] = [1 for i in values[0]]
                    return values

                n = Function(Vvec)
                n.interpolate(normal)
                _form = ufl.inner(ufl.jump(v), n) * ufl.dS
            elif space_type in ["N1curl", "N2curl", "RTCE", "NCE", "BDMCE", "AAE"]:
                # Hcurl
                def tangent(x):
                    values = np.zeros((tdim, x.shape[1]))
                    values[1] = [1 for i in values[1]]
                    return values

                t = Function(Vvec)
                t.interpolate(tangent)
                _form = ufl.inner(ufl.jump(v), t) * ufl.dS
                if tdim == 3:
                    def tangent2(x):
                        values = np.zeros((3, x.shape[1]))
                        values[2] = [1 for i in values[2]]
                        return values

                    t2 = Function(Vvec)
                    t2.interpolate(tangent2)
                    _form += ufl.inner(ufl.jump(v), t2) * ufl.dS
            else:
                _form = ufl.jump(v) * ufl.dS
            value = assemble_scalar(form(_form))
            assert np.isclose(value, 0)
def test_integral(cell_type, space_type, space_order): # TODO: Fix jump integrals in FFC by passing in full info for both cells, then re-enable these tests pytest.xfail() random.seed(4) for repeat in range(10): mesh = random_evaluation_mesh(cell_type) tdim = mesh.topology.dim V = FunctionSpace(mesh, (space_type, space_order)) Vvec = VectorFunctionSpace(mesh, ("P", 1)) dofs = [i for i in V.dofmap.cell_dofs(0) if i in V.dofmap.cell_dofs(1)] for d in dofs: v = Function(V) v.vector[:] = [1 if i == d else 0 for i in range(V.dim)] if space_type in ["RT", "BDM", "RTCF", "NCF"]: # Hdiv def normal(x): values = np.zeros((tdim, x.shape[1])) values[0] = [1 for i in values[0]] return values n = Function(Vvec) n.interpolate(normal) form = ufl.inner(ufl.jump(v), n) * ufl.dS elif space_type in ["N1curl", "N2curl", "RTCE", "NCE"]: # Hcurl def tangent(x): values = np.zeros((tdim, x.shape[1])) values[1] = [1 for i in values[1]] return values t = Function(Vvec) t.interpolate(tangent) form = ufl.inner(ufl.jump(v), t) * ufl.dS if tdim == 3: def tangent2(x): values = np.zeros((3, x.shape[1])) values[2] = [1 for i in values[2]] return values t2 = Function(Vvec) t2.interpolate(tangent2) form += ufl.inner(ufl.jump(v), t2) * ufl.dS else: form = ufl.jump(v) * ufl.dS value = fem.assemble_scalar(form) assert np.isclose(value, 0)
def test_interior_facet_integral(mode):
    cell = ufl.triangle
    element = ufl.FiniteElement("Lagrange", cell, 1)
    u, v = ufl.TrialFunction(element), ufl.TestFunction(element)
    a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS
    forms = [a0]
    compiled_forms, module = ffc.codegeneration.jit.compile_forms(
        forms, parameters={'scalar_type': mode})

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    ffi = cffi.FFI()
    form0 = compiled_forms[0][0]

    ids = np.zeros(form0.num_interior_facet_integrals, dtype=np.int32)
    form0.get_interior_facet_integral_ids(ffi.cast('int *', ids.ctypes.data))
    assert ids[0] == -1

    c_type, np_type = float_to_type(mode)

    integral0 = form0.create_interior_facet_integral(-1)
    A = np.zeros((6, 6), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np.float64)
    facets = np.array([0, 2], dtype=np.int32)
    orients = np.array([1, 1], dtype=np.int32)

    coords = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
                       [1.0, 0.0, 0.0, 1.0, 1.0, 1.0]], dtype=np.float64)
    integral0.tabulate_tensor(ffi.cast('{} *'.format(c_type), A.ctypes.data),
                              ffi.cast('{} *'.format(c_type), w.ctypes.data),
                              ffi.cast('{} *'.format(c_type), c.ctypes.data),
                              ffi.cast('double *', coords.ctypes.data),
                              ffi.cast('int *', facets.ctypes.data),
                              ffi.cast('int *', orients.ctypes.data))
    print(A)
def test_interior_facet_integral(mode, compile_args):
    cell = ufl.triangle
    element = ufl.FiniteElement("Lagrange", cell, 1)
    u, v = ufl.TrialFunction(element), ufl.TestFunction(element)
    a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS
    forms = [a0]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, parameters={'scalar_type': mode},
        cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    ffi = cffi.FFI()
    form0 = compiled_forms[0]

    c_type, np_type = float_to_type(mode)

    integral0 = form0.integrals(module.lib.interior_facet)[0]
    A = np.zeros((6, 6), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np.float64)
    facets = np.array([0, 2], dtype=np.intc)
    perms = np.array([0, 1], dtype=np.uint8)

    coords = np.array([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                       [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0]],
                      dtype=np.float64)
    integral0.tabulate_tensor(ffi.cast('{} *'.format(c_type), A.ctypes.data),
                              ffi.cast('{} *'.format(c_type), w.ctypes.data),
                              ffi.cast('{} *'.format(c_type), c.ctypes.data),
                              ffi.cast('double *', coords.ctypes.data),
                              ffi.cast('int *', facets.ctypes.data),
                              ffi.cast('uint8_t *', perms.ctypes.data))
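A minimal, hypothetical follow-up check for the JIT test above (not in the original source, and assuming nothing beyond numpy): the form inner(jump(grad(u)), jump(grad(v))) * dS is symmetric in u and v, so the tabulated 6x6 interior-facet tensor should come out symmetric. Inside the test, after the tabulate_tensor call, one could add:

    # Hypothetical extra assertion: the bilinear form is symmetric, so the
    # element tensor tabulated for this facet pair should be symmetric too.
    assert np.allclose(A, A.T)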
def b(tau_S, v):
    n = FacetNormal(mesh)
    return inner(tau_S, grad(grad(v))) * dx \
        - ufl.dot(ufl.dot(tau_S('+'), n('+')), n('+')) * jump(grad(v), n) * dS \
        - ufl.dot(ufl.dot(tau_S, n), n) * ufl.dot(grad(v), n) * ds
)
source = lambda u1, u2, u3, v: -1 / spiral_eps * u1 * (1 - u2) * (u3 - ustar(v))
xForm = inner(diffusiveFlux(u, grad(u)), grad(phi)) * dx
xForm += ufl.conditional(uh_n < ustar(vh_n), source(u, uh_n, uh_n, vh_n),
                         source(uh_n, u, uh_n, vh_n)) * phi * dx

# <markdowncell>
# To handle a possibly non-conforming intersection we add DG-type
# skeleton terms:
# <codecell>

penalty = 5 * (order * (order + 1)) * spiral_D
if nonConforming:
    xForm -= (inner(outer(jump(u), n('+')), avg(diffusiveFlux(u, grad(phi)))) +
              inner(avg(diffusiveFlux(u, grad(u))), outer(jump(phi), n('+')))) * dS
    xForm += penalty / hS * inner(jump(u), jump(phi)) * dS

# <markdowncell>
# After adding the time discretization (a simple backward Euler scheme),
# the model is now completely implemented and the scheme can be created:
# <codecell>

form = (inner(u, phi) - inner(uh_n, phi)) * dx + dt * xForm
solverParameters = \
    {"newton.tolerance": 1e-8,
     "newton.linear.tolerance": 1e-12,
     "newton.linear.preconditioning.method": "amg-ilu",
     "newton.linear.maxiterations": 1000,
     "newton.verbose": False,
def setup_problem(self, debug=False): # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): drho_integral = self.tdrho * self.wrho * self.dx dU_integral = self.tdU * self.wU * self.dx self.A = fe.assemble(drho_integral + dU_integral) # if self.solver_type == 'lu': # self.solver = fe.LUSolver( # self.A, # method=self.solver_type # ) # self.solver.parameters['reuse_factorization'] = True # else: # self.solver = fe.KrylovSolver( # self.A, # self.solver_type, # self.preconditioner_type # ) self.dsol = Function(self.VS) self.drho, self.dU = self.dsol.sub(0), self.dsol.sub(1) # # assemble RHS (has to be done for each time point) # if not hasattr(self, 'rho_terms'): self.sigma = self.params['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rho_min = self.params['rho_min'] self.rhopen = self.params['rhopen'] self.grhopen = self.params['grhopen'] self.v = -ufl.grad(self.V(self.iU, self.irho)) self.flux = self.v * self.irho self.vn = ufl.max_value(ufl.dot(self.v, self.n), 0) self.facet_flux = (self.vn('+') * self.irho('+') - self.vn('-') * self.irho('-')) self.rho_flux_jump = -self.facet_flux * ufl.jump( self.wrho) * self.dS self.rho_grad_move = ufl.dot(self.flux, ufl.grad( self.wrho)) * self.dx self.rho_penalty = -( (self.rhopen * self.degree**2 / self.havg) * ufl.dot( ufl.jump(self.irho, self.n), ufl.jump(self.wrho, self.n)) * self.dS) # self.facet_flux = ( # self.vn('+')*self.rho('+') - self.vn('-')*self.rho('-') # ) # self.rho_flux_jump = -self.facet_flux*ufl.jump(self.wrho)*self.dS # self.rho_grad_move = ufl.dot(self.flux, ufl.grad(self.wrho))*self.dx self.jump_grhow = ( self.s2 * ufl.jump(self.wrho * ufl.grad(self.irho), self.n) * self.dS) self.rho_diffusion = -self.s2 * ufl.dot(ufl.grad( self.irho), ufl.grad(self.wrho)) * self.dx # self.rho_penalty = -( # (self.rhopen * self.degree**2 / self.havg) * # ufl.dot(ufl.jump(self.rho, self.n), # ufl.jump(self.wrho, self.n)) * self.dS # ) self.grho_penalty = -(self.grhopen * self.degree**2 * (ufl.jump(ufl.grad(self.irho), self.n) * ufl.jump(ufl.grad(self.wrho), self.n)) * self.dS) self.rho_terms = ( # advection terms self.rho_flux_jump + self.rho_grad_move + # diffusive terms self.rho_diffusion + self.jump_grhow + # penalty terms (to enforce continuity) self.rho_penalty + self.grho_penalty) if not hasattr(self, 'U_terms'): self.U_min = self.params['U_min'] self.gamma = self.params['gamma'] self.s = self.params['s'] self.D = self.params['D'] self.Upen = self.params['Upen'] self.gUpen = self.params['gUpen'] self.U_decay = -self.gamma * self.iU * self.wU * self.dx self.U_secretion = self.s * self.irho * self.wU * self.dx self.jump_gUw = (self.D * ufl.jump(self.wU * ufl.grad(self.iU), self.n) * self.dS) self.U_diffusion = -self.D * ufl.dot(ufl.grad(self.iU), ufl.grad(self.wU)) * self.dx self.U_penalty = -( (self.Upen * self.degree**2 / self.havg) * ufl.dot(ufl.jump(self.iU, self.n), ufl.jump(self.wU, self.n)) * self.dS) self.gU_penalty = -(self.gUpen * self.degree**2 * (ufl.jump(ufl.grad(self.iU), self.n) * ufl.jump(ufl.grad(self.wU), self.n)) * self.dS) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty) if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): self.J_terms = fe.derivative(self.all_terms, self.sol)
def setup_problem(self, debug=False): # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): drho_integral = vectotal([ tdrho * wrho * self.dx for tdrho, wrho in zip(self.tdrhos, self.wrhos) ]) dU_integral = vectotal( [tdU * wU * self.dx for tdU, wU in zip(self.tdUs, self.wUs)]) self.A = fe.assemble(drho_integral + dU_integral) for bc in self.bcs: bc.apply(self.A) self.dsol = Function(self.VS) self.drhos = self.dsol.split()[:2**self.dim] self.dUs = self.dsol.split()[2**self.dim:] # # These are the values of rho and U themselves (not their # symmetrized versions) on all subdomains of the original # domain. # if not hasattr(self, 'rhosds'): self.rhosds = matmul(self.eomat, self.irhos) # self.Usds is a list of nligands lists. Sublist i is of # length 2**dim and lists the value of ligand i on each of the # 2**dim subdomains. # if not hasattr(self, 'Usds'): self.Usds = [ matmul(self.eomat, self.iUs[i * 2**self.dim:(i + 1) * 2**self.dim]) for i in range(self.nligands) ] # # assemble RHS (for each time point, but compile only once) # if not hasattr(self, 'rho_terms'): self.sigma = self.params['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rho_min = self.params['rho_min'] self.rhopen = self.params['rhopen'] self.grhopen = self.params['grhopen'] # # Compute fluxes on subdomains. # Vsds is a list of length 2**dim, the value of V on each # subdomain. # self.Vsds = [] for Usd, rhosd in zip(zip(*self.Usds), self.rhosds): self.Vsds.append(self.V(Usd, rhosd)) # # I may need to adjust the signs of the subdomain vs by # the symmetries of the combinations # self.vsds = [ -ufl.grad(Vsd) - (self.s2 * ufl.grad(rhosd) / ufl.max_value(rhosd, self.rho_min)) for Vsd, rhosd in zip(self.Vsds, self.rhosds) ] self.fluxsds = [ vsd * rhosd for vsd, rhosd in zip(self.vsds, self.rhosds) ] self.vnsds = [ ufl.max_value(ufl.dot(vsd, self.n), 0) for vsd in self.vsds ] self.facet_fluxsds = [ (vnsd('+') * ufl.max_value(rhosd('+'), 0.0) - vnsd('-') * ufl.max_value(rhosd('-'), 0.0)) for vnsd, rhosd in zip(self.vnsds, self.rhosds) ] # # Now combine the subdomain fluxes to get the fluxes for # the symmetrized functions # self.fluxs = matmul((2.0**-self.dim) * self.eomat, self.fluxsds) self.facet_fluxs = matmul((2.0**-self.dim) * self.eomat, self.facet_fluxsds) self.rho_flux_jump = vectotal([ -facet_flux * ufl.jump(wrho) * self.dS for facet_flux, wrho in zip(self.facet_fluxs, self.wrhos) ]) self.rho_grad_move = vectotal([ ufl.dot(flux, ufl.grad(wrho)) * self.dx for flux, wrho in zip(self.fluxs, self.wrhos) ]) self.rho_penalty = vectotal([ -(self.rhopen * self.degree**2 / self.havg) * ufl.dot(ufl.jump(rho, self.n), ufl.jump(wrho, self.n)) * self.dS for rho, wrho in zip(self.irhos, self.wrhos) ]) self.grho_penalty = vectotal([ -self.grhopen * self.degree**2 * (ufl.jump(ufl.grad(rho), self.n) * ufl.jump(ufl.grad(wrho), self.n)) * self.dS for rho, wrho in zip(self.irhos, self.wrhos) ]) self.rho_terms = (self.rho_flux_jump + self.rho_grad_move + self.rho_penalty + self.grho_penalty) if not hasattr(self, 'U_terms'): self.U_min = self.params['U_min'] self.Upen = self.params['Upen'] self.gUpen = self.params['gUpen'] self.U_decay = 0.0 self.U_secretion = 0.0 self.jump_gUw = 0.0 self.U_diffusion = 0.0 self.U_penalty = 0.0 self.gU_penalty = 0.0 for j, lig in enumerate(self.ligands.ligands()): sl = slice(j * 2**self.dim, (j + 1) * 2**self.dim) self.U_decay += -lig.gamma * sum([ iUi * wUi * self.dx for iUi, wUi in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_secretion += lig.s * sum([ rho * wU * self.dx 
for rho, wU in zip(self.irhos, self.wUs[sl]) ]) self.jump_gUw += lig.D * sum([ ufl.jump(wU * ufl.grad(U), self.n) * self.dS for wU, U in zip(self.wUs[sl], self.iUs[sl]) ]) self.U_diffusion += -lig.D * sum([ ufl.dot(ufl.grad(U), ufl.grad(wU)) * self.dx for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_penalty += -self.Upen * self.degree**2 * sum([ (1.0 / self.havg) * ufl.dot(ufl.jump(U, self.n), ufl.jump(wU, self.n)) * self.dS for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.gU_penalty += -self.gUpen * self.degree**2 * sum([ ufl.jump(ufl.grad(U), self.n) * ufl.jump(ufl.grad(wU), self.n) * self.dS for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty) if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): self.J_terms = fe.derivative(self.all_terms, self.sol)
# Error estimator
# <codecell>
fvspace = dune.fem.space.finiteVolume(uh.space.grid)
estimate = fvspace.interpolate([0], name="estimate")
estimate_pm1 = fvspace.interpolate([0], name="estimate_pm1")
chi = ufl.TestFunction(fvspace)
hT = ufl.MaxCellEdgeLength(fvspace.cell())
he = ufl.MaxFacetEdgeLength(fvspace.cell())('+')
n = ufl.FacetNormal(fvspace.cell())

residual = (u - uh_n) / dt - div(diffusiveFlux(u, grad(u))) + source(u, u, u, vh)
estimator_ufl = hT**2 * residual**2 * chi * dx +\
    he * inner(jump(diffusiveFlux(u, grad(u))), n('+'))**2 * avg(chi) * dS +\
    1 / he * jump(u)**2 * avg(chi) * dS
estimator = dune.fem.operator.galerkin(estimator_ufl)

# <markdowncell>
# Time loop
# <codecell>
nextSaveTime = saveInterval
count = 0
levelFunction = dune.fem.function.levelFunction(gridView)
gridView.writeVTK("spiral", pointdata=[uh, vh], number=count,
                  celldata=[estimate, levelFunction])
count += 1


@gridFunction(gridView, name="pEstimate")
def pEstimator(e, x):
def xtest_hexahedron_integral(space_type, space_order):
    domain = ufl.Mesh(ufl.VectorElement("Lagrange", "hexahedron", 1))
    temp_points = np.array([[-1., 0., -1.], [0., 0., 0.], [1., 0., 1.],
                            [-1., 1., 1.], [0., 1., 0.], [1., 1., 1.],
                            [-1., 0., 0.], [0., 0., 1.], [1., 0., 2.],
                            [-1., 1., 2.], [0., 1., 1.], [1., 1., 2.]])

    for repeat in range(10):
        order = [i for i, j in enumerate(temp_points)]
        shuffle(order)

        points = np.zeros(temp_points.shape)
        for i, j in enumerate(order):
            points[j] = temp_points[i]

        connections = {0: [1, 2, 4], 1: [0, 3, 5], 2: [0, 3, 6], 3: [1, 2, 7],
                       4: [0, 5, 6], 5: [1, 4, 7], 6: [2, 4, 7], 7: [3, 5, 6]}

        cells = []
        for cell in [[0, 1, 3, 4, 6, 7, 9, 10], [1, 2, 4, 5, 7, 8, 10, 11]]:
            # Randomly number the cell
            start = choice(range(8))
            cell_order = [start]
            for i in range(3):
                diff = choice([i for i in connections[start]
                               if i not in cell_order]) - cell_order[0]
                cell_order += [c + diff for c in cell_order]
            cells.append([order[cell[i]] for i in cell_order])

        mesh = create_mesh(MPI.COMM_WORLD, cells, points, domain)
        V = FunctionSpace(mesh, (space_type, space_order))
        Vvec = VectorFunctionSpace(mesh, ("P", 1))
        dofs = [i for i in V.dofmap.cell_dofs(0) if i in V.dofmap.cell_dofs(1)]
        for d in dofs:
            v = Function(V)
            v.vector[:] = [1 if i == d else 0
                           for i in range(v.vector.local_size)]
            if space_type in ["NCF"]:
                # Hdiv
                def normal(x):
                    values = np.zeros((3, x.shape[1]))
                    values[0] = [1 for i in values[0]]
                    return values

                n = Function(Vvec)
                n.interpolate(normal)
                form = ufl.inner(ufl.jump(v), n) * ufl.dS
            elif space_type in ["NCE"]:
                # Hcurl
                def tangent1(x):
                    values = np.zeros((3, x.shape[1]))
                    values[1] = [1 for i in values[1]]
                    return values

                def tangent2(x):
                    values = np.zeros((3, x.shape[1]))
                    values[2] = [1 for i in values[2]]
                    return values

                t1 = Function(Vvec)
                t1.interpolate(tangent1)
                t2 = Function(Vvec)
                t2.interpolate(tangent2)
                form = ufl.inner(ufl.jump(v), t1) * ufl.dS
                form += ufl.inner(ufl.jump(v), t2) * ufl.dS
            else:
                form = ufl.jump(v) * ufl.dS
            value = fem.assemble_scalar(form)
            assert np.isclose(value, 0)
                  1)
# No normal displacement for solid on left side
bcd3 = DirichletBC(W.sub(0).sub(0), Constant(0.0), boundaries, 3)
# No normal displacement for solid on right side
bcd4 = DirichletBC(W.sub(0).sub(1), Constant(0.0), boundaries, 4)
# No normal displacement for solid on bottom side
bcs = BlockDirichletBC([[bcd1, bcd3, bcd4], []])

a = inner(2 * mu_l * strain(u) + lmbda_l * div(u) * I, sym(grad(v))) * dx
b = inner(-alpha * p * I, sym(grad(v))) * dx
c = rho * alpha * div(u) * q * dx
d = (rho * ct * p * q * dx
     + dt * dot(rho * k / vis * grad(p), grad(q)) * dx
     - dt * dot(avg_w(rho * k / vis * grad(p), weight_e(k, n)), jump(q, n)) * dS
     - theta * dt * dot(avg_w(rho * k / vis * grad(q), weight_e(k, n)), jump(p, n)) * dS
     + dt * penalty1 / h_avg * avg(rho) * k_e(k, n) / avg(vis)
     * dot(jump(p, n), jump(q, n)) * dS
     - dt * dot(rho * k / vis * grad(p), q * n) * ds(2)
     - dt * dot(rho * k / vis * grad(q), p * n) * ds(2)
     + dt * (penalty2 / h * rho / vis * dot(dot(n, k), n) * dot(p * n, q * n)) * ds(2))
lhs = [[a, b], [c, d]]

f_u = (inner(f, v) * dx + dot(f_stress_y * n, v) * ds(2))
f_p = (rho * alpha * div(u0) * q * dx
       + rho * ct * p0 * q * dx +
def setup_problem(self, t, debug=False): self.set_time(t) # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): logVARIABLE('making matrix A') self.drho_integral = sum([ tdrho * wrho * self.dx for tdrho, wrho in zip(self.tdrhos, self.wrhos) ]) self.dU_integral = sum( [tdU * wU * self.dx for tdU, wU in zip(self.tdUs, self.wUs)]) logVARIABLE('assembling A') self.A = fe.PETScMatrix() logVARIABLE('self.A', self.A) fe.assemble(self.drho_integral + self.dU_integral, tensor=self.A) logVARIABLE('A assembled. Applying BCs') pA = fe.as_backend_type(self.A).mat() Adiag = pA.getDiagonal() logVARIABLE('Adiag.array', Adiag.array) # self.A = fe.assemble(self.drho_integral + self.dU_integral + # self.dP_integral) for bc in self.bcs: bc.apply(self.A) Adiag = pA.getDiagonal() logVARIABLE('Adiag.array', Adiag.array) self.dsol = Function(self.VS) dsolsplit = self.dsol.split() self.drhos, self.dUs = (dsolsplit[:2**self.dim], dsolsplit[2**self.dim:]) # # assemble RHS (for each time point, but compile only once) # # # These are the values of rho and U themselves (not their # symmetrized versions) on all subdomains of the original # domain. # if not hasattr(self, 'rhosds'): self.rhosds = matmul(self.eomat, self.irhos) # self.Usds is a list of nligands lists. Sublist i is of # length 2**dim and lists the value of ligand i on each of the # 2**dim subdomains. # if not hasattr(self, 'Usds'): self.Usds = [ matmul(self.eomat, self.iUs[i * 2**self.dim:(i + 1) * 2**self.dim]) for i in range(self.nligands) ] if not hasattr(self, 'rho_terms'): logVARIABLE('making rho_terms') self.sigma = self.iparams['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rhomin = self.iparams['rhomin'] self.rhopen = self.iparams['rhopen'] self.grhopen = self.iparams['grhopen'] # # Compute fluxes on subdomains. # Vsds is a list of length 2**dim, the value of V on each # subdomain. 
# self.Vsds = [] for Usd, rhosd in zip(zip(*self.Usds), self.rhosds): self.Vsds.append(self.V(Usd, ufl.max_value(rhosd, self.rhomin))) self.vsds = [ -ufl.grad(Vsd) - (self.s2 * ufl.grad(rhosd) / ufl.max_value(rhosd, self.rhomin)) for Vsd, rhosd in zip(self.Vsds, self.rhosds) ] self.fluxsds = [ vsd * rhosd for vsd, rhosd in zip(self.vsds, self.rhosds) ] self.vnsds = [ ufl.max_value(ufl.dot(vsd, self.n), 0) for vsd in self.vsds ] self.facet_fluxsds = [ (vnsd('+') * ufl.max_value(rhosd('+'), 0.0) - vnsd('-') * ufl.max_value(rhosd('-'), 0.0)) for vnsd, rhosd in zip(self.vnsds, self.rhosds) ] # # Now combine the subdomain fluxes to get the fluxes for # the symmetrized functions # self.fluxs = matmul((2.0**-self.dim) * self.eomat, self.fluxsds) self.facet_fluxs = matmul((2.0**-self.dim) * self.eomat, self.facet_fluxsds) self.rho_flux_jump = sum([ -facet_flux * ufl.jump(wrho) * self.dS for facet_flux, wrho in zip(self.facet_fluxs, self.wrhos) ]) self.rho_grad_move = sum([ ufl.dot(flux, ufl.grad(wrho)) * self.dx for flux, wrho in zip(self.fluxs, self.wrhos) ]) self.rho_penalty = sum([ -(self.degree**2 / self.havg) * ufl.dot(ufl.jump(rho, self.n), ufl.jump(self.rhopen * wrho, self.n)) * self.dS for rho, wrho in zip(self.irhos, self.wrhos) ]) self.grho_penalty = sum([ self.degree**2 * (ufl.jump(ufl.grad(rho), self.n) * ufl.jump(ufl.grad(-self.grhopen * wrho), self.n)) * self.dS for rho, wrho in zip(self.irhos, self.wrhos) ]) self.rho_terms = (self.rho_flux_jump + self.rho_grad_move + self.rho_penalty + self.grho_penalty) logVARIABLE('rho_terms made') if not hasattr(self, 'U_terms'): logVARIABLE('making U_terms') self.Umin = self.iparams['Umin'] self.Upen = self.iparams['Upen'] self.gUpen = self.iparams['gUpen'] self.U_decay = 0.0 self.U_secretion = 0.0 self.jump_gUw = 0.0 self.U_diffusion = 0.0 self.U_penalty = 0.0 self.gU_penalty = 0.0 for j, lig in enumerate(self.iligands.ligands()): sl = slice(j * 2**self.dim, (j + 1) * 2**self.dim) self.U_decay += sum([ -lig.gamma * iUi * wUi * self.dx for iUi, wUi in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_secretion += sum([ lig.s * rho * wU * self.dx for rho, wU in zip(self.irhos, self.wUs[sl]) ]) self.jump_gUw += sum([ ufl.jump(lig.D * wU * ufl.grad(U), self.n) * self.dS for wU, U in zip(self.wUs[sl], self.iUs[sl]) ]) self.U_diffusion += sum([ -lig.D * ufl.dot(ufl.grad(U), ufl.grad(wU)) * self.dx for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_penalty += sum([ (-self.degree**2 / self.havg) * ufl.dot(ufl.jump(U, self.n), ufl.jump(self.Upen * wU, self.n)) * self.dS for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.gU_penalty += sum([ -self.degree**2 * ufl.jump(ufl.grad(U), self.n) * ufl.jump(ufl.grad(self.gUpen * wU), self.n) * self.dS for U, wU in zip(self.iUs[sl], self.wUs[sl]) ]) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty) logVARIABLE('U_terms made') if not hasattr(self, 'all_terms'): logVARIABLE('making all_terms') self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): logVARIABLE('making J_terms') self.J_terms = fe.derivative(self.all_terms, self.sol)
def setup_problem(self, t, debug=False): self.set_time(t) # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): self.drho_integral = self.tdrho * self.wrho * self.dx self.dU_integral = sum([ tdUi * wUi * self.dx for tdUi, wUi in zip(self.tdUs, self.wUs) ]) logVARIABLE('assembling A') self.A = PETScMatrix() logVARIABLE('self.A', self.A) fe.assemble(self.drho_integral + self.dU_integral, tensor=self.A) logVARIABLE('A assembled. Applying BCs') self.dsol = Function(self.VS) dsolsplit = self.dsol.split() self.drho, self.dUs = dsolsplit[0], dsolsplit[1:] # # assemble RHS (for each time point, but compile only once) # if not hasattr(self, 'rho_terms'): self.sigma = self.iparams['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rhomin = self.iparams['rhomin'] self.rhopen = self.iparams['rhopen'] self.grhopen = self.iparams['grhopen'] self.v = -ufl.grad( self.V(self.iUs, ufl.max_value(self.irho, self.rhomin)) - (self.s2 * ufl.grad(self.irho) / ufl.max_value(self.irho, self.rhomin))) self.flux = self.v * self.irho self.vn = ufl.max_value(ufl.dot(self.v, self.n), 0) self.facet_flux = ufl.jump(self.vn * ufl.max_value(self.irho, 0.0)) self.rho_flux_jump = -self.facet_flux * ufl.jump( self.wrho) * self.dS self.rho_grad_move = ufl.dot(self.flux, ufl.grad( self.wrho)) * self.dx self.rho_penalty = -( (self.degree**2 / self.havg) * ufl.dot(ufl.jump(self.irho, self.n), ufl.jump(self.rhopen * self.wrho, self.n)) * self.dS) self.grho_penalty = -( self.degree**2 * (ufl.jump(ufl.grad(self.irho), self.n) * ufl.jump( ufl.grad(self.grhopen * self.wrho), self.n)) * self.dS) self.rho_terms = (self.rho_flux_jump + self.rho_grad_move + self.rho_penalty + self.grho_penalty) if not hasattr(self, 'U_terms'): self.Umin = self.iparams['Umin'] self.Upen = self.iparams['Upen'] self.gUpen = self.iparams['gUpen'] self.U_decay = sum([ -lig.gamma * iUi * wUi * self.dx for lig, iUi, wUi in zip( self.iligands.ligands(), self.iUs, self.wUs) ]) self.U_secretion = sum([ lig.s * self.irho * wUi * self.dx for lig, wUi in zip(self.iligands.ligands(), self.wUs) ]) self.jump_gUw = sum([ ufl.jump(lig.D * wUi * ufl.grad(iUi), self.n) * self.dS for lig, wUi, iUi in zip(self.iligands.ligands(), self.wUs, self.iUs) ]) self.U_diffusion = sum([ -lig.D * ufl.dot(ufl.grad(iUi), ufl.grad(wUi)) * self.dx for lig, iUi, wUi in zip(self.iligands.ligands(), self.iUs, self.wUs) ]) self.U_penalty = sum([ -(self.degree**2 / self.havg) * ufl.dot( ufl.jump(iUi, self.n), ufl.jump(self.Upen * wUi, self.n)) * self.dS for iUi, wUi in zip(self.iUs, self.wUs) ]) self.gU_penalty = sum([ -self.degree**2 * ufl.jump(ufl.grad(iUi), self.n) * ufl.jump(ufl.grad(self.gUpen * wUi), self.n) * self.dS for iUi, wUi in zip(self.iUs, self.wUs) ]) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty) if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): self.J_terms = fe.derivative(self.all_terms, self.sol)
def model(space, epsilon, weakBnd, skeleton, useMol): u = TrialFunction(space) v = TestFunction(space) n = FacetNormal(space) he = avg(CellVolume(space)) / FacetArea(space) hbnd = CellVolume(space) / FacetArea(space) x = SpatialCoordinate(space) #exact = sin(x[0]*x[1]) # atan(1*x[1]) exact = uflFunction(space.gridView, name="exact", order=3, ufl=sin(x[0] * x[1])) # diffusion factor eps = 1 # Constant(epsilon,"eps") # transport direction and upwind flux b = as_vector([1, 0]) hatb = (dot(b, n) + abs(dot(b, n))) / 2.0 # characteristic function for left/right boundary dD = conditional((1 + x[0]) * (1 - x[0]) < 1e-10, 1, 0) # penalty parameter beta = Constant(10 * space.order**2 if space.order > 0 else 1, "beta") rhs = (-div(eps * grad(exact) - b * exact) + exact) * v * dx aInternal = (dot(eps * grad(u) - b * u, grad(v)) + dot(u, v)) * dx diffSkeleton = eps*beta/he*jump(u)*jump(v)*dS -\ eps*dot(avg(grad(u)),n('+'))*jump(v)*dS -\ eps*jump(u)*dot(avg(grad(v)),n('+'))*dS diffSkeleton -= eps * dot(grad(exact), n) * v * (1 - dD) * ds if weakBnd: diffSkeleton += eps*beta/hbnd*(u-exact)*v*dD*ds -\ eps*dot(grad(exact),n)*v*dD*ds advSkeleton = jump(hatb * u) * jump(v) * dS if weakBnd: advSkeleton += (hatb * u + (dot(b, n) - hatb) * exact) * v * dD * ds if skeleton: form = aInternal + diffSkeleton + advSkeleton else: form = aInternal if weakBnd and skeleton: strongBC = None else: strongBC = None # DirichletBC(space,exact,dD) if space.storage[0] == "fem": solver = {"solver": ("suitesparse", "umfpack")} else: solver = { "solver": "bicgstab", "parameters": { "newton.linear.preconditioning.method": "jacobi", "newton.linear.tolerance": 1e-13 } } if useMol: scheme = solutionMolScheme([form == rhs, strongBC], **solver) else: scheme = solutionScheme([form == rhs, strongBC], **solver) uh = space.interpolate(exact, name="solution") A = linear(scheme) return scheme, uh, A, exact
hbnd = CellVolume(space) / FacetArea(space)
x = SpatialCoordinate(space)

# diffusion factor
eps = Constant(0.1, "eps")
# transport direction and upwind flux
b = as_vector([1, 0])
hatb = (dot(b, n) + abs(dot(b, n))) / 2.0
# boundary values (for left/right boundary)
dD = conditional((1 + x[0]) * (1 - x[0]) < 1e-10, 1, 0)
g = conditional(x[0] < 0, atan(10 * x[1]), 0)
# penalty parameter
beta = 10 * order * order

aInternal = dot(eps * grad(u) - b * u, grad(v)) * dx
diffSkeleton = eps * beta / he * jump(u) * jump(v) * dS -\
    eps * dot(avg(grad(u)), n('+')) * jump(v) * dS -\
    eps * jump(u) * dot(avg(grad(v)), n('+')) * dS
diffSkeleton += eps * beta / hbnd * (u - g) * v * dD * ds -\
    eps * dot(grad(u), n) * v * dD * ds
advSkeleton = jump(hatb * u) * jump(v) * dS
advSkeleton += (hatb * u + (dot(b, n) - hatb) * g) * v * dD * ds

form = aInternal + diffSkeleton + advSkeleton
scheme = solutionScheme(form == 0, solver="gmres",
                        parameters={"newton.linear.preconditioning.method": "jacobi"})
uh = space.interpolate(0, name="solution")
scheme.solve(target=uh)
uh.plot()
# <markdowncell>
def run_dg_test(mesh, V, degree): """ Manufactured Poisson problem, solving u = x[component]**n, where n is the degree of the Lagrange function space. """ u, v = TrialFunction(V), TestFunction(V) # Exact solution x = SpatialCoordinate(mesh) u_exact = x[1]**degree # Coefficient k = Function(V) k.vector.set(2.0) k.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # Source term f = -div(k * grad(u_exact)) # Mesh normals and element size n = FacetNormal(mesh) h = CellDiameter(mesh) h_avg = (h("+") + h("-")) / 2.0 # Penalty parameter alpha = 32 dx_ = dx(metadata={"quadrature_degree": -1}) ds_ = ds(metadata={"quadrature_degree": -1}) dS_ = dS(metadata={"quadrature_degree": -1}) with common.Timer("Compile forms"): a = inner(k * grad(u), grad(v)) * dx_ \ - k("+") * inner(avg(grad(u)), jump(v, n)) * dS_ \ - k("+") * inner(jump(u, n), avg(grad(v))) * dS_ \ + k("+") * (alpha / h_avg) * inner(jump(u, n), jump(v, n)) * dS_ \ - inner(k * grad(u), v * n) * ds_ \ - inner(u * n, k * grad(v)) * ds_ \ + (alpha / h) * inner(k * u, v) * ds_ L = inner(f, v) * dx_ - inner(k * u_exact * n, grad(v)) * ds_ \ + (alpha / h) * inner(k * u_exact, v) * ds_ for integral in a.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( a) for integral in L.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( L) with common.Timer("Assemble vector"): b = assemble_vector(L) b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) with common.Timer("Assemble matrix"): A = assemble_matrix(a, []) A.assemble() with common.Timer("Solve"): # Create LU linear solver solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = Function(V) solver.solve(b, uh.vector) uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) with common.Timer("Error functional compile"): # Calculate error M = (u_exact - uh)**2 * dx M = fem.Form(M) with common.Timer("Error assembly"): error = mesh.mpi_comm().allreduce(assemble_scalar(M), op=MPI.SUM) common.list_timings(MPI.COMM_WORLD, [common.TimingType.wall]) assert np.absolute(error) < 1.0e-14
def test_manufactured_poisson_dg(degree, filename, datadir): """ Manufactured Poisson problem, solving u = x[component]**n, where n is the degree of the Lagrange function space. """ with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r", encoding=XDMFFile.Encoding.ASCII) as xdmf: mesh = xdmf.read_mesh(name="Grid") V = FunctionSpace(mesh, ("DG", degree)) u, v = TrialFunction(V), TestFunction(V) # Exact solution x = SpatialCoordinate(mesh) u_exact = x[1]**degree # Coefficient k = Function(V) k.vector.set(2.0) k.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # Source term f = -div(k * grad(u_exact)) # Mesh normals and element size n = FacetNormal(mesh) h = CellDiameter(mesh) h_avg = (h("+") + h("-")) / 2.0 # Penalty parameter alpha = 32 dx_ = dx(metadata={"quadrature_degree": -1}) ds_ = ds(metadata={"quadrature_degree": -1}) dS_ = dS(metadata={"quadrature_degree": -1}) a = inner(k * grad(u), grad(v)) * dx_ \ - k("+") * inner(avg(grad(u)), jump(v, n)) * dS_ \ - k("+") * inner(jump(u, n), avg(grad(v))) * dS_ \ + k("+") * (alpha / h_avg) * inner(jump(u, n), jump(v, n)) * dS_ \ - inner(k * grad(u), v * n) * ds_ \ - inner(u * n, k * grad(v)) * ds_ \ + (alpha / h) * inner(k * u, v) * ds_ L = inner(f, v) * dx_ - inner(k * u_exact * n, grad(v)) * ds_ \ + (alpha / h) * inner(k * u_exact, v) * ds_ for integral in a.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( a) for integral in L.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( L) b = assemble_vector(L) b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) A = assemble_matrix(a, []) A.assemble() # Create LU linear solver solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = Function(V) solver.solve(b, uh.vector) uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) error = mesh.mpi_comm().allreduce(assemble_scalar((u_exact - uh)**2 * dx), op=MPI.SUM) assert np.absolute(error) < 1.0e-14
def compute(space, epsilon, weakBnd, skeleton, mol=None): u = TrialFunction(space) v = TestFunction(space) n = FacetNormal(space) he = avg(CellVolume(space)) / FacetArea(space) hbnd = CellVolume(space) / FacetArea(space) x = SpatialCoordinate(space) exact = uflFunction(space.gridView, name="exact", order=3, ufl=sin(x[0] * x[1])) uh = space.interpolate(exact, name="solution") # diffusion factor eps = Constant(epsilon, "eps") # transport direction and upwind flux b = as_vector([1, 0]) hatb = (dot(b, n) + abs(dot(b, n))) / 2.0 # characteristic function for left/right boundary dD = conditional((1 + x[0]) * (1 - x[0]) < 1e-10, 1, 0) # penalty parameter beta = Constant(20 * space.order**2, "beta") rhs = -(div(eps * grad(exact) - b * exact)) * v * dx aInternal = dot(eps * grad(u) - b * u, grad(v)) * dx aInternal -= eps * dot(grad(exact), n) * v * (1 - dD) * ds diffSkeleton = eps*beta/he*jump(u)*jump(v)*dS -\ eps*dot(avg(grad(u)),n('+'))*jump(v)*dS -\ eps*jump(u)*dot(avg(grad(v)),n('+'))*dS if weakBnd: diffSkeleton += eps*beta/hbnd*(u-exact)*v*dD*ds -\ eps*dot(grad(exact),n)*v*dD*ds advSkeleton = jump(hatb * u) * jump(v) * dS if weakBnd: advSkeleton += (hatb * u + (dot(b, n) - hatb) * exact) * v * dD * ds if skeleton: form = aInternal + diffSkeleton + advSkeleton else: form = aInternal if weakBnd and skeleton: strongBC = None else: strongBC = DirichletBC(space, exact, dD) if space.storage[0] == "numpy": solver = { "solver": ("suitesparse", "umfpack"), "parameters": { "newton.verbose": True, "newton.linear.verbose": False, "newton.linear.tolerance": 1e-5, } } else: solver = { "solver": "bicgstab", "parameters": { "newton.linear.preconditioning.method": "ilu", "newton.linear.tolerance": 1e-13, "newton.verbose": True, "newton.linear.verbose": False } } if mol == 'mol': scheme = molSolutionScheme([form == rhs, strongBC], **solver) else: scheme = solutionScheme([form == rhs, strongBC], **solver) eoc = [] info = scheme.solve(target=uh) error = dot(uh - exact, uh - exact) error0 = math.sqrt(integrate(gridView, error, order=5)) print(error0, " # output", flush=True) for i in range(3): gridView.hierarchicalGrid.globalRefine(1) uh.interpolate(exact) scheme.solve(target=uh) error = dot(uh - exact, uh - exact) error1 = math.sqrt(integrate(gridView, error, order=5)) eoc += [math.log(error1 / error0) / math.log(0.5)] print(i, error0, error1, eoc, " # output", flush=True) error0 = error1 # print(space.order,epsilon,eoc) if (eoc[-1] - (space.order + 1)) < -0.1: print("ERROR:", space.order, epsilon, eoc) return eoc
def initialize(self, pc): """Set up the problem context. Take the original mixed problem and reformulate the problem as a hybridized mixed system. A KSP is created for the Lagrange multiplier system. """ from firedrake import (FunctionSpace, Function, Constant, TrialFunction, TrialFunctions, TestFunction, DirichletBC) from firedrake.assemble import (allocate_matrix, create_assembly_callable) from firedrake.formmanipulation import split_form from ufl.algorithms.replace import replace # Extract the problem context prefix = pc.getOptionsPrefix() + "hybridization_" _, P = pc.getOperators() self.ctx = P.getPythonContext() if not isinstance(self.ctx, ImplicitMatrixContext): raise ValueError( "The python context must be an ImplicitMatrixContext") test, trial = self.ctx.a.arguments() V = test.function_space() mesh = V.mesh() if len(V) != 2: raise ValueError("Expecting two function spaces.") if all(Vi.ufl_element().value_shape() for Vi in V): raise ValueError("Expecting an H(div) x L2 pair of spaces.") # Automagically determine which spaces are vector and scalar for i, Vi in enumerate(V): if Vi.ufl_element().sobolev_space().name == "HDiv": self.vidx = i else: assert Vi.ufl_element().sobolev_space().name == "L2" self.pidx = i # Create the space of approximate traces. W = V[self.vidx] if W.ufl_element().family() == "Brezzi-Douglas-Marini": tdegree = W.ufl_element().degree() else: try: # If we have a tensor product element h_deg, v_deg = W.ufl_element().degree() tdegree = (h_deg - 1, v_deg - 1) except TypeError: tdegree = W.ufl_element().degree() - 1 TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree) # Break the function spaces and define fully discontinuous spaces broken_elements = ufl.MixedElement( [ufl.BrokenElement(Vi.ufl_element()) for Vi in V]) V_d = FunctionSpace(mesh, broken_elements) # Set up the functions for the original, hybridized # and schur complement systems self.broken_solution = Function(V_d) self.broken_residual = Function(V_d) self.trace_solution = Function(TraceSpace) self.unbroken_solution = Function(V) self.unbroken_residual = Function(V) shapes = (V[self.vidx].finat_element.space_dimension(), np.prod(V[self.vidx].shape)) domain = "{[i,j]: 0 <= i < %d and 0 <= j < %d}" % shapes instructions = """ for i, j w[i,j] = w[i,j] + 1 end """ self.weight = Function(V[self.vidx]) par_loop((domain, instructions), ufl.dx, {"w": (self.weight, INC)}, is_loopy_kernel=True) instructions = """ for i, j vec_out[i,j] = vec_out[i,j] + vec_in[i,j]/w[i,j] end """ self.average_kernel = (domain, instructions) # Create the symbolic Schur-reduction: # Original mixed operator replaced with "broken" # arguments arg_map = {test: TestFunction(V_d), trial: TrialFunction(V_d)} Atilde = Tensor(replace(self.ctx.a, arg_map)) gammar = TestFunction(TraceSpace) n = ufl.FacetNormal(mesh) sigma = TrialFunctions(V_d)[self.vidx] if mesh.cell_set._extruded: Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_h + gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_v) else: Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS) # Here we deal with boundaries. If there are Neumann # conditions (which should be enforced strongly for # H(div)xL^2) then we need to add jump terms on the exterior # facets. If there are Dirichlet conditions (which should be # enforced weakly) then we need to zero out the trace # variables there as they are not active (otherwise the hybrid # problem is not well-posed). 
# If boundary conditions are contained in the ImplicitMatrixContext: if self.ctx.row_bcs: # Find all the subdomains with neumann BCS # These are Dirichlet BCs on the vidx space neumann_subdomains = set() for bc in self.ctx.row_bcs: if bc.function_space().index == self.pidx: raise NotImplementedError( "Dirichlet conditions for scalar variable not supported. Use a weak bc" ) if bc.function_space().index != self.vidx: raise NotImplementedError( "Dirichlet bc set on unsupported space.") # append the set of sub domains subdom = bc.sub_domain if isinstance(subdom, str): neumann_subdomains |= set([subdom]) else: neumann_subdomains |= set( as_tuple(subdom, numbers.Integral)) # separate out the top and bottom bcs extruded_neumann_subdomains = neumann_subdomains & { "top", "bottom" } neumann_subdomains = neumann_subdomains - extruded_neumann_subdomains integrand = gammar * ufl.dot(sigma, n) measures = [] trace_subdomains = [] if mesh.cell_set._extruded: ds = ufl.ds_v for subdomain in sorted(extruded_neumann_subdomains): measures.append({ "top": ufl.ds_t, "bottom": ufl.ds_b }[subdomain]) trace_subdomains.extend( sorted({"top", "bottom"} - extruded_neumann_subdomains)) else: ds = ufl.ds if "on_boundary" in neumann_subdomains: measures.append(ds) else: measures.extend((ds(sd) for sd in sorted(neumann_subdomains))) markers = [int(x) for x in mesh.exterior_facets.unique_markers] dirichlet_subdomains = set(markers) - neumann_subdomains trace_subdomains.extend(sorted(dirichlet_subdomains)) for measure in measures: Kform += integrand * measure trace_bcs = [ DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains ] else: # No bcs were provided, we assume weak Dirichlet conditions. # We zero out the contribution of the trace variables on # the exterior boundary. Extruded cells will have both # horizontal and vertical facets trace_subdomains = ["on_boundary"] if mesh.cell_set._extruded: trace_subdomains.extend(["bottom", "top"]) trace_bcs = [ DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains ] # Make a SLATE tensor from Kform K = Tensor(Kform) # Assemble the Schur complement operator and right-hand side self.schur_rhs = Function(TraceSpace) self._assemble_Srhs = create_assembly_callable( K * Atilde.inv * AssembledVector(self.broken_residual), tensor=self.schur_rhs, form_compiler_parameters=self.ctx.fc_params) mat_type = PETSc.Options().getString(prefix + "mat_type", "aij") schur_comp = K * Atilde.inv * K.T self.S = allocate_matrix(schur_comp, bcs=trace_bcs, form_compiler_parameters=self.ctx.fc_params, mat_type=mat_type, options_prefix=prefix) self._assemble_S = create_assembly_callable( schur_comp, tensor=self.S, bcs=trace_bcs, form_compiler_parameters=self.ctx.fc_params, mat_type=mat_type) with timed_region("HybridOperatorAssembly"): self._assemble_S() Smat = self.S.petscmat nullspace = self.ctx.appctx.get("trace_nullspace", None) if nullspace is not None: nsp = nullspace(TraceSpace) Smat.setNullSpace(nsp.nullspace(comm=pc.comm)) # Set up the KSP for the system of Lagrange multipliers trace_ksp = PETSc.KSP().create(comm=pc.comm) trace_ksp.setOptionsPrefix(prefix) trace_ksp.setOperators(Smat) trace_ksp.setUp() trace_ksp.setFromOptions() self.trace_ksp = trace_ksp split_mixed_op = dict(split_form(Atilde.form)) split_trace_op = dict(split_form(K.form)) # Generate reconstruction calls self._reconstruction_calls(split_mixed_op, split_trace_op)
# <markdowncell>
# Error estimator
# <codecell>
fvspace = dune.fem.space.finiteVolume(uh.space.grid)
estimate = fvspace.interpolate([0], name="estimate")
chi = ufl.TestFunction(fvspace)
hT = ufl.MaxCellEdgeLength(fvspace.cell())
he = ufl.MaxFacetEdgeLength(fvspace.cell())('+')
n = ufl.FacetNormal(fvspace.cell())

residual = (u - uh_n) / dt - div(diffusiveFlux) + source(u, u, u, vh)
estimator_ufl = hT**2 * residual**2 * chi * dx +\
    he * inner(jump(diffusiveFlux), n('+'))**2 * avg(chi) * dS
estimator = dune.fem.operator.galerkin(estimator_ufl)

# <markdowncell>
# Time loop
# <codecell>
nextSaveTime = saveInterval
count = 0
levelFunction = dune.fem.function.levelFunction(gridView)
gridView.writeVTK("spiral", pointdata=[uh, vh], number=count,
                  celldata=[estimate, levelFunction])
count += 1
def setup_problem(self, debug=False): # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): self.drho_integral = self.tdrho * self.wrho * self.dx self.dU_integral = self.tdU * self.wU * self.dx self.A = fe.assemble(self.drho_integral + self.dU_integral) # if self.solver_type == 'lu': # self.solver = fe.LUSolver( # self.A, # ) # self.solver.parameters['reuse_factorization'] = True # else: # self.solver = fe.KrylovSolver( # self.A, # self.solver_type, # self.preconditioner_type # ) # self.solver.parameters.add('linear_solver', self.solver_type) # kparams = fe.Parameters('krylov_solver') # kparams.add('report', True) # kparams.add('nonzero_initial_guess', True) # self.solver.parameters.add(kparams) # lparams = fe.Parameters('lu_solver') # lparams.add('report', True) # lparams.add('reuse_factorization', True) # lparams.add('verbose', True) # self.solver.parameters.add(lparams) self.dsol = Function(self.VS) self.drho, self.dU = self.dsol.sub(0), self.dsol.sub(1) # # assemble RHS (for each time point, but compile only once) # if not hasattr(self, 'rho_terms'): self.sigma = self.params['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rho_min = self.params['rho_min'] self.rhopen = self.params['rhopen'] self.grhopen = self.params['grhopen'] self.v = -ufl.grad(self.V(self.iU, self.irho)) - ( self.s2 * ufl.grad(self.irho) / ufl.max_value(self.irho, self.rho_min)) self.flux = self.v * self.irho self.vn = ufl.max_value(ufl.dot(self.v, self.n), 0) self.facet_flux = ( self.vn('+') * ufl.max_value(self.irho('+'), 0.0) - self.vn('-') * ufl.max_value(self.irho('-'), 0.0)) self.rho_flux_jump = -self.facet_flux * ufl.jump( self.wrho) * self.dS self.rho_grad_move = ufl.dot(self.flux, ufl.grad( self.wrho)) * self.dx self.rho_penalty = -( (self.rhopen * self.degree**2 / self.havg) * ufl.dot( ufl.jump(self.irho, self.n), ufl.jump(self.wrho, self.n)) * self.dS) self.grho_penalty = -(self.grhopen * self.degree**2 * (ufl.jump(ufl.grad(self.irho), self.n) * ufl.jump(ufl.grad(self.wrho), self.n)) * self.dS) self.rho_terms = (self.rho_flux_jump + self.rho_grad_move + self.rho_penalty + self.grho_penalty) if not hasattr(self, 'U_terms'): self.U_min = self.params['U_min'] self.gamma = self.params['gamma'] self.s = self.params['s'] self.D = self.params['D'] self.Upen = self.params['Upen'] self.gUpen = self.params['gUpen'] self.U_decay = -self.gamma * self.iU * self.wU * self.dx self.U_secretion = self.s * self.irho * self.wU * self.dx self.jump_gUw = (self.D * ufl.jump(self.wU * ufl.grad(self.iU), self.n) * self.dS) self.U_diffusion = -self.D * ufl.dot(ufl.grad(self.iU), ufl.grad(self.wU)) * self.dx self.U_penalty = -( (self.Upen * self.degree**2 / self.havg) * ufl.dot(ufl.jump(self.iU, self.n), ufl.jump(self.wU, self.n)) * self.dS) self.gU_penalty = -(self.gUpen * self.degree**2 * (ufl.jump(ufl.grad(self.iU), self.n) * ufl.jump(ufl.grad(self.wU), self.n)) * self.dS) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty) if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): self.J_terms = fe.derivative(self.all_terms, self.sol)
# The bilinear form a(v, u) and linear form L(v) for
# Poisson's equation in a discontinuous Galerkin (DG)
# formulation.
from ufl import (Coefficient, Constant, FacetNormal, FiniteElement,
                 TestFunction, TrialFunction, avg, dot, dS, ds, dx, grad,
                 inner, jump, triangle)

element = FiniteElement("Discontinuous Lagrange", triangle, 1)

v = TestFunction(element)
u = TrialFunction(element)
f = Coefficient(element)

n = FacetNormal(triangle)
h = Constant(triangle)
gN = Coefficient(element)

alpha = 4.0
gamma = 8.0

a = inner(grad(v), grad(u)) * dx \
    - inner(avg(grad(v)), jump(u, n)) * dS \
    - inner(jump(v, n), avg(grad(u))) * dS \
    + alpha / h('+') * dot(jump(v, n), jump(u, n)) * dS \
    - inner(grad(v), u * n) * ds \
    - inner(v * n, grad(u)) * ds \
    + gamma / h * v * u * ds

L = v * f * dx + v * gN * ds
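For readers unfamiliar with the facet shorthands in the DG form above, the lines below are an illustrative sketch (not part of the original form file) of what UFL's jump and avg reduce to for the scalar trial function u and facet normal n already defined there:

# Illustration only: the jump/avg shorthands used in the DG form above.
# For a scalar argument, UFL builds these from the restrictions to the
# two sides ('+' and '-') of an interior facet.
jump_un = u('+') * n('+') + u('-') * n('-')     # equivalent to jump(u, n)
avg_grad_u = (grad(u)('+') + grad(u)('-')) / 2  # equivalent to avg(grad(u))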
def setup_problem(self, debug=False): # # assemble the matrix, if necessary (once for all time points) # if not hasattr(self, 'A'): drho_integral = vectotal( [tdrho*wrho*self.dx for tdrho,wrho in zip(self.tdrhos, self.wrhos)] ) dU_integral = vectotal( [tdU*wU*self.dx for tdU,wU in zip(self.tdUs, self.wUs) ] ) self.A = fe.assemble(drho_integral + dU_integral) for bc in self.bcs: bc.apply(self.A) # if self.solver_type == 'lu': # self.solver = fe.LUSolver( # self.A, # ) # self.solver.parameters['reuse_factorization'] = True # else: # self.solver = fe.KrylovSolver( # self.A, # self.solver_type, # self.preconditioner_type # ) self.dsol = Function(self.VS) self.drhos = self.dsol.split()[: 2**self.dim] self.dUs = self.dsol.split()[2**self.dim :] # # These are the values of rho and U themselves (not their # symmetrized versions) on all subdomains of the original # domain. # if not hasattr(self, 'rhosds'): self.rhosds = matmul(self.eomat, self.irhos) if not hasattr(self, 'Usds'): self.Usds = matmul(self.eomat, self.iUs) # # assemble RHS (for each time point, but compile only once) # if not hasattr(self, 'rho_terms'): self.sigma = self.params['sigma'] self.s2 = self.sigma * self.sigma / 2 self.rho_min = self.params['rho_min'] self.rhopen = self.params['rhopen'] self.grhopen = self.params['grhopen'] # # Compute fluxes on subdomains. # self.Vsds = [self.V(Usd, rhosd) for Usd,rhosd in zip(self.Usds, self.rhosds)] # # I may need to adjust the signs of the subdomain vs by # the symmetries of the combinations # self.vsds = [-ufl.grad(Vsd) - ( self.s2*ufl.grad(rhosd)/ufl.max_value(rhosd, self.rho_min) ) for Vsd,rhosd in zip(self.Vsds, self.rhosds)] self.fluxsds = [vsd * rhosd for vsd,rhosd in zip(self.vsds, self.rhosds)] self.vnsds = [ufl.max_value(ufl.dot(vsd, self.n), 0) for vsd in self.vsds] self.facet_fluxsds = [( vnsd('+')*ufl.max_value(rhosd('+'), 0.0) - vnsd('-')*ufl.max_value(rhosd('-'), 0.0) ) for vnsd,rhosd in zip(self.vnsds, self.rhosds)] # # Now combine the subdomain fluxes to get the fluxes for # the symmetrized functions # self.fluxs = matmul((2.0**-self.dim)*self.eomat, self.fluxsds) self.facet_fluxs = matmul((2.0**-self.dim)*self.eomat, self.facet_fluxsds) self.rho_flux_jump = vectotal( [-facet_flux*ufl.jump(wrho)*self.dS for facet_flux,wrho in zip(self.facet_fluxs, self.wrhos)] ) self.rho_grad_move = vectotal( [ufl.dot(flux, ufl.grad(wrho))*self.dx for flux,wrho in zip(self.fluxs, self.wrhos)] ) self.rho_penalty = vectotal( [-(self.rhopen * self.degree**2 / self.havg) * ufl.dot(ufl.jump(rho, self.n), ufl.jump(wrho, self.n)) * self.dS for rho,wrho in zip(self.irhos, self.wrhos)] ) self.grho_penalty = vectotal( [-self.grhopen * self.degree**2 * (ufl.jump(ufl.grad(rho), self.n) * ufl.jump(ufl.grad(wrho), self.n)) * self.dS for rho,wrho in zip(self.irhos, self.wrhos)] ) self.rho_terms = ( self.rho_flux_jump + self.rho_grad_move + self.rho_penalty + self.grho_penalty ) if not hasattr(self, 'U_terms'): self.U_min = self.params['U_min'] self.gamma = self.params['gamma'] self.s = self.params['s'] self.D = self.params['D'] self.Upen = self.params['Upen'] self.gUpen = self.params['gUpen'] self.U_decay = vectotal( [-self.gamma * U * wU * self.dx for U,wU in zip(self.iUs, self.wUs)] ) self.U_secretion = vectotal( [self.s * rho * wU * self.dx for rho, wU in zip(self.irhos, self.wUs)] ) self.jump_gUw = vectotal( [self.D * ufl.jump(wU * ufl.grad(U), self.n) * self.dS for wU, U in zip(self.wUs, self.iUs) ] ) self.U_diffusion = vectotal( [-self.D * ufl.dot(ufl.grad(U), ufl.grad(wU))*self.dx for U,wU in 
zip(self.iUs, self.wUs) ] ) self.U_penalty = vectotal( [-(self.Upen * self.degree**2 / self.havg) * ufl.dot(ufl.jump(U, self.n), ufl.jump(wU, self.n))*self.dS for U,wU in zip(self.iUs, self.wUs) ] ) self.gU_penalty = vectotal( [-self.gUpen * self.degree**2 * ufl.jump(ufl.grad(U), self.n) * ufl.jump(ufl.grad(wU), self.n) * self.dS for U,wU in zip(self.iUs, self.wUs) ] ) self.U_terms = ( # decay and secretion self.U_decay + self.U_secretion + # diffusion self.jump_gUw + self.U_diffusion + # penalties (to enforce continuity) self.U_penalty + self.gU_penalty ) if not hasattr(self, 'all_terms'): self.all_terms = self.rho_terms + self.U_terms if not hasattr(self, 'J_terms'): self.J_terms = fe.derivative(self.all_terms, self.sol)
CC = FF.T * FF
Fv = variable(FF)
S = diff(freeEnergy(Fv.T * Fv, CCv), Fv)  # first PK stress
dl_interp(CC, C)
dl_interp(CC, Cn)
my_identity = grad(SpatialCoordinate(mesh))
dl_interp(my_identity, CCv)
dl_interp(my_identity, Cvn)
dl_interp(my_identity, C_quart)
dl_interp(my_identity, C_thr_quart)
dl_interp(my_identity, C_half)

a_uv = (derivative(freeEnergy(CC, CCv), u, v) * dx
        + qvals / h_avg * dot(jump(u), jump(v)) * dS)
jac = derivative(a_uv, u, du)

# assign DirichletBC
left_facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, left)
right_facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, right)
bottom_facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, bottom)
back_facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, back)

left_dofs = fem.locate_dofs_topological(V.sub(0), mesh.topology.dim - 1,
                                        left_facets)
right_dofs = fem.locate_dofs_topological(V.sub(0), mesh.topology.dim - 1,
                                         right_facets)
back_dofs = fem.locate_dofs_topological(V.sub(2), mesh.topology.dim - 1,
                                        back_facets)
bottom_dofs = fem.locate_dofs_topological(V.sub(1), mesh.topology.dim - 1,
def initialize(self, pc):
    """Set up the problem context. Take the original mixed problem
    and reformulate the problem as a hybridized mixed system.

    A KSP is created for the Lagrange multiplier system.
    """
    from firedrake import (FunctionSpace, Function, Constant,
                           TrialFunction, TrialFunctions, TestFunction,
                           DirichletBC)
    from firedrake.assemble import (allocate_matrix,
                                    create_assembly_callable)
    from firedrake.formmanipulation import split_form
    from ufl.algorithms.replace import replace

    # Extract the problem context
    prefix = pc.getOptionsPrefix() + "hybridization_"
    _, P = pc.getOperators()
    self.ctx = P.getPythonContext()

    if not isinstance(self.ctx, ImplicitMatrixContext):
        raise ValueError("The python context must be an ImplicitMatrixContext")

    test, trial = self.ctx.a.arguments()

    V = test.function_space()
    mesh = V.mesh()

    if len(V) != 2:
        raise ValueError("Expecting two function spaces.")

    if all(Vi.ufl_element().value_shape() for Vi in V):
        raise ValueError("Expecting an H(div) x L2 pair of spaces.")

    # Automagically determine which spaces are vector and scalar
    for i, Vi in enumerate(V):
        if Vi.ufl_element().sobolev_space().name == "HDiv":
            self.vidx = i
        else:
            assert Vi.ufl_element().sobolev_space().name == "L2"
            self.pidx = i

    # Create the space of approximate traces.
    W = V[self.vidx]
    if W.ufl_element().family() == "Brezzi-Douglas-Marini":
        tdegree = W.ufl_element().degree()
    else:
        try:
            # If we have a tensor product element
            h_deg, v_deg = W.ufl_element().degree()
            tdegree = (h_deg - 1, v_deg - 1)
        except TypeError:
            tdegree = W.ufl_element().degree() - 1

    TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree)

    # Break the function spaces and define fully discontinuous spaces
    broken_elements = ufl.MixedElement([ufl.BrokenElement(Vi.ufl_element())
                                        for Vi in V])
    V_d = FunctionSpace(mesh, broken_elements)

    # Set up the functions for the original, hybridized
    # and schur complement systems
    self.broken_solution = Function(V_d)
    self.broken_residual = Function(V_d)
    self.trace_solution = Function(TraceSpace)
    self.unbroken_solution = Function(V)
    self.unbroken_residual = Function(V)

    shapes = (V[self.vidx].finat_element.space_dimension(),
              np.prod(V[self.vidx].shape))
    domain = "{[i,j]: 0 <= i < %d and 0 <= j < %d}" % shapes
    instructions = """
    for i, j
        w[i,j] = w[i,j] + 1
    end
    """
    self.weight = Function(V[self.vidx])
    par_loop((domain, instructions), ufl.dx, {"w": (self.weight, INC)},
             is_loopy_kernel=True)

    instructions = """
    for i, j
        vec_out[i,j] = vec_out[i,j] + vec_in[i,j]/w[i,j]
    end
    """
    self.average_kernel = (domain, instructions)

    # Create the symbolic Schur-reduction:
    # Original mixed operator replaced with "broken" arguments
    arg_map = {test: TestFunction(V_d),
               trial: TrialFunction(V_d)}
    Atilde = Tensor(replace(self.ctx.a, arg_map))
    gammar = TestFunction(TraceSpace)
    n = ufl.FacetNormal(mesh)
    sigma = TrialFunctions(V_d)[self.vidx]

    if mesh.cell_set._extruded:
        Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_h
                 + gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_v)
    else:
        Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS)

    # Here we deal with boundaries. If there are Neumann
    # conditions (which should be enforced strongly for
    # H(div)xL^2) then we need to add jump terms on the exterior
    # facets. If there are Dirichlet conditions (which should be
    # enforced weakly) then we need to zero out the trace
    # variables there as they are not active (otherwise the hybrid
    # problem is not well-posed).
    # If boundary conditions are contained in the ImplicitMatrixContext:
    if self.ctx.row_bcs:
        # Find all the subdomains with neumann BCS
        # These are Dirichlet BCs on the vidx space
        neumann_subdomains = set()
        for bc in self.ctx.row_bcs:
            if bc.function_space().index == self.pidx:
                raise NotImplementedError("Dirichlet conditions for scalar variable not supported. Use a weak bc")
            if bc.function_space().index != self.vidx:
                raise NotImplementedError("Dirichlet bc set on unsupported space.")
            # append the set of sub domains
            subdom = bc.sub_domain
            if isinstance(subdom, str):
                neumann_subdomains |= set([subdom])
            else:
                neumann_subdomains |= set(as_tuple(subdom, int))

        # separate out the top and bottom bcs
        extruded_neumann_subdomains = neumann_subdomains & {"top", "bottom"}
        neumann_subdomains = neumann_subdomains - extruded_neumann_subdomains

        integrand = gammar * ufl.dot(sigma, n)
        measures = []
        trace_subdomains = []
        if mesh.cell_set._extruded:
            ds = ufl.ds_v
            for subdomain in sorted(extruded_neumann_subdomains):
                measures.append({"top": ufl.ds_t, "bottom": ufl.ds_b}[subdomain])
            trace_subdomains.extend(sorted({"top", "bottom"} - extruded_neumann_subdomains))
        else:
            ds = ufl.ds
        if "on_boundary" in neumann_subdomains:
            measures.append(ds)
        else:
            measures.extend((ds(sd) for sd in sorted(neumann_subdomains)))
            markers = [int(x) for x in mesh.exterior_facets.unique_markers]
            dirichlet_subdomains = set(markers) - neumann_subdomains
            trace_subdomains.extend(sorted(dirichlet_subdomains))

        for measure in measures:
            Kform += integrand*measure

        trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), subdomain)
                     for subdomain in trace_subdomains]
    else:
        # No bcs were provided, we assume weak Dirichlet conditions.
        # We zero out the contribution of the trace variables on
        # the exterior boundary. Extruded cells will have both
        # horizontal and vertical facets
        trace_subdomains = ["on_boundary"]
        if mesh.cell_set._extruded:
            trace_subdomains.extend(["bottom", "top"])
        trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), subdomain)
                     for subdomain in trace_subdomains]

    # Make a SLATE tensor from Kform
    K = Tensor(Kform)

    # Assemble the Schur complement operator and right-hand side
    self.schur_rhs = Function(TraceSpace)
    self._assemble_Srhs = create_assembly_callable(
        K * Atilde.inv * AssembledVector(self.broken_residual),
        tensor=self.schur_rhs,
        form_compiler_parameters=self.ctx.fc_params)

    mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

    schur_comp = K * Atilde.inv * K.T
    self.S = allocate_matrix(schur_comp, bcs=trace_bcs,
                             form_compiler_parameters=self.ctx.fc_params,
                             mat_type=mat_type,
                             options_prefix=prefix)
    self._assemble_S = create_assembly_callable(schur_comp,
                                                tensor=self.S,
                                                bcs=trace_bcs,
                                                form_compiler_parameters=self.ctx.fc_params,
                                                mat_type=mat_type)

    self._assemble_S()
    self.S.force_evaluation()
    Smat = self.S.petscmat

    nullspace = self.ctx.appctx.get("trace_nullspace", None)
    if nullspace is not None:
        nsp = nullspace(TraceSpace)
        Smat.setNullSpace(nsp.nullspace(comm=pc.comm))

    # Set up the KSP for the system of Lagrange multipliers
    trace_ksp = PETSc.KSP().create(comm=pc.comm)
    trace_ksp.setOptionsPrefix(prefix)
    trace_ksp.setOperators(Smat)
    trace_ksp.setUp()
    trace_ksp.setFromOptions()
    self.trace_ksp = trace_ksp

    split_mixed_op = dict(split_form(Atilde.form))
    split_trace_op = dict(split_form(K.form))

    # Generate reconstruction calls
    self._reconstruction_calls(split_mixed_op, split_trace_op)
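For context, this preconditioner is normally selected purely through PETSc options; the nested "hybridization" block below is what feeds the prefix + "hybridization_" options read in initialize. A hedged sketch of a typical solver-parameters dictionary follows; the inner KSP/PC choices are illustrative assumptions, not requirements.

# Illustrative Firedrake solver options for the hybridization preconditioner.
# The outer operator must be matrix-free so an ImplicitMatrixContext exists.
params = {
    "mat_type": "matfree",
    "ksp_type": "preonly",
    "pc_type": "python",
    "pc_python_type": "firedrake.HybridizationPC",
    "hybridization": {        # consumed under the "hybridization_" options prefix
        "ksp_type": "cg",
        "pc_type": "gamg",
    },
}
# e.g. solve(a == L, w, solver_parameters=params)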