def get_function_spaces_1():
    """Return factories for the function spaces exercised by the tests.

    Each entry maps a mesh to a function space; entries wrapped in
    ``pytest.mark.slow`` only run in the slow test configuration.
    """
    factories = [
        lambda mesh: FunctionSpace(mesh, "Lagrange", 1),
        pytest.mark.slow(lambda mesh: FunctionSpace(mesh, "Lagrange", 2)),
        lambda mesh: VectorFunctionSpace(mesh, "Lagrange", 1),
        pytest.mark.slow(lambda mesh: VectorFunctionSpace(mesh, "Lagrange", 2)),
        pytest.mark.slow(lambda mesh: TensorFunctionSpace(mesh, "Lagrange", 1)),
        pytest.mark.slow(lambda mesh: TensorFunctionSpace(mesh, "Lagrange", 2)),
        lambda mesh: StokesFunctionSpace(mesh, "Lagrange", 1),
        pytest.mark.slow(lambda mesh: StokesFunctionSpace(mesh, "Lagrange", 2)),
        lambda mesh: FunctionSpace(mesh, "Real", 0),
        pytest.mark.slow(lambda mesh: VectorFunctionSpace(mesh, "Real", 0)),
        pytest.mark.slow(lambda mesh: FunctionAndRealSpace(mesh, "Lagrange", 1)),
        pytest.mark.slow(lambda mesh: FunctionAndRealSpace(mesh, "Lagrange", 2)),
    ]
    return tuple(factories)
def test_save_1d_tensor(tempfile, file_options):
    """Write a tensor-valued P2 function on an interval mesh to VTK,
    once in ascii and once per extra file option."""
    mesh = UnitIntervalMesh(MPI.comm_world, 32)
    u = Function(TensorFunctionSpace(mesh, ("Lagrange", 2)))
    u.vector()[:] = 1.0
    outfile = tempfile + "u.pvd"
    VTKFile(outfile, "ascii").write(u)
    for option in file_options:
        VTKFile(outfile, option).write(u)
def test_save_3d_tensor(tempdir, encoding):
    """Write a tensor-valued P2 function on a cube mesh to XDMF with the
    requested encoding; fill is complex when PETSc is complex-built."""
    filename = os.path.join(tempdir, "u3t.xdmf")
    mesh = UnitCubeMesh(MPI.comm_world, 4, 4, 4)
    u = Function(TensorFunctionSpace(mesh, ("Lagrange", 2)))
    fill_value = 1.0 + (1j if has_petsc_complex else 0)
    u.vector.set(fill_value)
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(u)
def perm_update_rutqvist_newton(mesh, p, p0, phi0, phi, coeff):
    """Update permeability with the Rutqvist exponential porosity model.

    The new permeability is ``p0 * exp(coeff * (phi/phi0 - 1))``, with the
    multiplier clamped from below at ``mult_min``, projected onto a DG0
    tensor space on ``mesh``.

    :param p: previous permeability (unused; overwritten by the update)
    :param p0: reference permeability
    :param phi0: reference porosity
    :param phi: current porosity
    :param coeff: exponential sensitivity coefficient
    :returns: DG0 tensor Function holding the updated permeability
    """
    DG0 = TensorFunctionSpace(mesh, 'DG', 0)
    mult_min = 1e-10
    expr = exp(coeff * (phi / phi0 - 1.0))
    # BUG FIX: the original guard was ge(expr, 0.0), which is always true
    # because exp(...) is strictly positive, so mult_min was never applied.
    # Clamp the multiplier from below at mult_min as intended.
    mult = conditional(ge(expr, mult_min), expr, mult_min)
    p = p0 * mult
    return project(p, DG0)
def get_custom_space(self, family, degree, shape, boundary=False):
    """Return a memoized function space for ``(family, degree, shape)``.

    When ``boundary`` is true the space lives on ``self.BoundaryMesh``
    (and the cache key is extended with the flag); otherwise it lives on
    ``self.mesh``. Spaces are cached in ``self._spaces``. The rank of
    ``shape`` selects scalar / vector / tensor space construction.
    """
    if boundary:
        mesh = self.BoundaryMesh
        key = (family, degree, shape, boundary)
    else:
        mesh = self.mesh
        key = (family, degree, shape)
    cached = self._spaces.get(key)
    if cached is not None:
        return cached
    rank = len(shape)
    if rank == 0:
        space = FunctionSpace(mesh, family, degree)
    elif rank == 1:
        space = VectorFunctionSpace(mesh, family, degree, shape[0])
    else:
        space = TensorFunctionSpace(mesh, family, degree, shape, symmetry={})
    self._spaces[key] = space
    return space
def get_spaces(mesh):
    """
    Return an object of dolfin FunctionSpace, to be used in the
    optimization pipeline

    :param mesh: The mesh
    :type mesh: :py:class:`dolfin.Mesh`
    :returns: An object of functionspaces
    :rtype: object
    """
    from dolfin import FunctionSpace, VectorFunctionSpace, TensorFunctionSpace

    spaces = Object()
    # Real-valued scalar space (used by dolfin-adjoint).
    spaces.r_space = FunctionSpace(mesh, "R", 0)
    # CG1 vector space holding the strain fields.
    spaces.strainfieldspace = VectorFunctionSpace(mesh, "CG", 1, dim=3)
    # Real-valued 3-vector space for scalar strain components.
    spaces.strainspace = VectorFunctionSpace(mesh, "R", 0, dim=3)
    # Real-valued tensor space for the strain weights.
    spaces.strain_weight_space = TensorFunctionSpace(mesh, "R", 0)
    return spaces
def _generate_space(expression: Operator):
    """Build a DG1 (scalar/vector/tensor) space on the expression's mesh.

    The mesh is recovered from the expression's UFL domains plus any
    terminal carrying a ``_mesh`` attribute (cf. dolfin's
    fem/projection.py:_extract_function_space and ufl's domain.py);
    exactly one mesh must be found.
    """
    candidate_meshes = {ufl_domain.ufl_cargo()
                        for ufl_domain in extract_domains(expression)}
    for terminal in traverse_unique_terminals(expression):
        if hasattr(terminal, "_mesh"):
            candidate_meshes.add(terminal._mesh)
    assert len(candidate_meshes) == 1
    mesh = candidate_meshes.pop()
    # The EIM algorithm evaluates at vertices, but the Operator may carry
    # e.g. gradients of C^0 functions, so resort to DG1 spaces.
    shape = expression.ufl_shape
    assert len(shape) in (0, 1, 2)
    if len(shape) == 0:
        return FunctionSpace(mesh, "Discontinuous Lagrange", 1)
    if len(shape) == 1:
        return VectorFunctionSpace(mesh, "Discontinuous Lagrange", 1, dim=shape[0])
    if len(shape) == 2:
        return TensorFunctionSpace(mesh, "Discontinuous Lagrange", 1, shape=shape)
    raise ValueError(
        "Invalid expression in ParametrizedExpressionFactory.__init__().")
def permeability_tensor(self, K):
    """Build an anisotropic permeability tensor aligned with the fiber frame.

    Constructs K along the fiber direction and K/10 along the sheet (and
    cross-sheet, in 3D) directions, rotated into the global frame as
    F * diag(k) * inv(F) with F the (fiber, sheet[, cross-sheet]) basis,
    then projects onto a P1 tensor space.

    Returns Constant(1) (isotropic fallback) when no sheet field s0 exists.
    """
    FS = self.geometry.f0.function_space()
    TS = TensorFunctionSpace(self.geometry.mesh, 'P', 1)
    d = self.geometry.dim()
    # Unit fiber direction (normalized copy of f0).
    fibers = Function(FS)
    fibers.vector()[:] = self.geometry.f0.vector().get_local()
    fibers.vector()[:] /= df.norm(self.geometry.f0)
    if self.geometry.s0 is not None:
        # normalize vectors
        sheet = Function(FS)
        sheet.vector()[:] = self.geometry.s0.vector().get_local()
        sheet.vector()[:] /= df.norm(self.geometry.s0)
        if d == 3:
            # Third basis vector from n0 only needed in 3D.
            csheet = Function(FS)
            csheet.vector()[:] = self.geometry.n0.vector().get_local()
            csheet.vector()[:] /= df.norm(self.geometry.n0)
    else:
        # NOTE(review): flattened source is ambiguous about which `if` this
        # else belongs to; attaching it to the s0 check is the only reading
        # under which `sheet` below is always defined — confirm.
        return Constant(1)
    from ufl import diag
    factor = 10  # sheet/cross-sheet permeability is K/factor
    if d == 3:
        ftensor = df.as_matrix(((fibers[0], sheet[0], csheet[0]),
                                (fibers[1], sheet[1], csheet[1]),
                                (fibers[2], sheet[2], csheet[2])))
        ktensor = diag(df.as_vector([K, K / factor, K / factor]))
    else:
        ftensor = df.as_matrix(
            ((fibers[0], sheet[0]), (fibers[1], sheet[1])))
        ktensor = diag(df.as_vector([K, K / factor]))
    # Rotate the diagonal tensor into the global frame and project to P1.
    permeability = df.project(
        df.dot(df.dot(ftensor, ktensor), df.inv(ftensor)), TS)
    return permeability
def gauss_divergence(u, mesh=None): ''' This function uses Gauss divergence theorem to compute divergence of u inside the cell by integrating normal fluxes across the cell boundary. If u is a vector or tensor field the result of computation is diverence of u in the cell center = DG0 scalar/vector function. For scalar fields, the result is grad(u) = DG0 vector function. The fluxes are computed by midpoint rule and as such the computed divergence is exact for linear fields. ''' # Require u to be GenericFunction assert isinstance(u, GenericFunction) # Require u to be scalar/vector/rank 2 tensor rank = u.value_rank() assert rank in [0, 1, 2] # For now, there is no support for manifolds if mesh is None: _mesh = u.function_space().mesh() else: _mesh = mesh tdim = _mesh.topology().dim() gdim = _mesh.geometry().dim() assert tdim == gdim for i in range(rank): assert u.value_dimension(i) == gdim # Based on rank choose the type of CR1 space where u should be interpolated # to to get the midpoint values + choose the type of DG0 space for # divergence if rank == 1: DG = FunctionSpace(_mesh, 'DG', 0) CR = VectorFunctionSpace(_mesh, 'CR', 1) else: DG = VectorFunctionSpace(_mesh, 'DG', 0) if rank == 0: CR = FunctionSpace(_mesh, 'CR', 1) else: CR = TensorFunctionSpace(_mesh, 'CR', 1) divu = Function(DG) _u = interpolate(u, CR) # Use Gauss theorem cell by cell to get the divergence. The implementation # is based on divergence(vector) = scalar and so the spaces for these # two need to be provided if rank == 1: pass # CR, DG are correct already else: DG = FunctionSpace(_mesh, 'DG', 0) CR = VectorFunctionSpace(_mesh, 'CR', 1) compiled_cr_module.cr_divergence(divu, _u, DG, CR) return divu
def test_save_3d_tensor(tempfile, file_options):
    """Write a tensor-valued P2 function on a cube mesh to VTK: once in
    ascii, twice with time stamps, then once per extra file option."""
    mesh = UnitCubeMesh(MPI.comm_world, 8, 8, 8)
    u = Function(TensorFunctionSpace(mesh, ("Lagrange", 2)))
    u.vector()[:] = 1.0
    outfile = tempfile + "u.pvd"
    VTKFile(outfile, "ascii").write(u)
    vtk = VTKFile(outfile, "ascii")
    vtk.write(u, 0.)
    vtk.write(u, 1.)
    for option in file_options:
        VTKFile(outfile, option).write(u)
def test_save_3d_tensor(tempdir, encoding):
    """Write a tensor-valued P2 function on a cube mesh to XDMF,
    skipping when the encoding is unsupported."""
    if invalid_config(encoding):
        pytest.skip("XDMF unsupported in current configuration")
    filename = os.path.join(tempdir, "u3t.xdmf")
    mesh = UnitCubeMesh(MPI.comm_world, 4, 4, 4)
    u = Function(TensorFunctionSpace(mesh, "Lagrange", 2))
    u.vector()[:] = 1.0
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(u)
def test_save_2d_tensor(tempdir, encoding):
    """Write a tensor-valued P2 function on a square mesh to XDMF;
    the fill value gains an imaginary part under complex PETSc."""
    if invalid_config(encoding):
        pytest.skip("XDMF unsupported in current configuration")
    filename = os.path.join(tempdir, "tensor.xdmf")
    mesh = UnitSquareMesh(MPI.comm_world, 16, 16)
    u = Function(TensorFunctionSpace(mesh, "Lagrange", 2))
    fill_value = 1.0 + (1j if has_petsc_complex() else 0)
    u.vector()[:] = fill_value
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(u)
def initialize(self, V, Q, PS, D):
    """
    :param V: velocity space
    :param Q: pressure space
    :param PS: scalar space of same order as V, used for analytic solution generation
    :param D: divergence of velocity space
    """
    self.vSpace = V
    self.divSpace = D
    self.pSpace = Q
    self.solutionSpace = V
    self.vFunction = Function(V)
    self.divFunction = Function(D)
    self.pFunction = Function(Q)
    # self.pgSpace = VectorFunctionSpace(mesh, "DG", 0)    # used to save pressure gradient as vectors
    # self.pgFunction = Function(self.pgSpace)
    self.initialize_xdmf_files()
    # Number of time steps per cardiac cycle.
    self.stepsInCycle = self.cycle_length / self.metadata['dt']
    info('stepsInCycle = %f' % self.stepsInCycle)
    # Wall-shear-stress machinery is only set up when WSS output is requested.
    if self.args.wss != 'none':
        self.tc.start('WSSinit')
        if self.args.wss_method == 'expression':
            # Tensor space on the full mesh for the stress tensor.
            self.T = TensorFunctionSpace(self.mesh, 'Lagrange', 1)
            info('Generating boundary mesh')
            self.wall_mesh = BoundaryMesh(self.mesh, 'exterior')
            # Unordered variant keeps the original facet orientation,
            # needed by NormalExpression below.
            self.wall_mesh_oriented = BoundaryMesh(self.mesh, 'exterior', order=False)
            info(' Boundary mesh geometric dim: %d' % self.wall_mesh.geometry().dim())
            info(' Boundary mesh topologic dim: %d' % self.wall_mesh.topology().dim())
            # Tensor/vector/scalar spaces restricted to the wall mesh.
            self.Tb = TensorFunctionSpace(self.wall_mesh, 'Lagrange', 1)
            self.Vb = VectorFunctionSpace(self.wall_mesh, 'Lagrange', 1)
            info('Generating normal to boundary')
            normal_expr = self.NormalExpression(self.wall_mesh_oriented)
            Vn = VectorFunctionSpace(self.wall_mesh, 'DG', 0)
            # Cellwise-constant outward normal field on the wall.
            self.nb = project(normal_expr, Vn)
            self.Sb = FunctionSpace(self.wall_mesh, 'DG', 0)
        if self.args.wss_method == 'integral':
            # DG0 space on the full mesh for the integral WSS method.
            self.SDG = FunctionSpace(self.mesh, 'DG', 0)
        self.tc.end('WSSinit')
def __init__(self, mesh0=None, params={}):
    """Set up reference/current meshes, function spaces and fields.

    :param mesh0: reference mesh; defaults to a fresh ``UnitCubeMesh(8, 8, 8)``
    :param params: material parameters; ``'C1'`` defaults to 100
    """
    # BUG FIX: the original default ``mesh0=UnitCubeMesh(8, 8, 8)`` was
    # evaluated once at definition time, so every instance created without
    # an explicit mesh shared the same object. Build a fresh one per call.
    if mesh0 is None:
        mesh0 = UnitCubeMesh(8, 8, 8)
    parameters['form_compiler']['representation'] = 'uflacs'
    parameters['form_compiler']['optimize'] = True
    parameters['form_compiler']['quadrature_degree'] = 4
    self.mesh0 = Mesh(mesh0)  # reference configuration
    self.mesh = Mesh(mesh0)   # current (deformed) configuration
    # BUG FIX: copy before mutating — the original wrote into the shared
    # mutable default dict (``params={}``), leaking state across instances.
    params = dict(params)
    if 'C1' not in params:
        params['C1'] = 100
    self.params = params
    # Zero body force and traction by default.
    self.b = Constant((0.0, 0.0, 0.0))
    self.h = Constant((0.0, 0.0, 0.0))
    # Spaces on the reference mesh.
    self.C0 = FunctionSpace(self.mesh0, "Lagrange", 2)
    self.V0 = VectorFunctionSpace(self.mesh0, "Lagrange", 1)
    self.W0 = TensorFunctionSpace(self.mesh0, "Lagrange", 1)
    # Matching spaces on the current mesh.
    self.C = FunctionSpace(self.mesh, "Lagrange", 2)
    self.V = VectorFunctionSpace(self.mesh, "Lagrange", 1)
    self.W = TensorFunctionSpace(self.mesh, "Lagrange", 1)
    # Growth tensor initialized to the identity.
    self.G = project(Identity(3), self.W0)
    self.ut = Function(self.V0)
    self.du = TestFunction(self.V0)
    self.w = TrialFunction(self.V0)
    self.n0 = FacetNormal(self.mesh0)
    self.v = Function(self.V)
    self.t = 0.0
def get_P1_space(V):
    '''Get the Lagrange CG1 space corresponding to V'''
    # This is how in essence FEniCS 2017.2.0 dumps data, i.e. there is
    # no support for higher order spaces
    assert V.ufl_element().family() != 'Discontinuous Lagrange'
    # Cell data needed
    mesh = V.mesh()
    value_shape = V.ufl_element().value_shape()
    rank = len(value_shape)
    if rank == 0:
        return FunctionSpace(mesh, 'CG', 1)
    if rank == 1:
        return VectorFunctionSpace(mesh, 'CG', 1)
    return TensorFunctionSpace(mesh, 'CG', 1)
def compute(self, get):
    """Restrict the field named ``self.valuename`` onto ``self.submesh``.

    On first use, builds a matching function space on the submesh and a
    dof restriction map (cached on ``self``); afterwards only copies the
    mapped dof values. Returns the restricted Function, or None when the
    value is missing or not a Function.
    """
    u = get(self.valuename)
    if u is None:
        return None
    if not isinstance(u, Function):
        cbc_warning("Do not understand how to handle datatype %s" % str(type(u)))
        return None
    #if not hasattr(self, "restriction_map"):
    if not hasattr(self, "keys"):
        # One-time setup: replicate u's element on the submesh.
        V = u.function_space()
        element = V.ufl_element()
        family = element.family()
        degree = element.degree()
        # Rank API changed after dolfin 1.6.0.
        if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
            rank = len(u.ufl_shape)
        else:
            rank = u.rank()
        if rank == 0:
            FS = FunctionSpace(self.submesh, family, degree)
        elif rank == 1:
            FS = VectorFunctionSpace(self.submesh, family, degree)
        elif rank == 2:
            FS = TensorFunctionSpace(self.submesh, family, degree, symmetry={})
        self.u = Function(FS)
        #self.restriction_map = restriction_map(V, FS)
        rmap = restriction_map(V, FS)
        # NOTE(review): np.array(rmap.keys(), ...) relies on dict.keys()
        # returning a list (Python 2); under Python 3 this would not
        # produce an integer array — confirm the targeted interpreter.
        self.keys = np.array(rmap.keys(), dtype=np.intc)
        self.values = np.array(rmap.values(), dtype=np.intc)
        self.temp_array = np.zeros(len(self.keys), dtype=np.float_)
    # The simple __getitem__, __setitem__ has been removed in dolfin 1.5.0.
    # The new cbcpost-method get_set_vector should be compatible with 1.4.0 and 1.5.0.
    #self.u.vector()[self.keys] = u.vector()[self.values]
    get_set_vector(self.u.vector(), self.keys, u.vector(), self.values, self.temp_array)
    return self.u
def _generate_space(expression: BaseExpression):
    """Build a CG1 (scalar/vector/tensor) space on the expression's mesh.

    The EIM algorithm evaluates the expression at mesh vertices, so a
    degree-1 Lagrange space of matching rank is sufficient.
    """
    # Extract mesh from expression
    assert hasattr(expression, "_mesh")
    mesh = expression._mesh
    shape = expression.ufl_shape
    assert len(shape) in (0, 1, 2)
    rank = len(shape)
    if rank == 0:
        return FunctionSpace(mesh, "Lagrange", 1)
    if rank == 1:
        return VectorFunctionSpace(mesh, "Lagrange", 1, dim=shape[0])
    if rank == 2:
        return TensorFunctionSpace(mesh, "Lagrange", 1, shape=shape)
    raise ValueError("Invalid expression in ParametrizedExpressionFactory.__init__().")
def write_vtk_f(fname, mesh=None, nodefunctions=None, cellfunctions=None):
    """
    Write a whole bunch of FEniCS functions to the same vtk file.

    :param fname: output vtk file name
    :param mesh: mesh to write; when None it is taken from the first
        node function (or, failing that, the first cell function)
    :param nodefunctions: dict name -> Function sampled at vertices
    :param cellfunctions: dict name -> Function projected to DG0 per cell
    """
    # BUG FIX: the original used Python-2-only dict.itervalues().next(),
    # which raises AttributeError on Python 3; next(iter(...values()))
    # works on both. Also compare to None with `is`, not `==`/`!=`.
    if mesh is None:
        if nodefunctions is not None:
            mesh = next(iter(nodefunctions.values())).function_space().mesh()
        else:
            mesh = next(iter(cellfunctions.values())).function_space().mesh()
    # DG0 spaces by value rank, used to project cell data.
    C = {0: FunctionSpace(mesh, "DG", 0),
         1: VectorFunctionSpace(mesh, "DG", 0),
         2: TensorFunctionSpace(mesh, "DG", 0)}
    nodefields = [(k, f.compute_vertex_values().reshape(-1, mesh.num_vertices()).T)
                  for k, f in iteritems(nodefunctions)] if nodefunctions else None
    edgefields = [(k, project(f, C[f.value_rank()]).vector().get_local().reshape(mesh.num_cells(), -1))
                  for k, f in iteritems(cellfunctions)] if cellfunctions else None
    write_vtk(fname, mesh.cells(), mesh.coordinates(), nodefields, edgefields)
def Q(mesh):
    """Tensor-valued CG1 function space fixture on ``mesh``."""
    return TensorFunctionSpace(mesh, ('CG', 1))
if edgefields: fh.write("CELL_DATA {0}\n".format(elems.shape[0])) for n,f in edgefields: PUTFIELD(n,f) fh.close() if __name__=="__main__": from dolfin import UnitSquareMesh, Function, FunctionSpace, VectorFunctionSpace, TensorFunctionSpace, Expression mesh = UnitSquareMesh(10,10) S=FunctionSpace(mesh,"DG",0) V=VectorFunctionSpace(mesh,"DG",0) T=TensorFunctionSpace(mesh,"DG",0) Tsym = TensorFunctionSpace(mesh,"DG",0,symmetry=True) s = Function(S) s.interpolate(Expression('x[0]',element=S.ufl_element())) v = Function(V) v.interpolate(Expression(('x[0]','x[1]'),element=V.ufl_element())) t = Function(T) t.interpolate(Expression(( ('x[0]','1.0'),('2.0','x[1]')),element=T.ufl_element())) ts = Function(Tsym) ts.interpolate(Expression(( ('x[0]','1.0'),('x[1]',)),element=Tsym.ufl_element())) write_vtk_f("test.vtk",cellfunctions={'s':s,'v':v,'t':t,'tsym':ts})
def initialization(mesh, subdomains, boundaries):
    """Build block function spaces, solution containers and material fields.

    Sets up mixed spaces for flow (BDM x DG), mechanics (CG2 vector) and
    transport (CG1 enriched with DG0), then initializes mechanics, flow and
    transport parameters on subdomains 500/501 for time levels 0, n-1 and n,
    plus iterative helpers. Returns everything as one long tuple consumed by
    the solver drivers.
    """
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    UCG = VectorElement("CG", mesh.ufl_cell(), 2)
    BDM = FiniteElement("BDM", mesh.ufl_cell(), 1)
    PDG = FiniteElement("DG", mesh.ufl_cell(), 0)
    UCG_F = FunctionSpace(mesh, UCG)
    BDM_F = FunctionSpace(mesh, BDM)
    PDG_F = FunctionSpace(mesh, PDG)
    # Flow space (velocity, pressure) and mechanics space.
    W = BlockFunctionSpace([BDM_F, PDG_F], restrict=[None, None])
    U = BlockFunctionSpace([UCG_F])
    I = Identity(mesh.topology().dim())
    # Transport: CG1 enriched with DG0.
    C_cg = FiniteElement("CG", mesh.ufl_cell(), 1)
    C_dg = FiniteElement("DG", mesh.ufl_cell(), 0)
    mini = C_cg + C_dg
    C = FunctionSpace(mesh, mini)
    C = BlockFunctionSpace([C])  # TODO
    # Solution containers: time level 0, n-1, n-2, and current.
    solution0_h = BlockFunction(W)
    solution0_m = BlockFunction(U)
    solution0_c = BlockFunction(C)
    solution1_h = BlockFunction(W)
    solution1_m = BlockFunction(U)
    solution1_c = BlockFunction(C)
    solution2_h = BlockFunction(W)
    solution2_m = BlockFunction(U)
    solution2_c = BlockFunction(C)
    solution_h = BlockFunction(W)
    solution_m = BlockFunction(U)
    solution_c = BlockFunction(C)

    ## mechanics
    # 0 properties
    alpha1 = 0.74
    K1 = 8.4 * 1000.e6
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha_0 = Function(PM)
    K_0 = Function(PM)
    nu_0 = Function(PM)
    alpha_0 = init_scalar_parameter(alpha_0, alpha_values[0], 500, subdomains)
    K_0 = init_scalar_parameter(K_0, K_values[0], 500, subdomains)
    nu_0 = init_scalar_parameter(nu_0, nu_values[0], 500, subdomains)
    alpha_0 = init_scalar_parameter(alpha_0, alpha_values[1], 501, subdomains)
    K_0 = init_scalar_parameter(K_0, K_values[1], 501, subdomains)
    nu_0 = init_scalar_parameter(nu_0, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    mu_l_0, lmbda_l_0, Ks_0, K_0 = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max, K_0, nu_0, alpha_0, K_0)

    # n-1 properties
    alpha1 = 0.74
    K1 = 8.4 * 1000.e6
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha_1 = Function(PM)
    K_1 = Function(PM)
    nu_1 = Function(PM)
    alpha_1 = init_scalar_parameter(alpha_1, alpha_values[0], 500, subdomains)
    K_1 = init_scalar_parameter(K_1, K_values[0], 500, subdomains)
    nu_1 = init_scalar_parameter(nu_1, nu_values[0], 500, subdomains)
    alpha_1 = init_scalar_parameter(alpha_1, alpha_values[1], 501, subdomains)
    K_1 = init_scalar_parameter(K_1, K_values[1], 501, subdomains)
    nu_1 = init_scalar_parameter(nu_1, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    # Last argument stays K_0: the reference bulk modulus from level 0.
    mu_l_1, lmbda_l_1, Ks_1, K_1 = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max, K_1, nu_1, alpha_1, K_0)

    # n properties
    alpha1 = 0.74
    # BUG FIX: the original assigned K2 here (leaving K1 stale from the
    # n-1 section); the first assignment of the pair must be K1.
    K1 = 8.4 * 1000.e6
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha = Function(PM)
    K = Function(PM)
    nu = Function(PM)
    alpha = init_scalar_parameter(alpha, alpha_values[0], 500, subdomains)
    K = init_scalar_parameter(K, K_values[0], 500, subdomains)
    nu = init_scalar_parameter(nu, nu_values[0], 500, subdomains)
    alpha = init_scalar_parameter(alpha, alpha_values[1], 501, subdomains)
    K = init_scalar_parameter(K, K_values[1], 501, subdomains)
    nu = init_scalar_parameter(nu, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    mu_l, lmbda_l, Ks, K = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max, K, nu, alpha, K_0)

    ## flow
    # 0 properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf_0 = Function(PM)
    phi_0 = Function(PM)
    rho_0 = Function(PM)
    mu_0 = Function(PM)
    k_0 = Function(TM)
    cf_0 = init_scalar_parameter(cf_0, cf_values[0], 500, subdomains)
    phi_0 = init_scalar_parameter(phi_0, phi_values[0], 500, subdomains)
    rho_0 = init_scalar_parameter(rho_0, rho_values[0], 500, subdomains)
    mu_0 = init_scalar_parameter(mu_0, mu_values[0], 500, subdomains)
    k_0 = init_tensor_parameter(k_0, k_values[0], 500, subdomains, mesh.topology().dim())
    cf_0 = init_scalar_parameter(cf_0, cf_values[1], 501, subdomains)
    phi_0 = init_scalar_parameter(phi_0, phi_values[1], 501, subdomains)
    rho_0 = init_scalar_parameter(rho_0, rho_values[1], 501, subdomains)
    mu_0 = init_scalar_parameter(mu_0, mu_values[1], 501, subdomains)
    k_0 = init_tensor_parameter(k_0, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k_0 = init_from_file_parameter(k_0,0.,0.,filename)

    # n-1 properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf_1 = Function(PM)
    phi_1 = Function(PM)
    rho_1 = Function(PM)
    mu_1 = Function(PM)
    k_1 = Function(TM)
    cf_1 = init_scalar_parameter(cf_1, cf_values[0], 500, subdomains)
    phi_1 = init_scalar_parameter(phi_1, phi_values[0], 500, subdomains)
    rho_1 = init_scalar_parameter(rho_1, rho_values[0], 500, subdomains)
    mu_1 = init_scalar_parameter(mu_1, mu_values[0], 500, subdomains)
    k_1 = init_tensor_parameter(k_1, k_values[0], 500, subdomains, mesh.topology().dim())
    cf_1 = init_scalar_parameter(cf_1, cf_values[1], 501, subdomains)
    phi_1 = init_scalar_parameter(phi_1, phi_values[1], 501, subdomains)
    rho_1 = init_scalar_parameter(rho_1, rho_values[1], 501, subdomains)
    mu_1 = init_scalar_parameter(mu_1, mu_values[1], 501, subdomains)
    k_1 = init_tensor_parameter(k_1, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k_1 = init_from_file_parameter(k_1,0.,0.,filename)

    # n properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf = Function(PM)
    phi = Function(PM)
    rho = Function(PM)
    mu = Function(PM)
    k = Function(TM)
    cf = init_scalar_parameter(cf, cf_values[0], 500, subdomains)
    phi = init_scalar_parameter(phi, phi_values[0], 500, subdomains)
    rho = init_scalar_parameter(rho, rho_values[0], 500, subdomains)
    mu = init_scalar_parameter(mu, mu_values[0], 500, subdomains)
    k = init_tensor_parameter(k, k_values[0], 500, subdomains, mesh.topology().dim())
    cf = init_scalar_parameter(cf, cf_values[1], 501, subdomains)
    phi = init_scalar_parameter(phi, phi_values[1], 501, subdomains)
    rho = init_scalar_parameter(rho, rho_values[1], 501, subdomains)
    mu = init_scalar_parameter(mu, mu_values[1], 501, subdomains)
    k = init_tensor_parameter(k, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k = init_from_file_parameter(k,0.,0.,filename)

    ### transport
    # 0
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d_0 = Function(TM)
    d_0 = init_tensor_parameter(d_0, d_values[0], 500, subdomains, mesh.topology().dim())
    d_0 = init_tensor_parameter(d_0, d_values[1], 501, subdomains, mesh.topology().dim())
    # n-1
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d_1 = Function(TM)
    d_1 = init_tensor_parameter(d_1, d_values[0], 500, subdomains, mesh.topology().dim())
    d_1 = init_tensor_parameter(d_1, d_values[1], 501, subdomains, mesh.topology().dim())
    # n
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d = Function(TM)
    d = init_tensor_parameter(d, d_values[0], 500, subdomains, mesh.topology().dim())
    d = init_tensor_parameter(d, d_values[1], 501, subdomains, mesh.topology().dim())

    ####initialization
    # initial
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution0_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution0_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution0_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution0_c.sub(0), c0_project)
    # n - 1
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution1_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution1_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution1_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution1_c.sub(0), c0_project)
    # n - 2
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution2_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution2_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution2_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution2_c.sub(0), c0_project)
    # n
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution_c.sub(0), c0_project)

    ###iterative parameters
    phi_it = Function(PM)
    assign(phi_it, phi_0)
    print('c_sat', c_sat_cal(1.0e8, 20.))
    c_sat = c_sat_cal(1.0e8, 20.)
    c_sat = project(c_sat, PM)
    c_inject = Constant(0.0)
    c_inject = project(c_inject, PM)
    mu_c1_1 = 1.e-4
    mu_c2_1 = 5.e-0
    mu_c1_2 = 1.e-4
    mu_c2_2 = 5.e-0
    mu_c1_values = [mu_c1_1, mu_c1_2]
    mu_c2_values = [mu_c2_1, mu_c2_2]
    mu_c1 = Function(PM)
    mu_c2 = Function(PM)
    mu_c1 = init_scalar_parameter(mu_c1, mu_c1_values[0], 500, subdomains)
    mu_c2 = init_scalar_parameter(mu_c2, mu_c2_values[0], 500, subdomains)
    mu_c1 = init_scalar_parameter(mu_c1, mu_c1_values[1], 501, subdomains)
    mu_c2 = init_scalar_parameter(mu_c2, mu_c2_values[1], 501, subdomains)
    coeff_for_perm_1 = 22.2
    coeff_for_perm_2 = 22.2
    coeff_for_perm_values = [coeff_for_perm_1, coeff_for_perm_2]
    coeff_for_perm = Function(PM)
    coeff_for_perm = init_scalar_parameter(coeff_for_perm, coeff_for_perm_values[0], 500, subdomains)
    coeff_for_perm = init_scalar_parameter(coeff_for_perm, coeff_for_perm_values[1], 501, subdomains)
    solutionIt_h = BlockFunction(W)
    return solution0_m, solution0_h, solution0_c \
        ,solution1_m, solution1_h, solution1_c \
        ,solution2_m, solution2_h, solution2_c \
        ,solution_m, solution_h, solution_c \
        ,alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0 \
        ,alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1 \
        ,alpha, K, mu_l, lmbda_l, Ks \
        ,cf_0, phi_0, rho_0, mu_0, k_0 \
        ,cf_1, phi_1, rho_1, mu_1, k_1 \
        ,cf, phi, rho, mu, k \
        ,d_0, d_1, d, I \
        ,phi_it, solutionIt_h, mu_c1, mu_c2 \
        ,nu_0, nu_1, nu, coeff_for_perm \
        ,c_sat, c_inject
# Module-level fixtures: a unit square mesh and a collection of scalar,
# vector and tensor expressions/functions used by the tests below.
mesh = UnitSquareMesh(10, 10)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
# Expressions carrying a mu_0 parameter: scalar, vector, two tensors.
expr1 = Expression("x[0]", mu_0=0., degree=1, cell=mesh.ufl_cell())  # f_5
expr2 = Expression(("x[0]", "x[1]"), mu_0=0., degree=1, cell=mesh.ufl_cell())  # f_6
expr3 = Expression((("1*x[0]", "2*x[1]"), ("3*x[0]", "4*x[1]")), mu_0=0., degree=1, cell=mesh.ufl_cell())  # f_7
expr4 = Expression((("4*x[0]", "3*x[1]"), ("2*x[0]", "1*x[1]")), mu_0=0., degree=1, cell=mesh.ufl_cell())  # f_8
# Same expressions without the mu_0 parameter.
expr5 = Expression("x[0]", degree=1, cell=mesh.ufl_cell())  # f_9
expr6 = Expression(("x[0]", "x[1]"), degree=1, cell=mesh.ufl_cell())  # f_10
expr7 = Expression((("1*x[0]", "2*x[1]"), ("3*x[0]", "4*x[1]")), degree=1, cell=mesh.ufl_cell())  # f_11
expr8 = Expression((("4*x[0]", "3*x[1]"), ("2*x[0]", "1*x[1]")), degree=1, cell=mesh.ufl_cell())  # f_12
expr9 = Constant(((1, 2), (3, 4)))  # f_13
scalar_V = FunctionSpace(mesh, "Lagrange", 3)
tensor_V = TensorFunctionSpace(mesh, "Lagrange", 1)
# Plain Functions on scalar / vector / tensor spaces.
expr10 = Function(scalar_V)  # f_18
expr11 = Function(V)  # f_21
expr12 = Function(tensor_V)  # f_24
expr13 = Function(scalar_V)  # f_27
expr14 = Function(V)  # f_30
expr15 = Function(tensor_V)  # f_33


class Problem(object):
    """Minimal named problem stub used to tag functions in the map below."""

    def __init__(self, name):
        self._name = name

    def name(self):
        return self._name


# Register expr10 as belonging to a named problem.
_solution_to_problem_map[expr10] = Problem("problem10")
def transport_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0, \
        alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0, \
        alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1, \
        alpha, K, mu_l, lmbda_l, Ks, \
        cf_0, phi_0, rho_0, mu_0, k_0,\
        cf_1, phi_1, rho_1, mu_1, k_1,\
        cf, phi, rho, mu, k, \
        d_0, d_1, d_t, vel_c, p_con, A_0, Temp, c_extrapolate):
    """Solve the linear advection-diffusion transport step with a DG-in-space
    (interior penalty + upwinding) discretization, integrated in time by the
    generic ``TimeStepping`` driver. Returns (solution, T)."""
    # Create mesh and define function space
    parameters["ghost_mode"] = "shared_facet"  # required by dS
    dx = Measure('dx', domain=mesh, subdomain_data=subdomains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dS = Measure('dS', domain=mesh, subdomain_data=boundaries)
    # Concentration space: CG1 enriched with DG0, wrapped as a block space.
    C_cg = FiniteElement("CG", mesh.ufl_cell(), 1)
    C_dg = FiniteElement("DG", mesh.ufl_cell(), 0)
    mini = C_cg + C_dg
    C = FunctionSpace(mesh, mini)
    C = BlockFunctionSpace([C])
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    # Facet geometry quantities for the interior-penalty terms.
    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    fc = FacetArea(mesh)
    h = vc / fc
    h_avg = (vc('+') + vc('-')) / (2 * avg(fc))
    penalty1 = Constant(1.0)
    # Tortuosity from porosity.
    tau = Function(PM)
    tau = tau_cal(tau, phi, -0.5)
    tuning_para = 0.25
    # Upwind flux: positive part of vel_c . n.
    vel_norm = (dot(vel_c, n) + abs(dot(vel_c, n))) / 2.0
    cell_size = CellDiameter(mesh)
    vnorm = sqrt(dot(vel_c, vel_c))
    I = Identity(mesh.topology().dim())
    # Effective diffusivity plus an artificial (streamline) diffusion term.
    d_eff = Function(TM)
    d_eff = diff_coeff_cal_rev(d_eff, d_0, tau, phi) + tuning_para * cell_size * vnorm * I
    monitor_dt = dt
    # Define variational problem
    dc, = BlockTrialFunction(C)
    dc_dot, = BlockTrialFunction(C)
    psic, = BlockTestFunction(C)
    block_c = BlockFunction(C)
    c, = block_split(block_c)
    block_c_dot = BlockFunction(C)
    c_dot, = block_split(block_c_dot)
    theta = -1.0  # non-symmetric interior penalty variant
    # Mass term, diffusion (with weighted averages) and upwinded advection.
    a_time = phi * rho * inner(c_dot, psic) * dx
    a_dif = dot(rho*d_eff*grad(c),grad(psic))*dx \
        - dot(avg_w(rho*d_eff*grad(c),weight_e(rho*d_eff,n)), jump(psic, n))*dS \
        + theta*dot(avg_w(rho*d_eff*grad(psic),weight_e(rho*d_eff,n)), jump(c, n))*dS \
        + penalty1/h_avg*k_e(rho*d_eff,n)*dot(jump(c, n), jump(psic, n))*dS
    a_adv = -dot(rho*vel_c*c,grad(psic))*dx \
        + dot(jump(psic), rho('+')*vel_norm('+')*c('+') - rho('-')*vel_norm('-')*c('-') )*dS \
        + dot(psic, rho*vel_norm*c)*ds(3)
    # Reaction source and inflow boundary contribution.
    R_c = R_c_cal(c_extrapolate, p_con, Temp)
    c_D1 = Constant(0.5)
    rhs_c = R_c * A_s_cal(phi, phi_0, A_0) * psic * dx - dot(
        rho * phi * vel_c, n) * c_D1 * psic * ds(1)
    r_u = [a_dif + a_adv]
    j_u = block_derivative(r_u, [c], [dc])
    r_u_dot = [a_time]
    j_u_dot = block_derivative(r_u_dot, [c_dot], [dc_dot])
    r = [r_u_dot[0] + r_u[0] - rhs_c]
    # this part is not applied.
    exact_solution_expression1 = Expression("1.0", t=0, element=C[0].ufl_element())

    def bc(t):
        p5 = DirichletBC(C.sub(0), exact_solution_expression1, boundaries, 1, method="geometric")
        return BlockDirichletBC([p5])

    # Define problem wrapper
    class ProblemWrapper(object):
        def set_time(self, t):
            pass

        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            return r

        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            return [[
                Constant(solution_dot_coefficient) * j_u_dot[0, 0] + j_u[0, 0]
            ]]

        # Define boundary condition
        def bc_eval(self, t):
            pass

        # Define initial condition
        def ic_eval(self):
            return solution0

        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            pass

    problem_wrapper = ProblemWrapper()
    (solution, solution_dot) = (block_c, block_c_dot)
    solver = TimeStepping(problem_wrapper, solution, solution_dot)
    solver.set_parameters({
        "initial_time": t_start,
        "time_step_size": dt,
        "monitor": {
            "time_step_size": monitor_dt,
        },
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "linear",
        "linear_solver": "mumps",
        "report": True
    })
    export_solution = solver.solve()
    return export_solution, T
def h_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0, \
             alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0, \
             alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1, \
             alpha, K, mu_l, lmbda_l, Ks, \
             cf_0, phi_0, rho_0, mu_0, k_0,\
             cf_1, phi_1, rho_1, mu_1, k_1,\
             cf, phi, rho, mu, k, \
             sigma_v_freeze, dphi_c_dt):
    """Solve the (hydraulic) flow subproblem: mixed Darcy velocity/pressure
    formulation, time-integrated with rbnics' TimeStepping.

    Discretization: BDM1 for the Darcy flux ``v`` and DG0 for the pressure
    ``p`` in a two-field multiphenics BlockFunctionSpace.  The velocity
    equation is quasi-static (its time term has a zero coefficient); the
    pressure equation carries the storage term with compressibility
    ``M_inv + alpha^2/K``.  Frozen total stress rate ``sigma_v_freeze`` and
    porosity-change rate ``dphi_c_dt`` enter as pressure-equation sources.

    Returns ``(solution, T)`` where ``solution`` is the BlockFunction after
    stepping from ``t_start`` to ``T`` with step ``dt``.

    NOTE(review): several parameters (alpha_*, K_*, mu_l_*, lmbda_l_*, Ks_*,
    cf_*, rho_*, ...) exist for signature compatibility with the sibling
    solvers and are unused here — confirm before pruning.
    """
    # Create mesh and define function space
    parameters["ghost_mode"] = "shared_facet"  # required by dS (interior-facet integrals)
    dx = Measure('dx', domain=mesh, subdomain_data=subdomains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dS = Measure('dS', domain=mesh, subdomain_data=boundaries)
    # Mixed Darcy pair: H(div)-conforming BDM1 flux, DG0 pressure.
    BDM = FiniteElement("BDM", mesh.ufl_cell(), 1)
    PDG = FiniteElement("DG", mesh.ufl_cell(), 0)
    BDM_F = FunctionSpace(mesh, BDM)
    PDG_F = FunctionSpace(mesh, PDG)
    W = BlockFunctionSpace([BDM_F, PDG_F], restrict=[None, None])
    # Auxiliary DG0 spaces for cellwise coefficients.
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    fc = FacetArea(mesh)
    h = vc / fc
    h_avg = (vc('+') + vc('-')) / (2 * avg(fc))
    I = Identity(mesh.topology().dim())
    monitor_dt = dt
    p_outlet = 0.1e6   # outlet pressure [Pa], weakly imposed on boundary tag 3
    p_inlet = 1000.0   # NOTE(review): unused here — inflow is a velocity BC below
    # Storage coefficient 1/M (Biot modulus inverse).
    M_inv = phi_0 * cf + (alpha - phi_0) / Ks
    # Define variational problem
    trial = BlockTrialFunction(W)
    dv, dp = block_split(trial)
    trial_dot = BlockTrialFunction(W)
    dv_dot, dp_dot = block_split(trial_dot)
    test = BlockTestFunction(W)
    psiv, psip = block_split(test)
    block_w = BlockFunction(W)
    v, p = block_split(block_w)
    block_w_dot = BlockFunction(W)
    v_dot, p_dot = block_split(block_w_dot)
    # Velocity equation carries no time derivative (zero coefficient).
    a_time = Constant(0.0) * inner(v_dot, psiv) * dx  #quasi static

    # k is a function of phi
    #k = perm_update_rutqvist_newton(p,p0,phi0,phi,coeff)
    # Darcy law: mu k^-1 v - grad p tested against psiv (grad moved to div).
    lhs_a = inner(dot(v, mu * inv(k)), psiv) * dx - p * div(
        psiv
    ) * dx  #+ 6.0*inner(psiv,n)*ds(2) # - inner(gravity*(rho-rho0), psiv)*dx

    # Pressure storage term: (1/M + alpha^2/K) * dp/dt.
    b_time = (M_inv + pow(alpha, 2.) / K) * p_dot * psip * dx
    lhs_b = div(v) * psip * dx  #div(rho*v)*psip*dx #TODO rho

    # Natural BC: prescribed pressure on the outlet (boundary tag 3).
    rhs_v = -p_outlet * inner(psiv, n) * ds(3)
    # Sources: frozen stress rate and chemically-induced porosity change.
    rhs_p = -alpha / K * sigma_v_freeze * psip * dx - dphi_c_dt * psip * dx

    # Residual/jacobian blocks for the time-stepping wrapper.
    r_u = [lhs_a, lhs_b]
    j_u = block_derivative(r_u, block_w, trial)
    r_u_dot = [a_time, b_time]
    j_u_dot = block_derivative(r_u_dot, block_w_dot, trial_dot)
    r = [r_u_dot[0] + r_u[0] - rhs_v, \
         r_u_dot[1] + r_u[1] - rhs_p]

    def bc(t):
        # Strong (essential) flux BCs: inflow on tag 1, no-flow on tags 2 and 4.
        #bc_v = [DirichletBC(W.sub(0), (.0, .0), boundaries, 4)]
        v1 = DirichletBC(W.sub(0), (1.e-4 * 2.0, 0.0), boundaries, 1)
        v2 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 2)
        v4 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 4)
        bc_v = [v1, v2, v4]
        return BlockDirichletBC([bc_v, None])

    # Define problem wrapper: adapts the forms above to the rbnics
    # TimeStepping callback interface.
    class ProblemWrapper(object):
        def set_time(self, t):
            pass
            #g.t = t

        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            #print(as_backend_type(assemble(p_time - p_time_error)).vec().norm())
            #print("gravity effect", as_backend_type(assemble(inner(gravity*(rho-rho0), psiv)*dx)).vec().norm())
            return r

        def jacobian_eval(self, t, solution, solution_dot,
                          solution_dot_coefficient):
            return [[Constant(solution_dot_coefficient)*j_u_dot[0, 0] + j_u[0, 0], \
                     Constant(solution_dot_coefficient)*j_u_dot[0, 1] + j_u[0, 1]], \
                    [Constant(solution_dot_coefficient)*j_u_dot[1, 0] + j_u[1, 0], \
                     Constant(solution_dot_coefficient)*j_u_dot[1, 1] + j_u[1, 1]]]

        # Define boundary condition
        def bc_eval(self, t):
            return bc(t)

        # Define initial condition
        def ic_eval(self):
            return solution0

        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            pass

    # Solve the time dependent problem
    problem_wrapper = ProblemWrapper()
    (solution, solution_dot) = (block_w, block_w_dot)
    solver = TimeStepping(problem_wrapper, solution, solution_dot)
    solver.set_parameters({
        "initial_time": t_start,
        "time_step_size": dt,
        "monitor": {
            "time_step_size": monitor_dt,
        },
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "linear",
        "linear_solver": "mumps",
        "report": True
    })
    export_solution = solver.solve()
    return export_solution, T
def perm_update_kk_newton(mesh, p, p0, phi0, phi, coeff): DG0 = TensorFunctionSpace(mesh, 'DG', 0) mult = pow(phi / phi0, 3.0) * pow(((1.0 - phi0) / (1.0 - phi)), 2.0) expr = p0 * mult return project(expr, DG0)
# Block function space V_element = VectorElement("CG", mesh.ufl_cell(), 2) if discretization == "DG": Q_element = FiniteElement("DG", mesh.ufl_cell(), 1) W_element = BlockElement(V_element, Q_element) elif discretization == "EG": Q_element = FiniteElement("CG", mesh.ufl_cell(), 1) D_element = FiniteElement("DG", mesh.ufl_cell(), 0) EG_element = Q_element + D_element W_element = BlockElement(V_element, EG_element) else: raise RuntimeError("Invalid discretization") W = BlockFunctionSpace(mesh, W_element) PM = FunctionSpace(mesh, "DG", 0) TM = TensorFunctionSpace(mesh, "DG", 0) I = Identity(mesh.topology().dim()) dx = Measure("dx", domain=mesh, subdomain_data=subdomains) ds = Measure("ds", domain=mesh, subdomain_data=boundaries) dS = Measure("dS", domain=mesh, subdomain_data=boundaries) # Test and trial functions vq = BlockTestFunction(W) (v, q) = block_split(vq) up = BlockTrialFunction(W) (u, p) = block_split(up) w = BlockFunction(W) w0 = BlockFunction(W)
def cal_delta_tm(mesh, p, p1): DG0 = TensorFunctionSpace(mesh, "DG", 0) return_p = Function(DG0) return_p.vector()[:] = p.vector()[:] - p1.vector()[:] return return_p
def m_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0, \ alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0, \ alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1, \ alpha, K, mu_l, lmbda_l, Ks, \ cf_0, phi_0, rho_0, mu_0, k_0,\ cf_1, phi_1, rho_1, mu_1, k_1,\ cf, phi, rho, mu, k, \ pressure_freeze): # Create mesh and define function space parameters["ghost_mode"] = "shared_facet" # required by dS dx = Measure('dx', domain=mesh, subdomain_data=subdomains) ds = Measure('ds', domain=mesh, subdomain_data=boundaries) dS = Measure('dS', domain=mesh, subdomain_data=boundaries) C = VectorFunctionSpace(mesh, "CG", 2) C = BlockFunctionSpace([C]) TM = TensorFunctionSpace(mesh, 'DG', 0) PM = FunctionSpace(mesh, 'DG', 0) n = FacetNormal(mesh) vc = CellVolume(mesh) fc = FacetArea(mesh) h = vc/fc h_avg = (vc('+') + vc('-'))/(2*avg(fc)) monitor_dt = dt f_stress_x = Constant(-1.e3) f_stress_y = Constant(-20.0e6) f = Constant((0.0, 0.0)) #sink/source for displacement I = Identity(mesh.topology().dim()) # Define variational problem psiu, = BlockTestFunction(C) block_u = BlockTrialFunction(C) u, = block_split(block_u) w = BlockFunction(C) theta = -1.0 a_time = inner(-alpha*pressure_freeze*I,sym(grad(psiu)))*dx #quasi static a = inner(2*mu_l*strain(u)+lmbda_l*div(u)*I, sym(grad(psiu)))*dx rhs_a = inner(f,psiu)*dx \ + dot(f_stress_y*n,psiu)*ds(2) r_u = [a] #DirichletBC bcd1 = DirichletBC(C.sub(0).sub(0), 0.0, boundaries, 1) # No normal displacement for solid on left side bcd3 = DirichletBC(C.sub(0).sub(0), 0.0, boundaries, 3) # No normal displacement for solid on right side bcd4 = DirichletBC(C.sub(0).sub(1), 0.0, boundaries, 4) # No normal displacement for solid on bottom side bcs = BlockDirichletBC([bcd1,bcd3,bcd4]) AA = block_assemble([r_u]) FF = block_assemble([rhs_a - a_time]) bcs.apply(AA) bcs.apply(FF) block_solve(AA, w.block_vector(), FF, "mumps") export_solution = w return export_solution, T