def variational_forms(self, dt: df.Constant) -> Tuple[Any, Any]:
    """Create the variational forms corresponding to the given
    discretization of the given system of equations.

    *Arguments*
      dt (:py:class:`ufl.Expr` or float)
        The time step

    *Returns*
      (lhs, rhs) (:py:class:`tuple` of :py:class:`ufl.Form`)
    """
    # Extract theta parameter and conductivities
    theta = self._parameters.theta
    Mi = self._intracellular_conductivity
    Me = self._extracellular_conductivity

    # Define variational formulation
    if self._parameters.linear_solver_type == "direct":
        v, u, multiplier = df.TrialFunctions(self._VUR)
        v_test, u_test, multiplier_test = df.TestFunctions(self._VUR)
    else:
        v, u = df.TrialFunctions(self._VUR)
        v_test, u_test = df.TestFunctions(self._VUR)

    Dt_v = (v - self._v_prev) / dt
    Dt_v *= self._chi_cm    # chi is the surface-to-volume ratio, Cm the membrane capacitance
    v_mid = theta * v + (1.0 - theta) * self._v_prev

    # Set up measures and rhs from stimulus
    dOmega = df.Measure("dx", domain=self._mesh, subdomain_data=self._cell_function)
    dGamma = df.Measure("ds", domain=self._mesh, subdomain_data=self._interface_function)

    # Loop over all domains
    G = Dt_v * v_test * dOmega()
    for key in self._cell_tags - self._restrict_tags:
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(v_test)) * dOmega(key)
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(u_test)) * dOmega(key)

    for key in self._cell_tags:
        G += df.inner(Mi[key] * df.grad(u), df.grad(v_test)) * dOmega(key)
        G += df.inner((Mi[key] + Me[key]) * df.grad(u), df.grad(u_test)) * dOmega(key)

        # If Lagrange multiplier
        if self._parameters.linear_solver_type == "direct":
            G += (multiplier_test * u + multiplier * u_test) * dOmega(key)

    for key in set(self._interface_tags):
        # Default to 0 if not defined for tag
        G += self._neumann_bc.get(key, df.Constant(0)) * u_test * dGamma(key)

    a, L = df.system(G)
    return a, L
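# Hedged usage sketch (an illustration, not part of the original class): assemble the
# forms returned by `variational_forms` once and reuse the matrix over several time
# steps. The names `solver`, `dt` and `n_steps` are assumptions; boundary conditions
# and the update of the previous solution between steps are omitted.
def _example_time_loop(solver, dt=0.01, n_steps=10):
    """Illustrative only: `solver` is assumed to expose `variational_forms` and `_VUR`."""
    a, L = solver.variational_forms(df.Constant(dt))
    A = df.assemble(a)                      # assemble the lhs once
    solution = df.Function(solver._VUR)
    for _ in range(n_steps):
        b = df.assemble(L)                  # rhs changes as the previous state changes
        df.solve(A, solution.vector(), b)
    return solution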
def setup(self, DG3, m, Ms, unit_length=1.0):
    self.DG3 = DG3
    self.m = m
    self.Ms = Ms
    self.unit_length = unit_length

    mesh = DG3.mesh()
    self.mesh = mesh

    DG = df.FunctionSpace(mesh, "DG", 0)
    BDM = df.FunctionSpace(mesh, "BDM", 1)

    # deal with three components simultaneously, each represents a vector
    W1 = df.MixedFunctionSpace([BDM, BDM, BDM])
    (sigma0, sigma1, sigma2) = df.TrialFunctions(W1)
    (tau0, tau1, tau2) = df.TestFunctions(W1)

    W2 = df.MixedFunctionSpace([DG, DG, DG])
    (u0, u1, u2) = df.TrialFunctions(W2)
    (v0, v1, v2) = df.TestFunctions(W2)

    # what we need is A x = K1 m
    a0 = (df.dot(sigma0, tau0) + df.dot(sigma1, tau1) + df.dot(sigma2, tau2)) * df.dx
    self.A = df.assemble(a0)

    a1 = -(df.div(tau0) * u0 + df.div(tau1) * u1 + df.div(tau2) * u2) * df.dx
    self.K1 = df.assemble(a1)

    def boundary(x, on_boundary):
        return on_boundary

    # actually, we need to apply the Neumann boundary conditions.
    # we need a tensor here
    zero = df.Constant((0, 0, 0, 0, 0, 0, 0, 0, 0))
    self.bc = df.DirichletBC(W1, zero, boundary)
    self.bc.apply(self.A)

    a2 = (df.div(sigma0) * v0 + df.div(sigma1) * v1 + df.div(sigma2) * v2) * df.dx
    self.K2 = df.assemble(a2)
    self.L = df.assemble((v0 + v1 + v2) * df.dx).array()

    self.mu0 = mu0
    self.exchange_factor = 2.0 * self.C / (self.mu0 * Ms * self.unit_length**2)
    self.coeff = self.exchange_factor / self.L

    # b = K m
    self.b = df.PETScVector()

    # the vector in BDM space
    self.sigma_v = df.PETScVector(self.K2.size(1))

    # to store the exchange fields
    self.H = df.PETScVector()
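# Hedged sketch (an assumption, not necessarily the project's actual method) of how the
# operators assembled in `setup` are typically combined at run time: form b = K1 m,
# solve the flux system A sigma = b in the BDM space, and scale H = K2 sigma by the
# exchange prefactor computed above.
def compute_field(self):
    self.K1.mult(self.m.vector(), self.b)     # b = K1 m
    self.bc.apply(self.b)                     # keep the rhs consistent with the bc applied to A
    df.solve(self.A, self.sigma_v, self.b)    # solve A sigma = b for the BDM flux dofs
    self.K2.mult(self.sigma_v, self.H)        # H = K2 sigma
    return self.H.array() * self.coeff        # scale by 2 C / (mu0 Ms unit_length^2) per dof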
def do_one_L_problem(self, i_vk, C_VK_data=0.1): """Solve the L equation for one (K,V) position using the "mixed" methodology, so that we get on-grid solutions of both `f` and `df/dL`. This requires that we use a 2nd-degree element to solve for the `df/dL` vector, called `sigma` here. """ W = d.FunctionSpace(self.l_mesh, self.c12.l_element * self.c11.l_element) D = d.Function(self.c12.l_space) # note: W.sub(0) does not work C_VK = d.Function(self.c11.l_space) sigma, u = d.TrialFunctions(W) # u is my 'f', sigma is df/dL tau, v = d.TestFunctions(W) soln = d.Function(W) bc = [d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)] a = (u * tau.dx(0) + d.dot(sigma, tau) + D * sigma * v.dx(0)) * d.dx L = C_VK * v * d.dx equation = (a == L) # typical D value: 1e-23 ddata = np.array(self.c12.D_LL[:,i_vk]) #print('D:', ddata.min(), ddata.max(), ddata.mean(), np.sqrt((ddata**2).mean())) D.vector()[:] = ddata C_VK.vector()[:] = C_VK_data C_VK.vector()[self.i_lmax] += self.c11.source_term[:,i_vk] d.solve(equation, soln, bc) sigma, u = soln.split(deepcopy=True) return self.c11.l_sort(), self.c11.l_sort(u), self.c12.l_sort(), self.c12.l_sort(sigma)
def create(self): self.metadata = { "quadrature_degree": self.deg_q, "quadrature_scheme": "default", } self.dxm = df.dx(metadata=self.metadata, subdomain_data=self.mesh_function) # solution field Ed = df.VectorElement("CG", self.mesh.ufl_cell(), degree=self.deg_d) Ee = df.FiniteElement("CG", self.mesh.ufl_cell(), degree=self.deg_d) self.V = df.FunctionSpace(self.mesh, Ed * Ee) self.Vd, self.Ve = self.V.split() self.dd, self.de = df.TrialFunctions(self.V) self.d_, self.e_ = df.TestFunctions(self.V) self.u = df.Function(self.V, name="d-e mixed space") self.d, self.e = df.split(self.u) # generic quadrature function spaces VQF, VQV, VQT = c.helper.spaces(self.mesh, self.deg_q, c.q_dim(self.constraint)) # quadrature functions Q = c.Q # inputs to the model self.q_in = OrderedDict() self.q_in[Q.EPS] = df.Function(VQV, name="current strains") self.q_in[Q.E] = df.Function( VQF, name="current nonlocal equivalent strains") self.q_in_calc = {} self.q_in_calc[Q.EPS] = c.helper.LocalProjector( self.eps(self.d), VQV, self.dxm) self.q_in_calc[Q.E] = c.helper.LocalProjector(self.e, VQF, self.dxm) # outputs of the model self.q = {} self.q[Q.SIGMA] = df.Function(VQV, name="current stresses") self.q[Q.DSIGMA_DEPS] = df.Function(VQT, name="stress-strain tangent") self.q[Q.DSIGMA_DE] = df.Function( VQV, name="stress-nonlocal-strain tangent") self.q[Q.EEQ] = df.Function(VQF, name="current (local) equivalent strain") self.q[Q.DEEQ] = df.Function(VQV, name="equivalent-strain-strain tangent") self.q_history = { Q.KAPPA: df.Function(VQF, name="current history variable kappa") } self.n = len(self.q[Q.SIGMA].vector().get_local()) // c.q_dim( self.constraint) self.nq = self.n // self.mesh.num_cells() self.ip_flags = None if self.mesh_function is not None: self.ip_flags = np.repeat(self.mesh_function.array(), self.nq)
def do_VK_problems(self, C_L_data, dest_f_11=None, dest_dfdV_11=None, dest_dfdK_11=None): if dest_f_11 is None: dest_f_11 = np.empty(self.c11.cube_shape) else: assert dest_f_11.shape == self.c11.cube_shape if dest_dfdV_11 is None: dest_dfdV_11 = np.empty(self.c11.cube_shape) else: assert dest_dfdV_11.shape == self.c11.cube_shape if dest_dfdK_11 is None: dest_dfdK_11 = np.empty(self.c11.cube_shape) else: assert dest_dfdK_11.shape == self.c11.cube_shape W = d.FunctionSpace(self.vk_mesh, self.c21.vk_vector_element * self.c11.vk_scalar_element) # We can treat the tensor data array as having shape (N, 2, 2), where # N is the number of elements of the equivalent scalar gridding of the # mesh. array[:,0,0] is element [0,0] of the tensor. array[:,0,1] is # the upper right element, etc. D = d.Function(self.c21.vk_tensor_space) dbuf = np.empty(D.vector().size()).reshape((-1, 2, 2)) C_L = d.Function(self.c11.vk_scalar_space) sigma, u = d.TrialFunctions(W) # u is my 'f' tau, v = d.TestFunctions(W) a = (u * d.div(tau) + d.dot(sigma, tau) + d.inner(D * sigma, d.grad(v))) * d.dx L = C_L * v * d.dx equation = (a == L) soln = d.Function(W) bc = d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary) for i_l in range(self.c21.l_coords.size): dbuf[:,0,0] = self.c21.D_VV[i_l] dbuf[:,1,0] = self.c21.D_VK[i_l] dbuf[:,0,1] = self.c21.D_VK[i_l] dbuf[:,1,1] = self.c21.D_KK[i_l] D.vector()[:] = dbuf.reshape((-1,)) if i_l == self.i_lmax: C_L.vector()[:] = C_L_data[i_l] + self.c11.source_term else: C_L.vector()[:] = C_L_data[i_l] d.solve(equation, soln, bc) s_sigma, s_u = soln.split(deepcopy=True) dest_f_11[i_l] = s_u.vector().array() s_sigma = s_sigma.vector().array().reshape((-1, 2)) dest_dfdV_11[i_l] = self.c21.vk_downsample(s_sigma[:,0]) dest_dfdK_11[i_l] = self.c21.vk_downsample(s_sigma[:,1]) return dest_f_11, dest_dfdV_11, dest_dfdK_11
def do_L_problems(self, C_VK_data, dest_f_11=None, dest_dfdL_11=None): """Solve the L equations using the "mixed" methodology, so that we get on-grid solutions of both `f` and `df/dL`. This requires that we use a 2nd-degree element to solve for the `df/dL` vector, called `sigma` here. """ if dest_f_11 is None: dest_f_11 = np.empty(self.c11.cube_shape) else: assert dest_f_11.shape == self.c11.cube_shape if dest_dfdL_11 is None: dest_dfdL_11 = np.empty(self.c11.cube_shape) else: assert dest_dfdL_11.shape == self.c11.cube_shape W = d.FunctionSpace(self.l_mesh, self.c12.l_element * self.c11.l_element) D = d.Function(self.c12.l_space) C_VK = d.Function(self.c11.l_space) sigma, u = d.TrialFunctions(W) # u is my 'f' tau, v = d.TestFunctions(W) soln = d.Function(W) bc = [d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)] a = (u * tau.dx(0) + d.dot(sigma, tau) + D * sigma * v.dx(0)) * d.dx L = C_VK * v * d.dx equation = (a == L) # contiguous arrays are required for input to `x.vector()[:]` so we # need buffers. Well, we don't need them, but this should save on # allocations. buf11 = np.empty(self.c11.l_coords.shape) buf12 = np.empty(self.c12.l_coords.shape) for i_vk in range(self.c12.logv_coords.size): buf12[:] = self.c12.D_LL[:,i_vk] D.vector()[:] = buf12 buf11[:] = C_VK_data[:,i_vk] buf11[self.i_lmax] += self.c11.source_term C_VK.vector()[:] = buf11 d.solve(equation, soln, bc) s_sigma, s_u = soln.split(deepcopy=True) dest_f_11[:,i_vk] = s_u.vector().array() dest_dfdL_11[:,i_vk] = self.c12.l_downsample(s_sigma) return dest_f_11, dest_dfdL_11
def set_function_space(self):
    """Set up the vector function space and the trial/test functions for the moment system."""
    self.V = df.VectorFunctionSpace(self.mesh, 'P', 1, dim=self.number_of_moment)

    # Set test function(s)
    v_list = df.TestFunctions(self.V)
    # Convert to ufl form
    self.v = df.as_vector(v_list)

    # Set trial function(s)
    if self.problem_type == 'nonlinear':
        u_list = df.Function(self.V)
    elif self.problem_type == 'linear':
        u_list = df.TrialFunctions(self.V)
    # Convert to ufl form
    self.u = df.as_vector(u_list)
def __init__(self, Vh_STATE, Vhs, bcs0, datafile, dx=dl.dx): self.dx = dx x, y, U, V, uu, vv, ww, uv, k = np.loadtxt(datafile, skiprows=2, unpack=True) u_fun_mean = VelocityDNS(x=x, y=y, U=U, V=V, symmetrize=True, coflow=0.) u_fun_data = VelocityDNS(x=x, y=y, U=U, V=V, symmetrize=False, coflow=0.) k_fun_mean = KDNS(x=x, y=y, k=k, symmetrize=True) k_fun_data = KDNS(x=x, y=y, k=k, symmetrize=False) u_data = dl.interpolate(u_fun_data, Vhs[0]) k_data = dl.interpolate(k_fun_data, Vhs[2]) noise_var_u = dl.assemble( dl.inner(u_data - u_fun_mean, u_data - u_fun_mean) * self.dx) noise_var_k = dl.assemble( dl.inner(k_data - k_fun_mean, k_data - k_fun_mean) * self.dx) u_trial, p_trial, k_trial, e_trial = dl.TrialFunctions(Vh_STATE) u_test, p_test, k_test, e_test = dl.TestFunctions(Vh_STATE) Wform = dl.Constant(1./noise_var_u)*dl.inner(u_trial, u_test)*self.dx + \ dl.Constant(1./noise_var_k)*dl.inner(k_trial, k_test)*self.dx self.W = dl.assemble(Wform) dummy = dl.Vector() self.W.init_vector(dummy, 0) [bc.zero(self.W) for bc in bcs0] Wt = Transpose(self.W) [bc.zero(Wt) for bc in bcs0] self.W = Transpose(Wt) xfun = dl.Function(Vh_STATE) assigner = dl.FunctionAssigner(Vh_STATE, Vhs) assigner.assign( xfun, [u_data, dl.Function(Vhs[1]), k_data, dl.Function(Vhs[3])]) self.d = xfun.vector()
def __init__(self, Vh_STATE, Vhs, weights, geo, bcs0, datafile, variance_u, variance_g): if hasattr(geo, "dx"): self.dx = geo.dx(geo.PHYSICAL) else: self.dx = dl.dx self.ds = geo.ds(geo.AXIS) x, y, U, V, uu, vv, ww, uv, k = np.loadtxt(datafile, skiprows=2, unpack=True) u_fun_data = VelocityDNS(x=x, y=y, U=U, V=V, symmetrize=True, coflow=0.) u_data = dl.interpolate(u_fun_data, Vhs[0]) if Vh_STATE.num_sub_spaces() == 3: u_trial, p_trial, g_trial = dl.TrialFunctions(Vh_STATE) u_test, p_test, g_test = dl.TestFunctions(Vh_STATE) else: raise InputError() Wform = dl.Constant(1./variance_u)*dl.inner(u_trial, u_test)*self.dx +\ dl.Constant(1./variance_g)*g_trial*g_test*self.ds self.W = dl.assemble(Wform) dummy = dl.Vector() self.W.init_vector(dummy, 0) [bc.zero(self.W) for bc in bcs0] Wt = Transpose(self.W) [bc.zero(Wt) for bc in bcs0] self.W = Transpose(Wt) xfun = dl.Function(Vh_STATE) assigner = dl.FunctionAssigner(Vh_STATE, Vhs) assigner.assign(xfun, [ u_data, dl.Function(Vhs[1]), dl.interpolate(dl.Constant(1.), Vhs[2]) ]) self.d = xfun.vector() self.w = (weights * 0.5)
def create_forms(W, rho, nu, g_a, boundary_markers, gamma=0.0): v, p = df.TrialFunctions(W) v_t, p_t = df.TestFunctions(W) a = ( 2.0 * nu * df.inner(df.sym(df.grad(v)), df.grad(v_t)) - p * df.div(v_t) - df.div(v) * p_t #- nu * df.div(v) * p_t ) * df.dx L = rho * df.inner(df.Constant((0.0, -g_a)), v_t) * df.dx # Grad-div stabilization a += df.Constant(gamma) * df.div(v) * df.div(v_t) * df.dx return a, L
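# Hedged usage sketch for `create_forms` (all parameter values below are illustrative
# assumptions): build a Taylor-Hood space, create the forms with grad-div
# stabilization, and solve a gravity-driven Stokes problem with no-slip walls
# everywhere except an open top boundary (left open so the pressure level is fixed).
def _example_stokes_solve():
    mesh = df.UnitSquareMesh(16, 16)
    P2 = df.VectorElement("P", mesh.ufl_cell(), 2)
    P1 = df.FiniteElement("P", mesh.ufl_cell(), 1)
    W = df.FunctionSpace(mesh, P2 * P1)

    markers = df.MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
    a, L = create_forms(W, rho=df.Constant(1.0), nu=df.Constant(1.0),
                        g_a=9.81, boundary_markers=markers, gamma=0.1)

    # no-slip on all boundaries except the top
    bc = df.DirichletBC(W.sub(0), df.Constant((0.0, 0.0)),
                        "on_boundary && x[1] < 1.0 - DOLFIN_EPS")
    w = df.Function(W)
    df.solve(a == L, w, bc)
    return w.split(deepcopy=True)   # (velocity, pressure)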
def do_one_VK_problem(self, i_l, C_L_data=0.1): """Solve the VK equations for one L position using the "mixed" methodology, so that we get on-grid solutions for `f` and `grad_VK(f)`. """ W = d.FunctionSpace(self.vk_mesh, self.c21.vk_vector_element * self.c11.vk_scalar_element) # We can treat the tensor data array as having shape (N, 2, 2), where # N is the number of elements of the equivalent scalar gridding of the # mesh. array[:,0,0] is element [0,0] of the tensor. array[:,0,1] is # the upper right element, etc. D = d.Function(self.c21.vk_tensor_space) dbuf = np.empty(D.vector().size()).reshape((-1, 2, 2)) C_L = d.Function(self.c11.vk_scalar_space) sigma, u = d.TrialFunctions(W) # u is my 'f' tau, v = d.TestFunctions(W) a = (u * d.div(tau) + d.dot(sigma, tau) + d.inner(D * sigma, d.grad(v))) * d.dx L = C_L * v * d.dx equation = (a == L) soln = d.Function(W) bc = d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary) dbuf[:,0,0] = self.c21.D_VV[i_l] dbuf[:,1,0] = self.c21.D_VK[i_l] dbuf[:,0,1] = self.c21.D_VK[i_l] dbuf[:,1,1] = self.c21.D_KK[i_l] D.vector()[:] = dbuf.reshape((-1,)) if i_l == self.i_lmax: C_L.vector()[:] = C_L_data + self.c11.source_term else: C_L.vector()[:] = C_L_data d.solve(equation, soln, bc) ssigma, su = soln.split(deepcopy=True) u = self.c11.vk_to_rect(su.vector().array()) sigma = ssigma.vector().array().reshape((-1, 2)) dudv = self.c21.vk_to_rect(sigma[:,0]) dudk = self.c21.vk_to_rect(sigma[:,1]) return u, dudv, dudk
def create_forms(W, rho, nu, F, g_a, p_h, boundary_markers): v, p = df.TrialFunctions(W) v_t, p_t = df.TestFunctions(W) a = ( 2.0 * nu * df.inner(df.sym(df.grad(v)), df.grad(v_t)) - p * df.div(v_t) - df.div(v) * p_t #- nu * df.div(v) * p_t ) * df.dx L = rho * df.inner(df.Constant((0.0, -g_a)), v_t) * df.dx n = df.FacetNormal(W.mesh()) ds = df.Measure("ds", subdomain_data=boundary_markers) L += df.inner(df.Constant((F, 0.0)), v_t) * ds(3) # driving force L -= p_h * df.inner(n, v_t) * (ds(2) + ds(4)) # hydrostatic balance return a, L
def set_forms(self, unknown, geom_ord=[0]): """ Set up weak forms of elliptic PDE. """ if any(s >= 0 for s in geom_ord): ## forms for forward equation ## # 4. Define variational problem # functions if not hasattr(self, 'states_fwd'): self.states_fwd = df.Function(self.W) # u, l = df.split(self.states_fwd) u, l = df.TrialFunctions(self.W) v, m = df.TestFunctions(self.W) f = self._source_term(degree=2) # variational forms if 'true' in str(type(unknown)): unknown = df.interpolate(unknown, self.V) self.F = df.exp(unknown) * df.inner( df.grad(u), df.grad(v)) * df.dx + ( u * m + v * l) * self.ds - f * v * df.dx + self.nugg * l * m * df.dx # self.dFdstates = df.derivative(self.F, self.states_fwd) # Jacobian # self.a = unknown*df.inner(df.grad(u), df.grad(v))*df.dx + (u*m + v*l)*self.ds + self.nugg*l*m*df.dx # self.L = f*v*df.dx if any(s >= 1 for s in geom_ord): ## forms for adjoint equation ## # Set up the objective functional J # u,_,_ = df.split(self.states_fwd) # J_form = obj.form(u) # Compute adjoint of forward operator F2 = df.action(self.F, self.states_fwd) self.dFdstates = df.derivative( F2, self.states_fwd) # linearized forward operator args = ufl.algorithms.extract_arguments( self.dFdstates) # arguments for bookkeeping self.adj_dFdstates = df.adjoint( self.dFdstates, reordered_arguments=args ) # adjoint linearized forward operator # self.dJdstates = df.derivative(J_form, self.states_fwd, df.TestFunction(self.W)) # derivative of functional with respect to solution # self.dirac_1 = obj.ptsrc(u,1) # dirac_1 cannot be initialized here because it involves evaluation ## forms for gradient ## self.dFdunknown = df.derivative(F2, unknown) self.adj_dFdunknown = df.adjoint(self.dFdunknown)
def buildFunctionSpace(self):
    # -----------------------------------------------------------------------------------
    """
    Build the product function space
    """
    ## Deprecated code from earlier versions of FEniCS
    #self.VReal = df.FunctionSpace(self.mesh,'CG',self.meshOpt['polynomialOrder'])
    #self.VImag = df.FunctionSpace(self.mesh,'CG',self.meshOpt['polynomialOrder'])
    #self.V = df.MixedFunctionSpace([self.VReal,self.VImag])

    elem = df.FiniteElement('CG', self.mesh.ufl_cell(), self.meshOpt['polynomialOrder'])
    self.VReal = df.FunctionSpace(self.mesh, elem)
    self.VImag = df.FunctionSpace(self.mesh, elem)
    self.V = df.FunctionSpace(self.mesh, elem * elem)

    self.ur, self.ui = df.TrialFunctions(self.V)
    self.wr, self.wi = df.TestFunctions(self.V)

    self._buildFunctionSpaceCompleted = True
    return
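# Hedged sketch (illustrative, not the class's actual weak form) of how the paired
# real/imaginary trial and test functions built above can be combined: a Helmholtz-type
# problem split into real and imaginary parts. The wavenumber `k0`, the unit source and
# the homogeneous Dirichlet data are assumptions.
def _exampleComplexForm(self, k0=2.0):
    k = df.Constant(k0)
    a = (df.inner(df.grad(self.ur), df.grad(self.wr))
         + df.inner(df.grad(self.ui), df.grad(self.wi))
         - k**2 * (self.ur * self.wr + self.ui * self.wi)) * df.dx
    L = df.Constant(1.0) * self.wr * df.dx      # purely real source term
    bc = df.DirichletBC(self.V, df.Constant((0.0, 0.0)), 'on_boundary')
    u = df.Function(self.V)
    df.solve(a == L, u, bc)
    return u.split(deepcopy=True)               # (real part, imaginary part)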
def define_momentum_equation(self): """ Setup the momentum equation weak form """ sim = self.simulation Vuvw = sim.data['uvw_star'].function_space() tests = dolfin.TestFunctions(Vuvw) trials = dolfin.TrialFunctions(Vuvw) # Split into components v = dolfin.as_vector(tests[:]) u = dolfin.as_vector(trials[:]) # The pressure is explicit p* and q is zero (on a domain, to avoid warnings) p = sim.data['p'] class MyZero(Zero): def ufl_domains(self): return p.ufl_domains() q = MyZero() lm_trial = lm_test = None # Define the momentum equation weak form eq = define_dg_equations( u, v, p, q, lm_trial, lm_test, self.simulation, include_hydrostatic_pressure=self.include_hydrostatic_pressure, incompressibility_flux_type='central', # Only used with q use_grad_q_form=False, # Only used with q use_grad_p_form=self.use_grad_p_form, use_stress_divergence_form=self.use_stress_divergence_form, ) self.form_lhs, self.form_rhs = dolfin.system(eq)
def variational_forms(self, kn: df.Constant) -> tp.Tuple[tp.Any, tp.Any]:
    """Create the variational forms corresponding to the given
    discretization of the given system of equations.

    *Arguments*
      kn (:py:class:`ufl.Expr` or float)
        The time step

    *Returns*
      (lhs, rhs) (:py:class:`tuple` of :py:class:`ufl.Form`)
    """
    # Extract theta parameter and conductivities
    theta = self._parameters["theta"]
    Mi = self._M_i
    Me = self._M_e

    # Define variational formulation
    if self._parameters["linear_solver_type"] == "direct":
        v, u, l = df.TrialFunctions(self.VUR)
        w, q, lamda = df.TestFunctions(self.VUR)
    else:
        v, u = df.TrialFunctions(self.VUR)
        w, q = df.TestFunctions(self.VUR)

    # Get physical parameters
    chi = self._parameters["Chi"]
    capacitance = self._parameters["Cm"]

    Dt_v = (v - self.v_) / kn
    Dt_v *= chi * capacitance
    v_mid = theta * v + (1.0 - theta) * self.v_

    # Set up measures and rhs from stimulus
    dz = df.Measure("dx", domain=self._mesh, subdomain_data=self._cell_domains)
    db = df.Measure("ds", domain=self._mesh, subdomain_data=self._facet_domains)

    # Get domain tags
    cell_tags = map(int, set(self._cell_domains.array()))      # np.int64 does not work
    facet_tags = map(int, set(self._facet_domains.array()))

    # Loop over all domains
    G = Dt_v * w * dz()
    for key in cell_tags:
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(w)) * dz(key)
        G += df.inner(Mi[key] * df.grad(u), df.grad(w)) * dz(key)
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(q)) * dz(key)
        G += df.inner((Mi[key] + Me[key]) * df.grad(u), df.grad(q)) * dz(key)

        if self._I_s is None:
            G -= chi * df.Constant(0) * w * dz(key)
        else:
            _is = self._I_s.get(key, df.Constant(0))
            G -= chi * _is * w * dz(key)

        # If Lagrange multiplier
        if self._parameters["linear_solver_type"] == "direct":
            G += (lamda * u + l * q) * dz(key)

        if self._I_a:
            G -= chi * self._I_a[key] * q * dz(key)

    for key in facet_tags:
        if self._ect_current is not None:
            # Default to 0 if not defined for tag. I do not think I should apply `chi` here.
            G += self._ect_current.get(key, df.Constant(0)) * q * db(key)

    a, L = df.system(G)
    return a, L
def step(self, t0: float, t1: float) -> None:
    """Solve on the given time interval (t0, t1).

    Arguments:
        t0 (:py:class:`float`)
            Start time of the step
        t1 (:py:class:`float`)
            End time of the step

    *Invariants*
        Assuming that v_ is in the correct state for t0, gives self.vur in
        correct state at t1.
    """
    timer = df.Timer("PDE step")

    # Extract theta and conductivities
    theta = self._parameters["theta"]
    Mi = self._M_i
    Me = self._M_e

    # Extract interval and thus time-step
    kn = df.Constant(t1 - t0)

    # Define variational formulation
    if self._parameters["linear_solver_type"] == "direct":
        v, u, l = df.TrialFunctions(self.VUR)
        w, q, lamda = df.TestFunctions(self.VUR)
    else:
        v, u = df.TrialFunctions(self.VUR)
        w, q = df.TestFunctions(self.VUR)

    # Get physical parameters
    chi = self._parameters["Chi"]
    capacitance = self._parameters["Cm"]

    Dt_v = (v - self.v_) / kn
    Dt_v *= chi * capacitance
    v_mid = theta * v + (1.0 - theta) * self.v_

    # Set time
    t = t0 + theta * (t1 - t0)
    self.time.assign(t)

    # Define spatial integration domains:
    dz = df.Measure("dx", domain=self._mesh, subdomain_data=self._cell_domains)
    db = df.Measure("ds", domain=self._mesh, subdomain_data=self._facet_domains)

    # Get domain labels
    cell_tags = map(int, set(self._cell_domains.array()))      # np.int64 does not work
    facet_tags = map(int, set(self._facet_domains.array()))

    # Loop over all domain labels
    G = Dt_v * w * dz()
    for key in cell_tags:
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(w)) * dz(key)
        G += df.inner(Mi[key] * df.grad(u), df.grad(w)) * dz(key)
        G += df.inner(Mi[key] * df.grad(v_mid), df.grad(q)) * dz(key)
        G += df.inner((Mi[key] + Me[key]) * df.grad(u), df.grad(q)) * dz(key)

        if self._I_s is None:
            G -= chi * df.Constant(0) * w * dz(key)
        else:
            # _is = self._I_s.get(key, df.Constant(0))
            # G -= chi*_is*w*dz(key)
            G -= chi * self._I_s[key] * w * dz(key)

        # If Lagrange multiplier
        if self._parameters["linear_solver_type"] == "direct":
            G += (lamda * u + l * q) * dz(key)

        # Add applied current as source in elliptic equation if applicable
        if self._I_a:
            G -= chi * self._I_a[key] * q * dz(key)

    if self._ect_current is not None:
        for key in facet_tags:
            # Default to 0 if not defined for that facet tag
            # TODO: Should I include `chi` here? I do not think so
            G += self._ect_current.get(key, df.Constant(0)) * q * db(key)

    # Define variational problem
    a, L = df.system(G)
    pde = df.LinearVariationalProblem(a, L, self.vur, bcs=self._bcs)

    # Set up solver
    solver = df.LinearVariationalSolver(pde)
    solver.solve()
values[line.index()] = other markers.set_values(values) def top(x): return np.abs(x[0] - width / 2) < dolfin.DOLFIN_EPS def bottom(x): return np.abs(x[0] + width / 2) < dolfin.DOLFIN_EPS print('ugly stuff took {:.2f} s'.format(time.time() - t_ini)) nodal_space = dolfin.FunctionSpace(mesh, 'Lagrange', 1) (L_i, ) = dolfin.TestFunctions(nodal_space) (L_j, ) = dolfin.TrialFunctions(nodal_space) bc_ground = dolfin.DirichletBC(nodal_space, dolfin.Constant(0.0), markers, other) bc_source = dolfin.DirichletBC(nodal_space, dolfin.Constant(1.0), markers, metal) rho = dolfin.Constant(0.0) A_ij = dolfin.inner(dolfin.grad(L_i), dolfin.grad(L_j)) * dolfin.dx b_ij = rho * L_j * dolfin.dx A = dolfin.assemble(A_ij) b = dolfin.assemble(b_ij) bc_ground.apply(A, b) bc_source.apply(A, b) phi = dolfin.Function(nodal_space) c = phi.vector()
def Cost(xp): comm = nMPI.COMM_WORLD mpi_rank = comm.Get_rank() x1, x2 = xp #The two variables (length and feed offset) rs = 8.0 # radiation boundary radius l = x1 # Patch length w = 4.5 # Patch width s1 = x2 * x1 / 2.0 # Feed offset h = 1.0 # Patch height t = 0.05 # Metal thickness lc = 1.0 # Coax length rc = 0.25 # Coax shield radius cc = 0.107 #Coax center conductor 50 ohm air diel eps = 1.0e-4 tol = 1.0e-6 eta = 377.0 # vacuum intrinsic wave impedance eps_c = 1.0 # dielectric permittivity k0 = 2.45 * 2.0 * np.pi / 30.0 # Frequency in GHz ls = 0.025 #Mesh density parameters for GMSH lm = 0.8 lw = 0.06 lp = 0.3 # Run GMSH only on one MPI processor (process 0). # We use the GMSH Python interface to generate the geometry and mesh objects if mpi_rank == 0: print("x[0] = {0:<f}, x[1] = {1:<f} ".format(xp[0], xp[1])) print("length = {0:<f}, width = {1:<f}, feed offset = {2:<f}".format(l, w, s1)) gmsh.initialize() gmsh.option.setNumber('General.Terminal', 1) gmsh.model.add("SimplePatchOpt") # Radiation sphere gmsh.model.occ.addSphere(0.0, 0.0, 0.0, rs, 1) gmsh.model.occ.addBox(0.0, -rs, 0.0, rs, 2*rs, rs, 2) gmsh.model.occ.intersect([(3,1)],[(3,2)], 3, removeObject=True, removeTool=True) # Patch gmsh.model.occ.addBox(0.0, -l/2, h, w/2, l, t, 4) # coax center gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc+h, cc, 5, 2.0*np.pi) # coax shield gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc, rc, 7) gmsh.model.occ.addBox(0.0, s1-rc, -lc, rc, 2.0*rc, lc, 8) gmsh.model.occ.intersect([(3,7)], [(3,8)], 9, removeObject=True, removeTool=True) gmsh.model.occ.fuse([(3,3)], [(3,9)], 10, removeObject=True, removeTool=True) # cutout internal boundaries gmsh.model.occ.cut([(3,10)], [(3,4),(3,5)], 11, removeObject=True, removeTool=True) gmsh.option.setNumber('Mesh.MeshSizeMin', ls) gmsh.option.setNumber('Mesh.MeshSizeMax', lm) gmsh.option.setNumber('Mesh.Algorithm', 6) gmsh.option.setNumber('Mesh.Algorithm3D', 1) gmsh.option.setNumber('Mesh.MshFileVersion', 4.1) gmsh.option.setNumber('Mesh.Format', 1) gmsh.option.setNumber('Mesh.MinimumCirclePoints', 36) gmsh.option.setNumber('Mesh.CharacteristicLengthFromCurvature', 1) gmsh.model.occ.synchronize() pts = gmsh.model.getEntities(0) gmsh.model.mesh.setSize(pts, lm) #Set background mesh density pts = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+t+eps) gmsh.model.mesh.setSize(pts, ls) pts = gmsh.model.getEntitiesInBoundingBox(-eps, s1-rc-eps, -lc-eps, rc+eps, s1+rc+eps, h+eps) gmsh.model.mesh.setSize(pts, lw) pts = gmsh.model.getEntitiesInBoundingBox(-eps, -rc-eps, -eps, rc+eps, rc+eps, eps) gmsh.model.mesh.setSize(pts, lw) # Embed points to reduce mesh density on patch faces fce1 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h+t-eps, w/2+eps, l/2+eps, h+t+eps, 2) gmsh.model.occ.synchronize() gmsh.model.geo.addPoint(w/4, -l/4, h+t, lp, 1000) gmsh.model.geo.addPoint(w/4, 0.0, h+t, lp, 1001) gmsh.model.geo.addPoint(w/4, l/4, h+t, lp, 1002) gmsh.model.geo.synchronize() gmsh.model.occ.synchronize() print(fce1) fce2 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+eps, 2) gmsh.model.geo.addPoint(w/4, -9*l/32, h, lp, 1003) gmsh.model.geo.addPoint(w/4, 0.0, h, lp, 1004) gmsh.model.geo.addPoint(w/4, 9*l/32, h, lp, 1005) gmsh.model.geo.synchronize() for tt in fce1: gmsh.model.mesh.embed(0, [1000, 1001, 1002], 2, tt[1]) for tt in fce2: gmsh.model.mesh.embed(0, [1003, 1004, 1005], 2, tt[1]) print(fce2) gmsh.model.occ.remove(fce1) gmsh.model.occ.remove(fce2) gmsh.model.occ.synchronize() 
gmsh.model.addPhysicalGroup(3, [11], 1) gmsh.model.setPhysicalName(3, 1, "Air") gmsh.model.mesh.optimize("Relocate3D", niter=5) gmsh.model.mesh.generate(3) gmsh.write("SimplePatch.msh") gmsh.finalize() # Mesh generation is finished. We now use Meshio to translate GMSH mesh to xdmf file for # importation into Fenics FE solver msh = meshio.read("SimplePatch.msh") for cell in msh.cells: if cell.type == "tetra": tetra_cells = cell.data for key in msh.cell_data_dict["gmsh:physical"].keys(): if key == "tetra": tetra_data = msh.cell_data_dict["gmsh:physical"][key] tetra_mesh = meshio.Mesh(points=msh.points, cells={"tetra": tetra_cells}, cell_data={"VolumeRegions":[tetra_data]}) meshio.write("mesh.xdmf", tetra_mesh) # Here we import the mesh into Fenics mesh = dolfin.Mesh() with dolfin.XDMFFile("mesh.xdmf") as infile: infile.read(mesh) mvc = dolfin.MeshValueCollection("size_t", mesh, 3) with dolfin.XDMFFile("mesh.xdmf") as infile: infile.read(mvc, "VolumeRegions") cf = dolfin.cpp.mesh.MeshFunctionSizet(mesh, mvc) # The boundary classes for the FE solver class PEC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary class InputBC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary and dolfin.near(x[2], -lc, tol) class OutputBC(dolfin.SubDomain): def inside(self, x, on_boundary): rr = np.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]) return on_boundary and dolfin.near(rr, 8.0, 1.0e-1) class PMC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary and dolfin.near(x[0], 0.0, tol) # Volume domains dolfin.File("VolSubDomains.pvd").write(cf) dolfin.File("Mesh.pvd").write(mesh) # Mark boundaries sub_domains = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim() - 1) sub_domains.set_all(4) pec = PEC() pec.mark(sub_domains, 0) in_port = InputBC() in_port.mark(sub_domains, 1) out_port = OutputBC() out_port.mark(sub_domains, 2) pmc = PMC() pmc.mark(sub_domains, 3) dolfin.File("BoxSubDomains.pvd").write(sub_domains) # Set up function spaces cell = dolfin.tetrahedron ele_type = dolfin.FiniteElement('N1curl', cell, 2, variant="integral") # H(curl) element for EM V2 = dolfin.FunctionSpace(mesh, ele_type * ele_type) V = dolfin.FunctionSpace(mesh, ele_type) (u_r, u_i) = dolfin.TrialFunctions(V2) (v_r, v_i) = dolfin.TestFunctions(V2) dolfin.info(mesh) #surface integral definitions from boundaries ds = dolfin.Measure('ds', domain = mesh, subdomain_data = sub_domains) #volume regions dx_air = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 1) dx_subst = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 2) # with source and sink terms u0 = dolfin.Constant((0.0, 0.0, 0.0)) #PEC definition # The incident field sources (E and H-fields) h_src = dolfin.Expression(('-(x[1] - s) / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[0] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', 0.0), degree = 2, s = s1) e_src = dolfin.Expression(('x[0] / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[1] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', 0.0), degree = 2, s = s1) Rrad = dolfin.Expression(('sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2])'), degree = 2) #Boundary condition dictionary boundary_conditions = {0: {'PEC' : u0}, 1: {'InputBC': (h_src)}, 2: {'OutputBC': Rrad}} n = dolfin.FacetNormal(mesh) #Build PEC boundary conditions for real and imaginary parts bcs = [] for i in boundary_conditions: if 'PEC' in boundary_conditions[i]: bc = dolfin.DirichletBC(V2.sub(0), boundary_conditions[i]['PEC'], sub_domains, i) 
bcs.append(bc) bc = dolfin.DirichletBC(V2.sub(1), boundary_conditions[i]['PEC'], sub_domains, i) bcs.append(bc) # Build input BC source term and loading term integral_source = [] integrals_load =[] for i in boundary_conditions: if 'InputBC' in boundary_conditions[i]: r = boundary_conditions[i]['InputBC'] bb1 = 2.0 * (k0 * eta) * dolfin.inner(v_i, dolfin.cross(n, r)) * ds(i) #Factor of two from field equivalence principle integral_source.append(bb1) bb2 = dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 * np.sqrt(eps_c) * ds(i) integrals_load.append(bb2) bb2 = dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 * np.sqrt(eps_c) * ds(i) integrals_load.append(bb2) for i in boundary_conditions: if 'OutputBC' in boundary_conditions[i]: r = boundary_conditions[i]['OutputBC'] bb2 = (dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_i)) / r)* ds(i) integrals_load.append(bb2) bb2 = (dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_r), dolfin.cross(n, u_r)) / r)* ds(i) integrals_load.append(bb2) # for PMC, do nothing. Natural BC. a = (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - eps_c * k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_subst + (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_air + sum(integrals_load) L = sum(integral_source) u1 = dolfin.Function(V2) vdim = u1.vector().size() print("Solution vector size =", vdim) dolfin.solve(a == L, u1, bcs, solver_parameters = {'linear_solver' : 'mumps'}) #Here we write files of the field solution for inspection u1_r, u1_i = u1.split(True) fp = dolfin.File("EField_r.pvd") fp << u1_r fp = dolfin.File("EField_i.pvd") fp << u1_i # Compute power relationships and reflection coefficient H = dolfin.interpolate(h_src, V) # Get input field P = dolfin.assemble((-dolfin.dot(u1_r,dolfin.cross(dolfin.curl(u1_i),n))+dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r),n))) * ds(2)) P_refl = dolfin.assemble((-dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r), n)) + dolfin.dot(u1_r, dolfin.cross(dolfin.curl(u1_i), n))) * ds(1)) P_inc = dolfin.assemble((dolfin.dot(H, H) * eta / (2.0 * np.sqrt(eps_c))) * ds(1)) print("Integrated power on port 2:", P/(2.0 * k0 * eta)) print("Incident power at port 1:", P_inc) print("Integrated reflected power on port 1:", P_inc - P_refl / (2.0 * k0 * eta)) #Reflection coefficient is returned as cost function rho_old = (P_inc - P_refl / (2.0 * k0 * eta)) / P_inc #Fraction of incident power reflected as objective function return rho_old
def define_simple_equations(self): """ Setup weak forms for SIMPLE form """ sim = self.simulation self.Vuvw = sim.data['uvw_star'].function_space() Vp = sim.data['Vp'] # The trial and test functions in a coupled space (to be split) func_spaces = [self.Vuvw, Vp] e_mixed = dolfin.MixedElement([fs.ufl_element() for fs in func_spaces]) Vcoupled = dolfin.FunctionSpace(sim.data['mesh'], e_mixed) tests = dolfin.TestFunctions(Vcoupled) trials = dolfin.TrialFunctions(Vcoupled) # Split into components v = dolfin.as_vector(tests[0][:]) u = dolfin.as_vector(trials[0][:]) q = tests[-1] p = trials[-1] lm_trial = lm_test = None # Define the full coupled form and split it into subforms depending # on the test and trial functions eq = define_dg_equations( u, v, p, q, lm_trial, lm_test, self.simulation, include_hydrostatic_pressure=self.include_hydrostatic_pressure, incompressibility_flux_type=self.incompressibility_flux_type, use_grad_q_form=self.use_grad_q_form, use_grad_p_form=self.use_grad_p_form, use_stress_divergence_form=self.use_stress_divergence_form, ) mat, vec = split_form_into_matrix(eq, Vcoupled, Vcoupled, check_zeros=True) # Check matrix and vector shapes and that the matrix is a saddle point matrix assert mat.shape == (2, 2) assert vec.shape == (2, ) assert mat[ -1, -1] is None, 'Found p-q coupling, this is not a saddle point system!' # Store the forms self.eqA = mat[0, 0] self.eqB = mat[0, 1] self.eqC = mat[1, 0] self.eqD = vec[0] self.eqE = vec[1] if self.eqE is None: self.eqE = dolfin.TrialFunction(Vp) * dolfin.Constant( 0) * dolfin.dx if self.a_tilde_is_mass: # The mass matrix. Consistent with the implementation in define_dg_equations rho = sim.multi_phase_model.get_density(0) c1 = sim.data['time_coeffs'][0] dt = sim.data['dt'] eqM = rho * c1 / dt * dolfin.dot(u, v) * dolfin.dx matM, _vecM = split_form_into_matrix(eqM, Vcoupled, Vcoupled, check_zeros=True) self.eqM = dolfin.Form(matM[0, 0]) self.M = None
H=4.1, U0=1.5, t=0., t_ref=0.1) # inlet bc v_bc1 = dlfn.DirichletBC(Wh.sub(0), v_inlet, facet_marker, left_id) # wall no slip v_bc2 = dlfn.DirichletBC(Wh.sub(0), null_vector, facet_marker, wall_id) # circle no slip v_bc3 = dlfn.DirichletBC(Wh.sub(0), null_vector, facet_marker, cylinder_id) # collection of bcs bcs = [v_bc1, v_bc2, v_bc3] #============================================================================== # trial and test function (del_v, del_p) = dlfn.TestFunctions(Wh) (dv, dp) = dlfn.TrialFunctions(Wh) #============================================================================== # solution functions sol = dlfn.Function(Wh) sol0 = dlfn.Function(Wh) sol00 = dlfn.Function(Wh) v0, p0 = dlfn.split(sol0) v00, p00 = dlfn.split(sol00) #============================================================================== # define auxiliary operators def a_operator(phi, psi): return inner(grad(phi), grad(psi)) / Re * dV
field = subproblem[0]["name"] fields.append(field) field_to_subspace[field] = spaces[name] field_to_subproblem[field] = (name, -1) # Create initial folders for storing results newfolder, tstepfiles = create_initial_folders(folder, restart_folder, fields, tstep, parameters) # Create overarching test and trial functions test_functions = dict() trial_functions = dict() for name, subproblem in subproblems.items(): if len(subproblem) > 1: test_functions[name] = df.TestFunctions(spaces[name]) trial_functions[name] = df.TrialFunctions(spaces[name]) else: test_functions[name] = df.TestFunction(spaces[name]) trial_functions[name] = df.TrialFunction(spaces[name]) # Create work dictionaries for all subproblems w_ = dict((subproblem, df.Function(space, name=subproblem)) for subproblem, space in spaces.items()) w_1 = dict((subproblem, df.Function(space, name=subproblem + "_1")) for subproblem, space in spaces.items()) w_tmp = dict((subproblem, df.Function(space, name=subproblem + "_tmp")) for subproblem, space in spaces.items()) # Shortcuts to the fields x_ = dict() for name, subproblem in subproblems.items():
print("starting the state space") vector_order, nodal_order = 2, 2 vector_space = dolfin.FunctionSpace(mesh, 'Nedelec 1st kind H(curl)', vector_order) #vector_element = dolfin.VectorElement("Nedelec 1st kind H(curl)", mesh.ufl_cell(), vector_order) nodal_space = dolfin.FunctionSpace(mesh, 'Lagrange', nodal_order) #nodal_element = dolfin.FiniteElement("Lagrange", mesh.ufl_cell(), nodal_order) #combined_space = vector_space * nodal_space combined_space = dolfin.FunctionSpace( mesh, vector_space.ufl_element() * nodal_space.ufl_element()) #combined_element=dolfin.MixedElement([vector_element, nodal_element]) #combined_space=dolfin.FunctionSpace(mesh, combined_element) #combined_space=dolfin.FunctionSpace(mesh, dolfin.MixedElement([vector_element, nodal_element])) (N_i, L_i) = dolfin.TestFunctions(combined_space) (N_j, L_j) = dolfin.TrialFunctions(combined_space) er = 1. ur = 1. s_tt_ij = 1. / ur * dolfin.inner(dolfin.curl(N_i), dolfin.curl(N_j)) t_tt_ij = er * dolfin.inner(N_i, N_j) s_zz_ij = 1. / ur * dolfin.inner(dolfin.grad(L_i), dolfin.grad(L_j)) t_zz_ij = er * L_i * L_j s_ij = (s_tt_ij + s_zz_ij) * dolfin.dx t_ij = (t_tt_ij + t_zz_ij) * dolfin.dx S = dolfin.assemble(s_ij) T = dolfin.assemble(t_ij) print("starting the boundary conditions") markers = dolfin.MeshFunction('size_t', mesh, 1) markers.set_all(0)
def _setup_imex_problem(self): assert hasattr(self, "_parameters") assert hasattr(self, "_mesh") assert hasattr(self, "_Wh") assert hasattr(self, "_coefficients") assert hasattr(self, "_one") assert hasattr(self, "_omega") assert hasattr(self, "_v0") assert hasattr(self, "_v00") assert hasattr(self, "_T0") assert hasattr(self, "_T00") print " setup explicit imex problem..." #======================================================================= # retrieve imex coefficients a, b, c = self._imex_alpha, self._imex_beta, self._imex_gamma #======================================================================= # trial and test function (del_v, del_p, del_T) = dlfn.TestFunctions(self._Wh) (dv, dp, dT) = dlfn.TrialFunctions(self._Wh) # volume element dV = dlfn.Measure("dx", domain=self._mesh) # reference to time step timestep = self._timestep #======================================================================= from dolfin import dot, grad, inner # 1) lhs momentum equation lhs_momentum = a[0] / timestep * dot(dv, del_v) * dV \ + c[0] * self._coefficients[1] * a_op(dv, del_v) * dV\ - b_op(del_v, dp) * dV\ - b_op(dv, del_p) * dV # 2a) rhs momentum equation: time derivative rhs_momentum = -dot( a[1] / timestep * self._v0 + a[2] / timestep * self._v00, del_v) * dV # 2b) rhs momentum equation: nonlinear term nonlinear_term_velocity = b[0] * dot(grad(self._v0), self._v0) \ + b[1] * dot(grad(self._v00), self._v00) rhs_momentum -= dot(nonlinear_term_velocity, del_v) * dV # 2c) rhs momentum equation: linear term rhs_momentum -= self._coefficients[1] * inner( c[1] * grad(self._v0) + c[2] * grad(self._v00), grad(del_v)) * dV # 2d) rhs momentum equation: coriolis term if self._parameters.rotation is True: assert self._coefficients[0] != 0.0 print " adding rotation to the model..." # defining extrapolated velocity extrapolated_velocity = (self._one + self._omega) * self._v0 \ - self._omega * self._v00 # set Coriolis term if self._space_dim == 2: rhs_momentum -= self._coefficients[0] * ( -extrapolated_velocity[1] * del_v[0] + extrapolated_velocity[0] * del_v[1]) * dV elif self._space_dim == 3: from dolfin import cross coriolis_term = cross(self._rotation_vector, extrapolated_velocity) rhs_momentum -= self._coefficients[0] * dot( coriolis_term, del_v) * dV print " adding rotation to the model..." # 2e) rhs momentum equation: buoyancy term if self._parameters.buoyancy is True: assert self._coefficients[2] != 0.0 # defining extrapolated temperature extrapolated_temperature = ( self._one + self._omega) * self._T0 - self._omega * self._T00 # buoyancy term print " adding buoyancy to the model..." 
rhs_momentum -= self._coefficients[ 2] * extrapolated_temperature * dot(self._gravity, del_v) * dV #======================================================================= # 3) lhs energy equation lhs_energy = a[0] / timestep * dot(dT, del_T) * dV \ + self._coefficients[3] * a_op(dT, del_T) * dV # 4a) rhs energy equation: time derivative rhs_energy = -dot( a[1] / timestep * self._T0 + a[2] / timestep * self._T00, del_T) * dV # 4b) rhs energy equation: nonlinear term nonlinear_term_temperature = b[0] * dot(self._v0, grad(self._T0)) \ + b[1] * dot(self._v00, grad(self._T00)) rhs_energy -= nonlinear_term_temperature * del_T * dV # 4c) rhs energy equation: linear term rhs_energy -= self._coefficients[3] \ * dot(c[1] * grad(self._T0) + c[2] * grad(self._T00), grad(del_T)) * dV #======================================================================= # full problem self._lhs = lhs_momentum + lhs_energy self._rhs = rhs_momentum + rhs_energy if not hasattr(self, "_dirichlet_bcs"): self._setup_boundary_conditions() if self._parameters.use_assembler_method: # system assembler self._assembler = dlfn.SystemAssembler(self._lhs, self._rhs, bcs=self._dirichlet_bcs) self._system_matrix = dlfn.Matrix() self._system_rhs = dlfn.Vector() self._solver = dlfn.LUSolver(self._system_matrix) else: # linear problem problem = dlfn.LinearVariationalProblem(self._lhs, self._rhs, self._sol, bcs=self._dirichlet_bcs) self._solver = dlfn.LinearVariationalSolver(problem)
bcs.append(dlfn.DirichletBC(Wh.sub(0), null_vector, facet_marker, 2)) bcs.append(dlfn.DirichletBC(Wh.sub(0), null_vector, facet_marker, 3)) bcs.append(dlfn.DirichletBC(Wh.sub(0), null_vector, facet_marker, 4)) # temperature bcs on left and right boundary bcs.append(dlfn.DirichletBC(Wh.sub(2), temp_left, facet_marker, 1)) bcs.append(dlfn.DirichletBC(Wh.sub(2), temp_right, facet_marker, 2)) #============================================================================== # definition of volume / surface element dA = dlfn.Measure("ds", domain = mesh, subdomain_data = facet_marker) dV = dlfn.Measure("dx", domain = mesh) A = dlfn.assemble(1.*dV) # "volume" or rather area of geometry #n = dlfn.FacetNormal(mesh) #============================================================================== # trial and test function (del_v, del_p, del_T) = dlfn.TestFunctions(Wh) (dv, dp, dT) = dlfn.TrialFunctions(Wh) #============================================================================== # solution functions sol = dlfn.Function(Wh) sol0 = dlfn.Function(Wh) sol00 = dlfn.Function(Wh) v0, p0, T0 = dlfn.split(sol0) v00, p00, T00 = dlfn.split(sol00) #============================================================================== # define auxiliary operators def a_operator(phi, psi): return inner(grad(phi), grad(psi)) * dV def b_operator(phi, psi): return div(phi) * psi * dV def c_operator(phi, chi, psi): return dot(dot(grad(chi), phi), psi) * dV
def test_assembly_solve_taylor_hood(mesh): """Assemble Stokes problem with Taylor-Hood elements and solve.""" P2 = dolfin.VectorFunctionSpace(mesh, ("Lagrange", 2)) P1 = dolfin.FunctionSpace(mesh, ("Lagrange", 1)) def boundary0(x, only_boundary): """Define boundary x = 0""" return x[:, 0] < 10 * numpy.finfo(float).eps def boundary1(x, only_boundary): """Define boundary x = 1""" return x[:, 0] > (1.0 - 10 * numpy.finfo(float).eps) u0 = dolfin.Function(P2) u0.vector().set(1.0) u0.vector().ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) bc0 = dolfin.DirichletBC(P2, u0, boundary0) bc1 = dolfin.DirichletBC(P2, u0, boundary1) u, p = dolfin.TrialFunction(P2), dolfin.TrialFunction(P1) v, q = dolfin.TestFunction(P2), dolfin.TestFunction(P1) a00 = inner(ufl.grad(u), ufl.grad(v)) * dx a01 = ufl.inner(p, ufl.div(v)) * dx a10 = ufl.inner(ufl.div(u), q) * dx a11 = None p00 = a00 p01, p10 = None, None p11 = inner(p, q) * dx # FIXME # We need zero function for the 'zero' part of L p_zero = dolfin.Function(P1) f = dolfin.Function(P2) L0 = ufl.inner(f, v) * dx L1 = ufl.inner(p_zero, q) * dx # -- Blocked and nested A0 = dolfin.fem.assemble_matrix_nest([[a00, a01], [a10, a11]], [bc0, bc1]) A0norm = nest_matrix_norm(A0) P0 = dolfin.fem.assemble_matrix_nest([[p00, p01], [p10, p11]], [bc0, bc1]) P0norm = nest_matrix_norm(P0) b0 = dolfin.fem.assemble_vector_nest([L0, L1], [[a00, a01], [a10, a11]], [bc0, bc1]) b0norm = b0.norm() ksp = PETSc.KSP() ksp.create(mesh.mpi_comm()) ksp.setOperators(A0, P0) nested_IS = P0.getNestISs() ksp.setType("minres") pc = ksp.getPC() pc.setType("fieldsplit") pc.setFieldSplitIS(["u", nested_IS[0][0]], ["p", nested_IS[1][1]]) ksp_u, ksp_p = pc.getFieldSplitSubKSP() ksp_u.setType("preonly") ksp_u.getPC().setType('lu') ksp_u.getPC().setFactorSolverType('mumps') ksp_p.setType("preonly") def monitor(ksp, its, rnorm): # print("Num it, rnorm:", its, rnorm) pass ksp.setTolerances(rtol=1.0e-8, max_it=50) ksp.setMonitor(monitor) ksp.setFromOptions() x0 = b0.copy() ksp.solve(b0, x0) assert ksp.getConvergedReason() > 0 # -- Blocked and monolithic A1 = dolfin.fem.assemble_matrix_block([[a00, a01], [a10, a11]], [bc0, bc1]) assert A1.norm() == pytest.approx(A0norm, 1.0e-12) P1 = dolfin.fem.assemble_matrix_block([[p00, p01], [p10, p11]], [bc0, bc1]) assert P1.norm() == pytest.approx(P0norm, 1.0e-12) b1 = dolfin.fem.assemble_vector_block([L0, L1], [[a00, a01], [a10, a11]], [bc0, bc1]) assert b1.norm() == pytest.approx(b0norm, 1.0e-12) ksp = PETSc.KSP() ksp.create(mesh.mpi_comm()) ksp.setOperators(A1, P1) ksp.setType("minres") pc = ksp.getPC() pc.setType('lu') pc.setFactorSolverType('mumps') ksp.setTolerances(rtol=1.0e-8, max_it=50) ksp.setFromOptions() x1 = A1.createVecRight() ksp.solve(b1, x1) assert ksp.getConvergedReason() > 0 assert x1.norm() == pytest.approx(x0.norm(), 1e-8) # -- Monolithic P2 = ufl.VectorElement("Lagrange", mesh.ufl_cell(), 2) P1 = ufl.FiniteElement("Lagrange", mesh.ufl_cell(), 1) TH = P2 * P1 W = dolfin.FunctionSpace(mesh, TH) (u, p) = dolfin.TrialFunctions(W) (v, q) = dolfin.TestFunctions(W) a00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx a01 = ufl.inner(p, ufl.div(v)) * dx a10 = ufl.inner(ufl.div(u), q) * dx a = a00 + a01 + a10 p00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx p11 = ufl.inner(p, q) * dx p_form = p00 + p11 f = dolfin.Function(W.sub(0).collapse()) p_zero = dolfin.Function(W.sub(1).collapse()) L0 = inner(f, v) * dx L1 = inner(p_zero, q) * dx L = L0 + L1 bc0 = dolfin.DirichletBC(W.sub(0), u0, boundary0) bc1 = dolfin.DirichletBC(W.sub(0), u0, 
boundary1) A2 = dolfin.fem.assemble_matrix(a, [bc0, bc1]) A2.assemble() assert A2.norm() == pytest.approx(A0norm, 1.0e-12) P2 = dolfin.fem.assemble_matrix(p_form, [bc0, bc1]) P2.assemble() assert P2.norm() == pytest.approx(P0norm, 1.0e-12) b2 = dolfin.fem.assemble_vector(L) dolfin.fem.apply_lifting(b2, [a], [[bc0, bc1]]) b2.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) dolfin.fem.set_bc(b2, [bc0, bc1]) b2norm = b2.norm() assert b2norm == pytest.approx(b0norm, 1.0e-12) ksp = PETSc.KSP() ksp.create(mesh.mpi_comm()) ksp.setOperators(A2, P2) ksp.setType("minres") pc = ksp.getPC() pc.setType('lu') pc.setFactorSolverType('mumps') def monitor(ksp, its, rnorm): # print("Num it, rnorm:", its, rnorm) pass ksp.setTolerances(rtol=1.0e-8, max_it=50) ksp.setMonitor(monitor) ksp.setFromOptions() x2 = A2.createVecRight() ksp.solve(b2, x2) assert ksp.getConvergedReason() > 0 assert x0.norm() == pytest.approx(x2.norm(), 1e-8)
def __init__(self, fenics_2d_rve, **kwargs): """[summary] Parameters ---------- object : [type] [description] fenics_2d_rve : [type] [description] element : tuple or dict Type and degree of element for displacement FunctionSpace Ex: ('CG', 2) or {'family':'Lagrange', degree:2} solver : dict Choose the type of the solver, its method and the preconditioner. An up-to-date list of the available solvers and preconditioners can be obtained with dolfin.list_linear_solver_methods() and dolfin.list_krylov_solver_preconditioners(). """ self.rve = fenics_2d_rve self.topo_dim = topo_dim = fenics_2d_rve.dim try: bottom_left_corner = fenics_2d_rve.bottom_left_corner except AttributeError: logger.warning( "For the definition of the periodicity boundary conditions," "the bottom left corner of the RVE is assumed to be on (0.,0.)" ) bottom_left_corner = np.zeros(shape=(topo_dim, )) self.pbc = periodicity.PeriodicDomain.pbc_dual_base( fenics_2d_rve.gen_vect, "XY", bottom_left_corner, topo_dim) solver = kwargs.pop("solver", {}) # {'type': solver_type, 'method': solver_method, 'preconditioner': preconditioner} s_type = solver.pop("type", None) s_method = solver.pop("method", SOLVER_METHOD) s_precond = solver.pop("preconditioner", None) if s_type is None: if s_method in DOLFIN_KRYLOV_METHODS.keys(): s_type = "Krylov" elif s_method in DOLFIN_LU_METHODS.keys(): s_type = "LU" else: raise RuntimeError("The indicated solver method is unknown.") self._solver = dict(type=s_type, method=s_method) if s_precond: self._solver["preconditioner"] = s_precond element = kwargs.pop("element", ("Lagrange", 2)) if isinstance(element, dict): element = (element["family"], element["degree"]) self._element = element # * Function spaces cell = self.rve.mesh.ufl_cell() self.scalar_FE = fe.FiniteElement(element[0], cell, element[1]) self.displ_FE = fe.VectorElement(element[0], cell, element[1]) strain_deg = element[1] - 1 if element[1] >= 1 else 0 strain_dim = int(topo_dim * (topo_dim + 1) / 2) self.strain_FE = fe.VectorElement("DG", cell, strain_deg, dim=strain_dim) # Espace fonctionel scalaire self.X = fe.FunctionSpace(self.rve.mesh, self.scalar_FE, constrained_domain=self.pbc) # Espace fonctionnel 3D : deformations, notations de Voigt self.W = fe.FunctionSpace(self.rve.mesh, self.strain_FE) # Espace fonctionel 2D pour les champs de deplacement # TODO : reprendre le Ve défini pour l'espace fonctionnel mixte. 
Par ex: V = FunctionSpace(mesh, Ve) self.V = fe.VectorFunctionSpace(self.rve.mesh, element[0], element[1], constrained_domain=self.pbc) # * Espace fonctionel mixte pour la résolution : # * 2D pour les champs + scalaire pour multiplicateur de Lagrange # "R" : Real element with one global degree of freedom self.real_FE = fe.VectorElement("R", cell, 0) self.M = fe.FunctionSpace( self.rve.mesh, fe.MixedElement([self.displ_FE, self.real_FE]), constrained_domain=self.pbc, ) # Define variational problem self.v, self.lamb_ = fe.TestFunctions(self.M) self.u, self.lamb = fe.TrialFunctions(self.M) self.w = fe.Function(self.M) # bilinear form self.a = ( fe.inner(sigma(self.rve.C_per, epsilon(self.u)), epsilon(self.v)) * fe.dx + fe.dot(self.lamb_, self.u) * fe.dx + fe.dot(self.lamb, self.v) * fe.dx) self.K = fe.assemble(self.a) if self._solver["type"] == "Krylov": self.solver = fe.KrylovSolver(self.K, self._solver["method"]) elif self._solver["type"] == "LU": self.solver = fe.LUSolver(self.K, self._solver["method"]) self.solver.parameters["symmetric"] = True try: self.solver.parameters.preconditioner = self._solver[ "preconditioner"] except KeyError: pass # fe.info(self.solver.parameters, True) self.localization = dict() # dictionary of localization field objects, # will be filled up when calling auxiliary problems (lazy evaluation) self.ConstitutiveTensors = dict()
objects = Circle() cell = mesh.ufl_cell() # ufl cell V = df.FiniteElement("Lagrange", cell, 1) # CG elements of order 1 R = df.FiniteElement("Real", cell, 0) # Real elements of order 0 # Create the mixed function space W = df.FunctionSpace(mesh, df.MixedElement([V, R])) # Create DirichletBC of value 0 on the exterior boundaries ext_bc = df.DirichletBC(W.sub(0), df.Constant(0), boundaries, ext_bnd_id) int_bc = df.DirichletBC(W.sub(0), df.Constant(0), boundaries, int_bnd_id) int_bc2 = FloatingBC(W.sub(0), boundaries, int_bnd_id) # Create trial and test functions u, c = df.TrialFunctions(W) v, d = df.TestFunctions(W) # The object charge Q = df.Constant(10.) # Charge density in the domain # rho = df.Constant(0.) rho = df.Expression("sin(x[0]-x[1])", degree=3) # rho = df.Expression("(x[0]-pi)*(x[0]-pi)+(x[1]-pi)*(x[1]-pi)<=r*r ? 0.0 : sin(x[0]-x[1])", pi=np.pi, r=r, degree=2) # The normal vector to the facets n = df.FacetNormal(mesh) # The measure on exterior boundaries ds = df.Measure("ds", domain=mesh, subdomain_data=boundaries)
def __init__(self, Vh_STATE, Vhs, bcs0, datafile, dx=dl.dx): self.dx = dx x, y, U, V, uu, vv, ww, uv, k = np.loadtxt(datafile, skiprows=2, unpack=True) u_fun_mean = VelocityDNS(x=x, y=y, U=U, V=V, symmetrize=True, coflow=0.) u_fun_data = VelocityDNS(x=x, y=y, U=U, V=V, symmetrize=False, coflow=0.) u_mean = dl.interpolate(u_fun_mean, Vhs[0]) u_data = dl.interpolate(u_fun_data, Vhs[0]) q_order = dl.parameters["form_compiler"]["quadrature_degree"] dl.parameters["form_compiler"]["quadrature_degree"] = 6 noise_var_u = dl.assemble( dl.inner(u_fun_data - u_mean, u_fun_data - u_mean) * self.dx, form_compiler_parameters=dl.parameters["form_compiler"]) dl.parameters["form_compiler"]["quadrature_degree"] = q_order noise_var_u = 1.e-3 mpi_comm = Vh_STATE.mesh().mpi_comm() rank = dl.MPI.rank(mpi_comm) if rank == 0: print "Noise Variance = {0}".format(noise_var_u) if Vh_STATE.num_sub_spaces() == 2: u_trial, p_trial = dl.TrialFunctions(Vh_STATE) u_test, p_test = dl.TestFunctions(Vh_STATE) elif Vh_STATE.num_sub_spaces() == 3: u_trial, p_trial, g_trial = dl.TrialFunctions(Vh_STATE) u_test, p_test, g_test = dl.TestFunctions(Vh_STATE) else: raise InputError() Wform = dl.Constant(1. / noise_var_u) * dl.inner(u_trial, u_test) * self.dx self.W = dl.assemble(Wform) dummy = dl.Vector() self.W.init_vector(dummy, 0) [bc.zero(self.W) for bc in bcs0] Wt = Transpose(self.W) [bc.zero(Wt) for bc in bcs0] self.W = Transpose(Wt) xfun = dl.Function(Vh_STATE) assigner = dl.FunctionAssigner(Vh_STATE, Vhs) if Vh_STATE.num_sub_spaces() == 2: assigner.assign(xfun, [u_data, dl.Function(Vhs[1])]) elif Vh_STATE.num_sub_spaces() == 3: assigner.assign( xfun, [u_data, dl.Function(Vhs[1]), dl.Function(Vhs[2])]) self.d = xfun.vector()
def test_get_domains_gmsh(plots=False): """ Get subdomains and partition of the boundary from a .msh file. """ name = "test_domains" local_dir = Path(__file__).parent mesh_file = local_dir.joinpath(name + ".msh") gmsh.model.add(name) L_x, L_y = 2.0, 2.0 H = 1.0 vertices = [(0.0, 0.0), (0.0, L_y), (L_x, L_y), (L_x, 0.0)] contour = geo.LineLoop([geo.Point(np.array(c)) for c in vertices], False) surface = geo.PlaneSurface(contour) inclusion_vertices = list() for coord in [ (H / 2, -H / 2, 0.0), (H / 2, H / 2, 0.0), (-H / 2, H / 2, 0.0), (-H / 2, -H / 2, 0.0), ]: vertex = geo.translation(geo.Point((L_x / 2, L_y / 2)), coord) inclusion_vertices.append(vertex) inclusion = geo.PlaneSurface(geo.LineLoop(inclusion_vertices, False)) for s in [surface, inclusion]: s.add_gmsh() factory.synchronize() (stiff_s, ) = geo.surface_bool_cut(surface, inclusion) factory.synchronize() (soft_s, ) = geo.surface_bool_cut(surface, stiff_s) factory.synchronize() domains = { "stiff": geo.PhysicalGroup(stiff_s, 2), "soft": geo.PhysicalGroup(soft_s, 2), } boundaries = { "S": geo.PhysicalGroup(surface.ext_contour.sides[0], 1), "W": geo.PhysicalGroup(surface.ext_contour.sides[1], 1), "N": geo.PhysicalGroup(surface.ext_contour.sides[2], 1), "E": geo.PhysicalGroup(surface.ext_contour.sides[3], 1), } for group in domains.values(): group.add_gmsh() for group in boundaries.values(): group.add_gmsh() charact_field = mesh_tools.MathEvalField("0.05") mesh_tools.set_background_mesh(charact_field) geo.set_gmsh_option("Mesh.SaveAll", 0) model.mesh.generate(1) model.mesh.generate(2) gmsh.model.mesh.removeDuplicateNodes() gmsh.write(str(mesh_file)) E_1, E_2, nu = 1, 3, 0.3 materials = { domains["soft"].tag: mat.Material(E_1, nu, "cp"), domains["stiff"].tag: mat.Material(E_1, nu, "cp"), } test_part = part.FenicsPart.part_from_file(mesh_file, materials, subdomains_import=True) assert test_part.mat_area == approx(L_x * L_y) elem_type = "CG" degree = 2 V = fe.VectorFunctionSpace(test_part.mesh, elem_type, degree) W = fe.FunctionSpace( test_part.mesh, fe.VectorElement(elem_type, test_part.mesh.ufl_cell(), degree, dim=3), ) boundary_conditions = { boundaries["N"].tag: fe.Expression(("x[0]-1", "1"), degree=1), boundaries["S"].tag: fe.Expression(("x[0]-1", "-1"), degree=1), boundaries["E"].tag: fe.Expression(("1", "x[1]-1"), degree=1), boundaries["W"].tag: fe.Expression(("-1", "x[1]-1"), degree=1), } bcs = list() for tag, val in boundary_conditions.items(): bcs.append(fe.DirichletBC(V, val, test_part.facet_regions, tag)) ds = fe.Measure("ds", domain=test_part.mesh, subdomain_data=test_part.facet_regions) v = fe.TestFunctions(V) u = fe.TrialFunctions(V) F = (fe.inner(mat.sigma(test_part.elasticity_tensor, mat.epsilon(u)), mat.epsilon(v)) * fe.dx) a, L = fe.lhs(F), fe.rhs(F) u_sol = fe.Function(V) fe.solve(a == L, u_sol, bcs) strain = fe.project(mat.epsilon(u_sol), W) if plots: import matplotlib.pyplot as plt plt.figure() plot = fe.plot(u_sol) plt.colorbar(plot) plt.figure() plot = fe.plot(strain[0]) plt.colorbar(plot) plt.figure() plot = fe.plot(strain[1]) plt.colorbar(plot) plt.figure() plot = fe.plot(strain[2]) plt.colorbar(plot) plt.show() error = fe.errornorm( strain, fe.Expression(("1", "1", "0"), degree=0), degree_rise=3, mesh=test_part.mesh, ) assert error == approx(0, abs=1e-12) materials = { domains["soft"].tag: mat.Material(E_1, nu, "cp"), domains["stiff"].tag: mat.Material(E_2, nu, "cp"), } test_part = part.FenicsPart.part_from_file(mesh_file, materials, subdomains_import=True) V = fe.VectorFunctionSpace(test_part.mesh, elem_type, 
degree) W = fe.FunctionSpace( test_part.mesh, fe.VectorElement(elem_type, test_part.mesh.ufl_cell(), degree, dim=3), ) bcs = list() for tag, val in boundary_conditions.items(): bcs.append(fe.DirichletBC(V, val, test_part.facet_regions, tag)) v = fe.TestFunctions(V) u = fe.TrialFunctions(V) F = (fe.inner(mat.sigma(test_part.elasticity_tensor, mat.epsilon(u)), mat.epsilon(v)) * fe.dx) a, L = fe.lhs(F), fe.rhs(F) u_sol = fe.Function(V) fe.solve(a == L, u_sol, bcs) strain = mat.epsilon(u_sol) stress = mat.sigma(test_part.elasticity_tensor, strain) energy = 0.5 * fe.assemble( fe.inner(stress, strain) * fe.dx(test_part.mesh)) energy_abaqus = 12.8788939 assert energy == approx(energy_abaqus, rel=1e-3) geo.reset()