def project(v, V, dx_, bcs=None, nm=None):
    """L2-project an expression (or one expression per domain) onto V.

    Parameters
    ----------
    v : ufl expression or list of ufl expressions
        Quantity to project; a list supplies one expression per measure in dx_.
    V : FunctionSpace
        Target function space.
    dx_ : list
        Volume integration measures, one per domain.
    bcs : list, optional
        Dirichlet boundary conditions for the projection system (default: none).
    nm : str, optional
        Name given to the returned Function.

    Returns
    -------
    Function
        The projection of v onto V.
    """
    # avoid the mutable-default-argument pitfall of bcs=[] (shared across calls)
    if bcs is None:
        bcs = []

    w = TestFunction(V)
    Pv = TrialFunction(V)

    a, L = as_ufl(0), as_ufl(0)
    zerofnc = Function(V)

    for n in range(len(dx_)):
        # check if we have passed in a list of functions or a function
        fnc = v[n] if isinstance(v, list) else v
        # a symbolic Zero cannot be integrated directly; substitute a zero Function
        if isinstance(fnc, constantvalue.Zero):
            fnc = zerofnc
        a += inner(w, Pv) * dx_[n]
        L += inner(w, fnc) * dx_[n]

    # solve linear system for projection
    function = Function(V, name=nm)
    lp = LinearProblem(a, L, bcs=bcs, u=function)
    lp.solve()

    return function
def function_arg(self, g):
    '''Set the value of this boundary condition.

    Accepts a Function on the same space, an Expression, a bare constant,
    or a list of constants (coerced to a UFL tensor). Expressions and
    spatial-coordinate-dependent values are interpolated (or projected,
    if the element does not support point evaluation) into the space.
    '''
    # A Function must live on this BC's function space.
    if isinstance(g, function.Function) and g.function_space() != self._function_space:
        raise RuntimeError("%r is defined on incompatible FunctionSpace!" % g)
    if not isinstance(g, expression.Expression):
        try:
            # Bare constant?
            as_ufl(g)
        except UFLException:
            try:
                # List of bare constants? Convert to UFL expression
                g = as_ufl(as_tensor(g))
                if g.ufl_shape != self._function_space.shape:
                    raise ValueError("%r doesn't match the shape of the function space." % (g,))
            except UFLException:
                raise ValueError("%r is not a valid DirichletBC expression" % (g,))
    if isinstance(g, expression.Expression) or has_type(as_ufl(g), SpatialCoordinate):
        if isinstance(g, expression.Expression):
            # remember the expression's state so later changes can be detected
            self._expression_state = g._state
        try:
            g = function.Function(self._function_space).interpolate(g)
        # Not a point evaluation space, need to project onto V
        except NotImplementedError:
            g = projection.project(g, self._function_space)
    self._function_arg = g
    self._currently_zeroed = False
def get_transition_bounds(self):
    '''Returns dictionary of `(lower, upper)` bounds for energy transitions.

    The upper bound of each transition is the smallest of the *other*
    transitions' lower bounds that lies above this one, falling back to
    ``inf_upper_bound`` when none does.

    See :py:meth:`get_transition_lower_bounds` for more.
    '''
    lowers = self.get_transition_lower_bounds()
    # fallback upper bound, expressed in this object's energy unit
    upper_bound = ufl.as_ufl(self.inf_upper_bound.m_as(self.energy_unit))
    r = {}
    for k, lower in lowers.items():
        lst = []
        for k2, lower2 in lowers.items():
            if k == k2:
                continue
            # candidate: the other lower bound if it lies above ours,
            # otherwise the fallback upper bound
            # FIXME: this is WRONG if lower is exactly equal to lower2
            lst.append(
                dolfin.conditional(
                    ufl.as_ufl(lower2.m) < lower.m,
                    upper_bound,
                    lower2.m))
        if not lst:
            lst.append(upper_bound)
        upper = self._ufl_minimum(lst)
        # reattach the units stripped for the UFL comparison above
        r[k] = (lower, upper * lower.units)
    return r
def function_arg(self, g):
    '''Set the value of this boundary condition.

    Accepts a firedrake Function on the same space, a UFL zero, any UFL
    expression of matching shape (interpolated, or projected if the element
    does not implement interpolation), or a bare constant / nested list of
    constants.
    '''
    if isinstance(g, firedrake.Function):
        # Functions are used directly, but must live on this BC's space.
        if g.function_space() != self.function_space():
            raise RuntimeError("%r is defined on incompatible FunctionSpace!" % g)
        self._function_arg = g
    elif isinstance(g, ufl.classes.Zero):
        if g.ufl_shape and g.ufl_shape != self.function_space().ufl_element().value_shape():
            raise ValueError(f"Provided boundary value {g} does not match shape of space")
        # Special case. Scalar zero for direct Function.assign.
        self._function_arg = ufl.zero()
    elif isinstance(g, ufl.classes.Expr):
        if g.ufl_shape != self.function_space().ufl_element().value_shape():
            raise RuntimeError(f"Provided boundary value {g} does not match shape of space")
        try:
            # Prefer interpolation; keep an update callable so the stored
            # value can be refreshed when g changes.
            self._function_arg = firedrake.Function(self.function_space())
            self._function_arg_update = firedrake.Interpolator(g, self._function_arg).interpolate
        except (NotImplementedError, AttributeError):
            # Element doesn't implement interpolation
            self._function_arg = firedrake.Function(self.function_space()).project(g)
            self._function_arg_update = firedrake.Projector(g, self._function_arg).project
    else:
        try:
            g = as_ufl(g)
            self._function_arg = g
        except UFLException:
            try:
                # Recurse to handle this through interpolation.
                self.function_arg = as_ufl(as_tensor(g))
            except UFLException:
                raise ValueError(f"{g} is not a valid DirichletBC expression")
def test_imag(self):
    """Imag() extracts the imaginary part of (complex) UFL literals."""
    zero = Zero()
    one = as_ufl(1.0)
    imag_unit = as_ufl(1j)
    one_plus_i = ComplexValue(1 + 1j)

    assert Imag(imag_unit) == one
    assert Imag(one_plus_i) == one
    assert Imag(one) == zero
def test_latex_formatting_of_literals():
    """LaTeX rendering of scalar literals and indexed constant tensors."""
    # Test literals
    assert expr2latex(ufl.as_ufl(2)) == "2"
    assert expr2latex(ufl.as_ufl(3.14)) == '3.14'
    assert expr2latex(ufl.as_ufl(0)) == "0"
    # These are actually converted to int before formatting:
    assert expr2latex(ufl.Identity(2)[0, 0]) == "1"
    assert expr2latex(ufl.Identity(2)[0, 1]) == "0"
    assert expr2latex(ufl.Identity(2)[1, 0]) == "0"
    assert expr2latex(ufl.Identity(2)[1, 1]) == "1"
    assert expr2latex(ufl.PermutationSymbol(3)[1, 2, 3]) == "1"
    assert expr2latex(ufl.PermutationSymbol(3)[2, 1, 3]) == "-1"
    assert expr2latex(ufl.PermutationSymbol(3)[1, 1, 3]) == "0"
def evaluate_active_stress_ode(self, t):
    """Evaluate the active stress ODE at time t and store the activation
    in the quadrature function space.

    If any material uses the Frank-Starling law (fiber stretch-dependent
    contractility), the old activation amplitude is first recomputed from
    the old fiber stretch.
    """
    # take care of Frank-Starling law (fiber stretch-dependent contractility)
    if self.have_frank_starling:
        amp_old_, na = [], 0
        for n in range(self.num_domains):
            if self.mat_active_stress[n]:
                if self.actstress[na].frankstarling:
                    # old fiber stretch (needed for Frank-Starling law)
                    if self.mat_growth[n]:
                        lam_fib_old = self.ma[n].fibstretch_e(self.ki.C(self.u_old), self.theta_old, self.fib_func[0])
                    else:
                        lam_fib_old = self.ki.fibstretch(self.u_old, self.fib_func[0])
                    amp_old_.append(self.actstress[na].amp(t-self.dt, lam_fib_old, self.amp_old))
                else:
                    amp_old_.append(as_ufl(0))
                # BUGFIX: advance the active-stress counter per active-stress
                # domain (mirrors the tau_a loop below); it was never
                # incremented here, so all domains indexed actstress[0]
                na += 1
            else:
                amp_old_.append(as_ufl(0))

        # project and interpolate to quadrature function space
        amp_old_proj = project(amp_old_, self.Vd_scalar, self.dx_)
        self.amp_old.vector.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
        self.amp_old.interpolate(amp_old_proj)

    tau_a_, na = [], 0
    for n in range(self.num_domains):
        if self.mat_active_stress[n]:
            # fiber stretch (needed for Frank-Starling law)
            if self.actstress[na].frankstarling:
                if self.mat_growth[n]:
                    lam_fib = self.ma[n].fibstretch_e(self.ki.C(self.u), self.theta, self.fib_func[0])
                else:
                    lam_fib = self.ki.fibstretch(self.u, self.fib_func[0])
            else:
                lam_fib = as_ufl(1)
            tau_a_.append(self.actstress[na].tau_act(self.tau_a_old, t, self.dt, lam_fib, self.amp_old))
            na += 1
        else:
            tau_a_.append(as_ufl(0))

    # project and interpolate to quadrature function space
    tau_a_proj = project(tau_a_, self.Vd_scalar, self.dx_)
    self.tau_a.vector.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    self.tau_a.interpolate(tau_a_proj)
def assign(self, expr, subset=None):
    r"""Set the :class:`Function` value to the pointwise value of expr.

    expr may only contain :class:`Function`\s on the same
    :class:`.FunctionSpace` as the :class:`Function` being assigned to.

    Similar functionality is available for the augmented assignment
    operators `+=`, `-=`, `*=` and `/=`. For example, if `f` and `g` are
    both Functions on the same :class:`.FunctionSpace` then::

      f += 2 * g

    will add twice `g` to `f`.

    If present, subset must be an :class:`pyop2.Subset` of this
    :class:`Function`'s ``node_set``. The expression will then
    only be assigned to the nodes on that subset.
    """
    expr = ufl.as_ufl(expr)
    if isinstance(expr, ufl.classes.Zero):
        # fast path: zero the underlying data directly
        self.dat.zero(subset=subset)
        return self
    elif (isinstance(expr, Function)
          and expr.function_space() == self.function_space()):
        # fast path: straight copy between matching spaces
        expr.dat.copy(self.dat, subset=subset)
        return self

    # general case: evaluate the pointwise expression
    from firedrake import assemble_expressions
    assemble_expressions.evaluate_expression(
        assemble_expressions.Assign(self, expr), subset)
    return self
def g(self, lam):
    """Stretch-dependent activation scaling (Frank-Starling mechanism).

    Piecewise-smooth ramp from amp_min up to amp_max and back down as a
    function of the fiber stretch lam, with cosine transitions between
    the threshold and max stretch values.
    """
    amp_min = self.params['amp_min']
    amp_max = self.params['amp_max']

    lam_threslo = self.params['lam_threslo']
    lam_maxlo = self.params['lam_maxlo']

    lam_threshi = self.params['lam_threshi']
    lam_maxhi = self.params['lam_maxhi']

    # Diss Hirschvogel eq. 2.107
    # TeX: g(\lambda_{\mathrm{myo}}) = \begin{cases} a_{\mathrm{min}}, & \lambda_{\mathrm{myo}} \leq \hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,lo}}, \\ a_{\mathrm{min}}+\frac{1}{2}\left(a_{\mathrm{max}}-a_{\mathrm{min}}\right)\left(1-\cos \frac{\pi(\lambda_{\mathrm{myo}}-\hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,lo}})}{\hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,lo}}-\hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,lo}}}\right), & \hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,lo}} \leq \lambda_{\mathrm{myo}} \leq \hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,lo}}, \\ a_{\mathrm{max}}, & \hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,lo}} \leq \lambda_{\mathrm{myo}} \leq \hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,hi}}, \\ a_{\mathrm{min}}+\frac{1}{2}\left(a_{\mathrm{max}}-a_{\mathrm{min}}\right)\left(1-\cos \frac{\pi(\lambda_{\mathrm{myo}}-\hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,hi}})}{\hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,hi}}-\hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,hi}}}\right), & \hat{\lambda}_{\mathrm{myo}}^{\mathrm{thres,hi}} \leq \lambda_{\mathrm{myo}} \leq \hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,hi}}, \\ a_{\mathrm{min}}, & \lambda_{\mathrm{myo}} \geq \hat{\lambda}_{\mathrm{myo}}^{\mathrm{max,hi}} \end{cases}
    return conditional(
        le(lam, lam_threslo), amp_min,
        conditional(
            And(ge(lam, lam_threslo), le(lam, lam_maxlo)),
            amp_min + 0.5 * (amp_max - amp_min) * (1. - cos(pi * (lam - lam_threslo) / (lam_maxlo - lam_threslo))),
            conditional(
                And(ge(lam, lam_maxlo), le(lam, lam_threshi)),
                amp_max,
                conditional(
                    And(ge(lam, lam_threshi), le(lam, lam_maxhi)),
                    amp_min + 0.5 * (amp_max - amp_min) * (1. - cos(pi * (lam - lam_maxhi) / (lam_maxhi - lam_threshi))),
                    conditional(ge(lam, lam_maxhi), amp_min, as_ufl(0))))))
def handle_conditional(v, si, deps, SV_factors, FV, sv2fv, e2fi):
    """Factorize a conditional subexpression w.r.t. arguments.

    deps is (condition, true-branch, false-branch); the condition must be
    argument-free. Returns the argument-factor mapping for this vertex.
    """
    fac0 = SV_factors[deps[0]]
    fac1 = SV_factors[deps[1]]
    fac2 = SV_factors[deps[2]]
    assert not fac0, "Cannot have argument in condition."

    if not (fac1 or fac2):  # non-arg ? non-arg : non-arg
        # Record non-argument subexpression
        sv2fv[si] = add_to_fv(v, FV, e2fi)
        factors = noargs
    else:
        f0 = FV[sv2fv[deps[0]]]
        f1 = FV[sv2fv[deps[1]]]
        f2 = FV[sv2fv[deps[2]]]

        # Term conditional(c, argument, non-argument) is not legal unless non-argument is 0.0
        assert fac1 or isinstance(f1, Zero)
        assert fac2 or isinstance(f2, Zero)
        assert () not in fac1
        assert () not in fac2

        z = as_ufl(0.0)

        # In general, can decompose like this:
        # conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui + sum_j conditional(c, 0, fj)*uj
        mas = sorted(set(fac1.keys()) | set(fac2.keys()))
        factors = {}
        for k in mas:
            fi1 = fac1.get(k)
            fi2 = fac2.get(k)
            # missing factor on one branch means that branch contributes zero
            f1 = z if fi1 is None else FV[fi1]
            f2 = z if fi2 is None else FV[fi2]
            factors[k] = add_to_fv(conditional(f0, f1, f2), FV, e2fi)

    return factors
def handle_conditional(v, fac, sf, F):
    """Factorize a conditional subexpression w.r.t. arguments (graph version).

    fac holds the argument factors and sf the scalar (non-argument)
    expressions of (condition, true-branch, false-branch). The condition
    must be argument-free. Returns the argument-factor mapping.
    """
    fac0 = fac[0]
    fac1 = fac[1]
    fac2 = fac[2]
    assert not fac0, "Cannot have argument in condition."

    if not (fac1 or fac2):  # non-arg ? non-arg : non-arg
        # caller should have taken the scalar path already
        raise RuntimeError("No arguments")
    else:
        f0 = sf[0]
        f1 = sf[1]
        f2 = sf[2]

        # Term conditional(c, argument, non-argument) is not legal unless non-argument is 0.0
        assert fac1 or isinstance(f1, Zero)
        assert fac2 or isinstance(f2, Zero)
        assert () not in fac1
        assert () not in fac2

        z = as_ufl(0.0)

        # In general, can decompose like this:
        # conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui + sum_j conditional(c, 0, fj)*uj
        mas = sorted(set(fac1.keys()) | set(fac2.keys()))
        factors = {}
        for k in mas:
            fi1 = fac1.get(k)
            fi2 = fac2.get(k)
            # missing factor on one branch means that branch contributes zero
            f1 = z if fi1 is None else F.nodes[fi1]['expression']
            f2 = z if fi2 is None else F.nodes[fi2]['expression']
            factors[k] = graph_insert(F, conditional(f0, f1, f2))

    return factors
def test_real(self):
    """Real() extracts the real part of (complex) UFL literals."""
    zero = Zero()
    one = as_ufl(1.0)
    imag_unit = ComplexValue(1j)
    one_plus_i = ComplexValue(1 + 1j)

    assert Real(one) == one
    assert Real(one_plus_i) == one
    assert Real(imag_unit) == zero
def xtest_cpp2_compile_scalar_literals():
    """(disabled) Compilation of scalar literal forms.

    NOTE(review): expected outputs are still 'TODO' placeholders, so this
    test is intentionally x-prefixed (not collected).
    """
    M = as_ufl(0) * dx
    code = compile_form(M, 'unittest')
    print('\n', code)
    expected = 'TODO'
    assert code == expected

    M = as_ufl(3) * dx
    code = compile_form(M, 'unittest')
    print('\n', code)
    expected = 'TODO'
    assert code == expected

    M = as_ufl(1.03) * dx
    code = compile_form(M, 'unittest')
    print('\n', code)
    expected = 'TODO'
    assert code == expected
def robin_bcs(self, v, v_old):
    """Assemble Robin (dashpot) boundary contributions.

    Parameters
    ----------
    v, v_old : velocity at the new and old time step.

    Returns
    -------
    (w, w_old) : external virtual power contributions for new and old step.
    """
    w, w_old = as_ufl(0), as_ufl(0)

    for r in self.bc_dict['robin']:
        if r['type'] == 'dashpot':
            if r['dir'] == 'xyz':
                for i in range(len(r['id'])):
                    ds_ = ds(
                        subdomain_data=self.io.mt_b1,
                        subdomain_id=r['id'][i],
                        metadata={'quadrature_degree': self.quad_degree})
                    w += self.vf.deltaP_ext_robin_dashpot(v, r['visc'], ds_)
                    w_old += self.vf.deltaP_ext_robin_dashpot(v_old, r['visc'], ds_)
            elif r['dir'] == 'normal':  # reference normal
                for i in range(len(r['id'])):
                    ds_ = ds(
                        subdomain_data=self.io.mt_b1,
                        subdomain_id=r['id'][i],
                        metadata={'quadrature_degree': self.quad_degree})
                    w += self.vf.deltaP_ext_robin_dashpot_normal(v, r['visc'], ds_)
                    w_old += self.vf.deltaP_ext_robin_dashpot_normal(v_old, r['visc'], ds_)
            else:
                raise NameError("Unknown dir option for Robin BC!")
        else:
            raise NameError("Unknown type option for Robin BC!")

    return w, w_old
def test_conditional(mode, compile_args):
    """JIT-compile forms containing conditionals and check the assembled
    tensors on a single reference triangle."""
    cell = ufl.triangle
    element = ufl.FiniteElement("Lagrange", cell, 1)
    u, v = ufl.TrialFunction(element), ufl.TestFunction(element)
    x = ufl.SpatialCoordinate(cell)
    # condition is true somewhere on the reference cell
    condition = ufl.Or(ufl.ge(ufl.real(x[0] + x[1]), 0.1),
                       ufl.ge(ufl.real(x[1] + x[1]**2), 0.1))
    c1 = ufl.conditional(condition, 2.0, 1.0)
    a = c1 * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    x1x2 = ufl.real(x[0] + ufl.as_ufl(2) * x[1])
    c2 = ufl.conditional(ufl.ge(x1x2, 0), 6.0, 0.0)
    b = c2 * ufl.conj(v) * ufl.dx

    forms = [a, b]

    compiled_forms, module = ffcx.codegeneration.jit.compile_forms(
        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    form0 = compiled_forms[0][0].create_cell_integral(-1)
    form1 = compiled_forms[1][0].create_cell_integral(-1)

    ffi = cffi.FFI()
    c_type, np_type = float_to_type(mode)

    # bilinear form on the unit reference triangle
    A1 = np.zeros((3, 3), dtype=np_type)
    w1 = np.array([1.0, 1.0, 1.0], dtype=np_type)
    c = np.array([], dtype=np.float64)

    coords = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0], dtype=np.float64)
    form0.tabulate_tensor(
        ffi.cast('{type} *'.format(type=c_type), A1.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), w1.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), c.ctypes.data),
        ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL, 0)

    expected_result = np.array([[2, -1, -1], [-1, 1, 0], [-1, 0, 1]], dtype=np_type)
    assert np.allclose(A1, expected_result)

    # linear form on the same cell
    A2 = np.zeros(3, dtype=np_type)
    w2 = np.array([1.0, 1.0, 1.0], dtype=np_type)
    coords = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0], dtype=np.float64)
    form1.tabulate_tensor(
        ffi.cast('{type} *'.format(type=c_type), A2.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), w2.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), c.ctypes.data),
        ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL, 0)

    expected_result = np.ones(3, dtype=np_type)
    assert np.allclose(A2, expected_result)
def xtest_literal_zero_compilation():
    """(disabled) A literal zero compiles to no statements, final value '0'."""
    lines, finals = compile_expression_lines(as_ufl(0))
    assert lines == []
    assert finals == ['0']
def xtest_literal_float_compilation():
    """(disabled) A float literal compiles to no statements, final value '2.56'."""
    lines, finals = compile_expression_lines(as_ufl(2.56))
    assert lines == []
    assert finals == ['2.56']
def test_complex_algebra(self):
    """Arithmetic and math functions on ComplexValue literals agree with cmath."""
    z1 = ComplexValue(1j)
    z2 = ComplexValue(1+1j)

    # Remember that ufl.algebra functions return ComplexValues, but ufl.mathfunctions return complex Python scalar
    # Any operations with a ComplexValue and a complex Python scalar promote to ComplexValue
    assert z1*z2 == ComplexValue(-1+1j)
    assert z2/z1 == ComplexValue(1-1j)
    assert pow(z2, z1) == ComplexValue((1+1j)**1j)
    assert sqrt(z2) * as_ufl(1) == ComplexValue(cmath.sqrt(1+1j))
    assert ((sin(z2) + cosh(z2) - atan(z2)) * z1) == ComplexValue((cmath.sin(1+1j) + cmath.cosh(1+1j) - cmath.atan(1+1j))*1j)
    assert (abs(z2) - ln(z2))/exp(z1) == ComplexValue((abs(1+1j) - cmath.log(1+1j))/cmath.exp(1j))
def __init__(self, lvalue, rvalue):
    """
    :arg lvalue: The coefficient to assign into.
    :arg rvalue: The pointwise expression.
    """
    if not isinstance(lvalue, ufl.Coefficient):
        raise ValueError("lvalue for pointwise assignment must be a coefficient")
    self.lvalue = lvalue
    self.rvalue = ufl.as_ufl(rvalue)
    nspaces = len(self.lvalue.function_space())
    if nspaces > 1:
        # mixed function space: memoize the splitting of expressions
        # into per-subspace components
        self.splitter = MemoizerArg(_split)
        self.splitter.n = nspaces
def compute_volume_large(self):
    """Assemble the total volume of the large-scale problem's domains,
    print it on rank 0, and return it."""
    form = as_ufl(0)
    for n in range(self.pb.pblarge.num_domains):
        form += self.pb.pblarge.ki.J(self.pb.pblarge.u) * self.pb.pblarge.dx_[n]

    # gather the local contributions from all ranks and sum them
    contributions = self.pb.comm.allgather(assemble_scalar(form))
    volume_large = sum(contributions)

    if self.pb.comm.rank == 0:
        print('Volume of myocardium: %.4e' % (volume_large))
        sys.stdout.flush()

    return volume_large
def set_homeostatic_threshold(self, t):
    """Set the homeostatic growth thresholds once the set-point time is reached.

    t is absolute time; the thresholds should only be set in the first
    cycle, within the time step containing t_gandr_setpoint.
    """
    eps = 1.0e-14
    # fire exactly once: in the step that brackets the set-point time
    if t >= self.pb.t_gandr_setpoint - eps and t < self.pb.t_gandr_setpoint + self.pb.pbs.dt - eps:

        if self.pb.comm.rank == 0:
            print('Set homeostatic growth thresholds...')
            sys.stdout.flush()
        time.sleep(1)

        growth_thresolds = []
        for n in range(self.pb.pbs.num_domains):
            if self.pb.pbs.mat_growth[n]:
                growth_settrig = self.pb.pbs.constitutive_models[
                    'MAT' + str(n + 1) + '']['growth']['growth_settrig']

                if growth_settrig == 'fibstretch':
                    # elastic fiber stretch as threshold quantity
                    growth_thresolds.append(self.pb.pbs.ma[n].fibstretch_e(
                        self.pb.pbs.ki.C(self.pb.pbs.u), self.pb.pbs.theta,
                        self.pb.pbs.fib_func[0]))
                elif growth_settrig == 'volstress':
                    # trace of the Mandel stress as threshold quantity
                    growth_thresolds.append(
                        tr(self.pb.pbs.ma[n].M_e(
                            self.pb.pbs.u, self.pb.pbs.p,
                            self.pb.pbs.ki.C(self.pb.pbs.u),
                            ivar=self.pb.pbs.internalvars)))
                else:
                    raise NameError(
                        "Unknown growth trigger to be set as homeostatic threshold!"
                    )
            else:
                # non-growing domain: zero threshold placeholder
                growth_thresolds.append(as_ufl(0))

        growth_thres_proj = project(growth_thresolds, self.pb.pbs.Vd_scalar,
                                    self.pb.pbs.dx_)
        self.pb.pbs.growth_thres.vector.ghostUpdate(
            addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
        self.pb.pbs.growth_thres.interpolate(growth_thres_proj)
def dtheta_dp(self, u_, p_, ivar, theta_old_, thres, dt):
    """Derivative of the growth stretch theta w.r.t. the pressure p.

    Only volumetric-stress-driven growth depends on the pressure (through
    the Mandel stress); fiber-stretch-driven growth yields zero.
    """
    theta_ = ivar["theta"]

    dFg_dtheta = self.F_g(theta_, tang=True)

    ktheta = self.res_dtheta_growth(u_, p_, ivar, theta_old_, thres, dt, 'ktheta')
    K_growth = self.res_dtheta_growth(u_, p_, ivar, theta_old_, thres, dt, 'tang')

    if self.growth_trig == 'volstress':
        # chain rule: sensitivity of the Mandel stress trace to the pressure
        tangdp = (ktheta * dt / K_growth) * (diff(tr(self.M_e(u_, p_, self.kin.C(u_), ivar)), p_))
    elif self.growth_trig == 'fibstretch':
        tangdp = as_ufl(0)
    else:
        # fixed typo in error message ("Unkown" -> "Unknown")
        raise NameError("Unknown growth_trig!")

    return tangdp
def compute_solid_growth_rate(self, N, t):
    """Assemble the domain-integrated growth rate, print it, and append it
    to the growth-rate results file at the configured output interval.

    Parameters
    ----------
    N : int
        Current time step number (used for the output-interval check).
    t : float
        Current time (logged alongside the growth rate).
    """
    dtheta_all = as_ufl(0)
    for n in range(self.num_domains):
        dtheta_all += (self.theta - self.theta_old) / (self.dt) * self.dx_[n]

    gr = assemble_scalar(dtheta_all)
    gr = self.comm.allgather(gr)
    self.growth_rate = sum(gr)

    if self.comm.rank == 0:
        print('Solid growth rate: %.4e' % (self.growth_rate))
        sys.stdout.flush()

    # NOTE(review): file written by all ranks in the original collapsed
    # source; assumed rank-independent since every rank holds the same
    # allgather-summed value — confirm against the original layout
    if self.io.write_results_every > 0 and N % self.io.write_results_every == 0:
        # truncate at the first time step, append afterwards
        mode = 'wt' if np.isclose(t, self.dt) else 'a'
        fl = self.io.output_path+'/results_'+self.simname+'_growthrate.txt'
        # context manager ensures the file is closed even if the write fails
        with open(fl, mode) as f:
            f.write('%.16E %.16E\n' % (t, self.growth_rate))
def test_comparison_checker(self):
    """do_comparison_check wraps comparison operands in real() and rejects
    comparisons whose operands could be genuinely complex."""
    cell = triangle
    element = FiniteElement("Lagrange", cell, 1)

    u = TrialFunction(element)
    v = TestFunction(element)

    a = conditional(ge(abs(u), imag(v)), u, v)
    b = conditional(le(sqrt(abs(u)), imag(v)), as_ufl(1), as_ufl(1j))
    c = conditional(gt(abs(u), pow(imag(v), 0.5)), sin(u), cos(v))
    d = conditional(lt(as_ufl(-1), as_ufl(1)), u, v)
    e = max_value(as_ufl(0), real(u))
    f = min_value(sin(u), cos(v))
    g = min_value(sin(pow(u, 3)), cos(abs(v)))

    assert do_comparison_check(a) == conditional(ge(real(abs(u)), real(imag(v))), u, v)
    # sqrt and fractional pow of a possibly-complex operand must be rejected
    with pytest.raises(ComplexComparisonError):
        b = do_comparison_check(b)
    with pytest.raises(ComplexComparisonError):
        c = do_comparison_check(c)
    assert do_comparison_check(d) == conditional(lt(real(as_ufl(-1)), real(as_ufl(1))), u, v)
    assert do_comparison_check(e) == max_value(real(as_ufl(0)), real(real(u)))
    assert do_comparison_check(f) == min_value(real(sin(u)), real(cos(v)))
    assert do_comparison_check(g) == min_value(real(sin(pow(u, 3))), real(cos(abs(v))))
def set_variational_forms_and_jacobians(self):
    """Add the constant Neumann trigger-pressure terms of the small-scale
    problem to the large-scale weak form."""
    # add constant Neumann terms for large scale problem (trigger pressures)
    self.neumann_funcs = []
    w_neumann = as_ufl(0)
    for n in range(len(self.pbsmall.surface_p_ids)):
        self.neumann_funcs.append(Function(self.pblarge.Vd_scalar))

        for i in range(len(self.pbsmall.surface_p_ids[n])):
            ds_ = ds(
                subdomain_data=self.pblarge.io.mt_b1,
                subdomain_id=self.pbsmall.surface_p_ids[n][i],
                metadata={'quadrature_degree': self.pblarge.quad_degree})

            # we apply the pressure onto a fixed configuration of the G&R trigger point, determined by the displacement field u_set
            # in the last G&R cycle, we assure that growth falls below a tolerance and hence the current and the set configuration coincide
            w_neumann += self.pblarge.vf.deltaW_ext_neumann_true(
                self.pblarge.ki.J(self.pblarge.u_set),
                self.pblarge.ki.F(self.pblarge.u_set),
                self.neumann_funcs[-1], ds_)

    # external work enters the weak form with a minus sign
    self.pblarge.weakform_u -= w_neumann
def _simplify_abs_(o, self, in_abs):
    """Fold abs() applied to a constant into a literal; otherwise pass through."""
    if in_abs:
        # Inline abs(constant)
        return ufl.as_ufl(abs(o._value))
    return o
def set_variational_forms_and_jacobians(self):
    """Set up the Lagrange-multiplier constraint coupling: constraint
    quantities (volume or flux) per coupling surface, the multiplier
    system, and the resulting contributions to the solid weak form and
    Jacobian."""
    self.cq, self.cq_old, self.dcq, self.dforce = [], [], [], []
    self.coupfuncs, self.coupfuncs_old = [], []

    # Lagrange multiplier stiffness matrix (most likely to be zero!)
    self.K_lm = PETSc.Mat().createAIJ(
        size=(self.num_coupling_surf, self.num_coupling_surf),
        bsize=None, nnz=None, csr=None, comm=self.comm)
    self.K_lm.setUp()

    # Lagrange multipliers
    self.lm, self.lm_old = self.K_lm.createVecLeft(), self.K_lm.createVecLeft()

    # 3D constraint variable (volume or flux)
    self.constr, self.constr_old = [], []

    self.work_coupling, self.work_coupling_old, self.work_coupling_prestr = as_ufl(0), as_ufl(0), as_ufl(0)

    # coupling variational forms and Jacobian contributions
    for n in range(self.num_coupling_surf):

        self.pr0D = expression.template()

        # scalar coupling functions holding the multiplier value on the surface
        self.coupfuncs.append(Function(self.pbs.Vd_scalar)), self.coupfuncs_old.append(Function(self.pbs.Vd_scalar))
        self.coupfuncs[-1].interpolate(self.pr0D.evaluate), self.coupfuncs_old[-1].interpolate(self.pr0D.evaluate)

        cq_, cq_old_ = as_ufl(0), as_ufl(0)
        for i in range(len(self.surface_c_ids[n])):
            ds_vq = ds(
                subdomain_data=self.pbs.io.mt_b1,
                subdomain_id=self.surface_c_ids[n][i],
                metadata={'quadrature_degree': self.pbs.quad_degree})

            # currently, only volume or flux constraints are supported
            if self.coupling_params['constraint_quantity'] == 'volume':
                cq_ += self.pbs.vf.volume(self.pbs.u, self.pbs.ki.J(self.pbs.u),
                                          self.pbs.ki.F(self.pbs.u), ds_vq)
                cq_old_ += self.pbs.vf.volume(
                    self.pbs.u_old, self.pbs.ki.J(self.pbs.u_old),
                    self.pbs.ki.F(self.pbs.u_old), ds_vq)
            elif self.coupling_params['constraint_quantity'] == 'flux':
                cq_ += self.pbs.vf.flux(self.pbs.vel, self.pbs.ki.J(self.pbs.u),
                                        self.pbs.ki.F(self.pbs.u), ds_vq)
                cq_old_ += self.pbs.vf.flux(self.pbs.v_old,
                                            self.pbs.ki.J(self.pbs.u_old),
                                            self.pbs.ki.F(self.pbs.u_old), ds_vq)
            else:
                raise NameError(
                    "Unknown constraint quantity! Choose either volume or flux!"
                )

        self.cq.append(cq_), self.cq_old.append(cq_old_)
        # directional derivative of the constraint w.r.t. the displacement
        self.dcq.append(derivative(self.cq[-1], self.pbs.u, self.pbs.du))

        df_ = as_ufl(0)
        for i in range(len(self.surface_p_ids[n])):
            ds_p = ds(subdomain_data=self.pbs.io.mt_b1,
                      subdomain_id=self.surface_p_ids[n][i],
                      metadata={'quadrature_degree': self.pbs.quad_degree})
            df_ += self.pbs.timefac * self.pbs.vf.surface(
                self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u), ds_p)

            # add to solid rhs contributions
            self.work_coupling += self.pbs.vf.deltaW_ext_neumann_true(
                self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u),
                self.coupfuncs[-1], ds_p)
            self.work_coupling_old += self.pbs.vf.deltaW_ext_neumann_true(
                self.pbs.ki.J(self.pbs.u_old), self.pbs.ki.F(self.pbs.u_old),
                self.coupfuncs_old[-1], ds_p)

            # for prestressing, true loads should act on the reference, not the current configuration
            if self.pbs.prestress_initial:
                self.work_coupling_prestr += self.pbs.vf.deltaW_ext_neumann_refnormal(
                    self.coupfuncs_old[-1], ds_p)

        self.dforce.append(df_)

    # minus sign, since contribution to external work!
    self.pbs.weakform_u += -self.pbs.timefac * self.work_coupling - (
        1. - self.pbs.timefac) * self.work_coupling_old

    # add to solid Jacobian
    self.pbs.jac_uu += -self.pbs.timefac * derivative(
        self.work_coupling, self.pbs.u, self.pbs.du)
def compute_argument_factorization(S, rank):
    """Factorizes a scalar expression graph w.r.t. scalar Argument components.

    The result is a triplet (AV, FV, IM):

      - The scalar argument component subgraph:

          AV[ai] = v

        with the property

          SV[arg_indices] == AV[:]

      - An expression graph vertex list with all non-argument factors:

          FV[fi] = f

        with the property that none of the expressions depend on Arguments.

      - A dict representation of the final integrand of rank r:

          IM = { (ai1_1, ..., ai1_r): fi1, (ai2_1, ..., ai2_r): fi2, }

        This mapping represents the factorization of SV[-1] w.r.t. Arguments s.t.:

          SV[-1] := sum(FV[fik] * product(AV[ai] for ai in aik) for aik, fik in IM.items())

        where := means equivalence in the mathematical sense,
        of course in a different technical representation.
    """
    # Extract argument component subgraph
    arg_indices = build_argument_indices(S)
    AV = [S.nodes[i]['expression'] for i in arg_indices]

    # Data structure for building non-argument factors
    F = ExpressionGraph()
    # Attach a quick lookup dict for expression to index
    F.e2i = {}

    # Insert arguments as first entries in factorisation graph
    # They will not be connected to other nodes, but will be available
    # and referred to by the factorisation indices of the 'target' nodes.
    for v in AV:
        graph_insert(F, v)

    # Adding 1.0 as an expression allows avoiding special representation
    # of arguments when first visited by representing "v" as "1*v"
    one_index = graph_insert(F, as_ufl(1.0))

    # Intermediate factorization for each vertex in SV on the format
    # SV_factors[si] = None # if SV[si] does not depend on arguments
    # SV_factors[si] = { argkey: fi } # if SV[si] does depend on arguments, where:
    #   FV[fi] is the expression SV[si] with arguments factored out
    #   argkey is a tuple with indices into SV for each of the argument components SV[si] depends on
    # SV_factors[si] = { argkey1: fi1, argkey2: fi2, ... } # if SV[si]
    # is a linear combination of multiple argkey configurations

    # Factorize each subexpression in order:
    for si, attr in S.nodes.items():
        deps = S.out_edges[si]
        v = attr['expression']

        if si in arg_indices:
            assert len(deps) == 0
            # v is a modified Argument
            factors = {(si, ): one_index}
        else:
            fac = [S.nodes[d]['factors'] for d in deps]
            if not any(fac):
                # Entirely scalar (i.e. no arg factors)
                # Just add unchanged to F
                graph_insert(F, v)
                factors = noargs
            else:
                # Get scalar factors for dependencies
                # which do not have arg factors
                sf = []
                for i, d in enumerate(deps):
                    if fac[i]:
                        sf.append(None)
                    else:
                        sf.append(S.nodes[d]['expression'])
                # Use appropriate handler to deal with Sum, Product, etc.
                factors = handler(v, fac, sf, F)

        attr['factors'] = factors

    assert len(F.nodes) == len(F.e2i)

    # Find the (only) node in S that is marked as 'target'
    # Should always be the last one.
    S_targets = [i for i, v in S.nodes.items() if v.get('target', False)]
    assert len(S_targets) == 1
    S_target = S_targets[0]

    # Get the factorizations of the target values
    if S.nodes[S_target]['factors'] == {}:
        if rank == 0:
            # Functionals and expressions: store as no args * factor
            factors = {(): F.e2i[S.nodes[S_target]['expression']]}
        else:
            # Zero form of arity 1 or higher: make factors empty
            factors = {}
    else:
        # Forms of arity 1 or higher:
        # Map argkeys from indices into SV to indices into AV,
        # and resort keys for canonical representation
        factors = {
            tuple(sorted(arg_indices.index(si) for si in argkey)): fi
            for argkey, fi in S.nodes[S_target]['factors'].items()
        }

    # Expecting all term keys to have length == rank
    # (this assumption will eventually have to change if we
    # implement joint bilinear+linear form factorization here)
    assert all(len(k) == rank for k in factors)

    # Indices into F that are needed for final result
    for i in factors.values():
        F.nodes[i]['target'] = []
    for k in factors:
        i = factors[k]
        F.nodes[i]['target'] += [k]

    # Compute dependencies in FV
    for i, v in F.nodes.items():
        expr = v['expression']
        if not expr._ufl_is_terminal_ and not expr._ufl_is_terminal_modifier_:
            for o in expr.ufl_operands:
                F.add_edge(i, F.e2i[o])

    return F
def neumann_bcs(self, V, V_real):
    """Assemble Neumann boundary contributions.

    Parameters
    ----------
    V : vector function space (for 'xyz'-direction loads).
    V_real : scalar function space (for reference-normal loads).

    Returns
    -------
    (w, w_old) : external virtual power contributions for new and old step.
    """
    w, w_old = as_ufl(0), as_ufl(0)

    for n in self.bc_dict['neumann']:

        if n['dir'] == 'xyz':

            func, func_old = Function(V), Function(V)

            if 'curve' in n.keys():
                # time-dependent load: one curve per component, registered
                # for update at every time step
                load = expression.template_vector()
                load.val_x, load.val_y, load.val_z = self.ti.timecurves(
                    n['curve'][0])(self.ti.t_init), self.ti.timecurves(
                        n['curve'][1])(self.ti.t_init), self.ti.timecurves(
                            n['curve'][2])(self.ti.t_init)
                func.interpolate(load.evaluate), func_old.interpolate(load.evaluate)
                self.ti.funcs_to_update_vec.append({
                    func: [
                        self.ti.timecurves(n['curve'][0]),
                        self.ti.timecurves(n['curve'][1]),
                        self.ti.timecurves(n['curve'][2])
                    ]
                })
                self.ti.funcs_to_update_vec_old.append({
                    func_old: [
                        self.ti.timecurves(n['curve'][0]),
                        self.ti.timecurves(n['curve'][1]),
                        self.ti.timecurves(n['curve'][2])
                    ]
                })
            else:
                func.vector.set(
                    n['val']
                )  # currently only one value for all directions - use constant load function otherwise!

            for i in range(len(n['id'])):
                ds_ = ds(subdomain_data=self.io.mt_b1,
                         subdomain_id=n['id'][i],
                         metadata={'quadrature_degree': self.quad_degree})

                w += self.vf.deltaP_ext_neumann(func, ds_)
                w_old += self.vf.deltaP_ext_neumann(func_old, ds_)

        elif n['dir'] == 'normal':  # reference normal

            func, func_old = Function(V_real), Function(V_real)

            if 'curve' in n.keys():
                # time-dependent scalar load along the reference normal
                load = expression.template()
                load.val = self.ti.timecurves(n['curve'])(self.ti.t_init)
                func.interpolate(load.evaluate), func_old.interpolate(load.evaluate)
                self.ti.funcs_to_update.append({func: self.ti.timecurves(n['curve'])})
                self.ti.funcs_to_update_old.append({func_old: self.ti.timecurves(n['curve'])})
            else:
                func.vector.set(n['val'])

            for i in range(len(n['id'])):
                ds_ = ds(subdomain_data=self.io.mt_b1,
                         subdomain_id=n['id'][i],
                         metadata={'quadrature_degree': self.quad_degree})

                w += self.vf.deltaP_ext_neumann_normal(func, ds_)
                w_old += self.vf.deltaP_ext_neumann_normal(func_old, ds_)

        else:
            raise NameError("Unknown dir option for Neumann BC!")

    return w, w_old
def set_variational_forms_and_jacobians(self):
    """Assemble the fluid weak forms (kinetic, internal, pressure and
    external virtual power) for new and old time step, combine them with
    the time-integration factors, and derive the Jacobian forms."""
    # set form for acceleration
    self.acc = self.ti.set_acc(self.v, self.v_old, self.a_old)

    # kinetic, internal, and pressure virtual power
    self.deltaP_kin, self.deltaP_kin_old = as_ufl(0), as_ufl(0)
    self.deltaP_int, self.deltaP_int_old = as_ufl(0), as_ufl(0)
    self.deltaP_p, self.deltaP_p_old = as_ufl(0), as_ufl(0)

    for n in range(self.num_domains):

        if self.timint != 'static':
            # kinetic virtual power
            self.deltaP_kin += self.vf.deltaP_kin(self.acc, self.v, self.rho[n], self.dx_[n])
            self.deltaP_kin_old += self.vf.deltaP_kin(self.a_old, self.v_old, self.rho[n], self.dx_[n])

        # internal virtual power
        self.deltaP_int += self.vf.deltaP_int(self.ma[n].sigma(self.v, self.p), self.dx_[n])
        self.deltaP_int_old += self.vf.deltaP_int(self.ma[n].sigma(self.v_old, self.p_old), self.dx_[n])

        # pressure virtual power
        self.deltaP_p += self.vf.deltaP_int_pres(self.v, self.dx_[n])
        self.deltaP_p_old += self.vf.deltaP_int_pres(self.v_old, self.dx_[n])

    # external virtual power (from Neumann or Robin boundary conditions, body forces, ...)
    w_neumann, w_neumann_old, w_robin, w_robin_old = as_ufl(0), as_ufl(0), as_ufl(0), as_ufl(0)
    if 'neumann' in self.bc_dict.keys():
        w_neumann, w_neumann_old = self.bc.neumann_bcs(self.V_v, self.Vd_scalar)
    if 'robin' in self.bc_dict.keys():
        w_robin, w_robin_old = self.bc.robin_bcs(self.v, self.v_old)

    # TODO: Body forces!
    self.deltaP_ext = w_neumann + w_robin
    self.deltaP_ext_old = w_neumann_old + w_robin_old

    self.timefac_m, self.timefac = self.ti.timefactors()

    ### full weakforms

    # kinetic plus internal minus external virtual power
    self.weakform_u = self.timefac_m * self.deltaP_kin + (1.-self.timefac_m) * self.deltaP_kin_old + \
                      self.timefac * self.deltaP_int + (1.-self.timefac) * self.deltaP_int_old - \
                      self.timefac * self.deltaP_ext - (1.-self.timefac) * self.deltaP_ext_old

    self.weakform_p = self.timefac * self.deltaP_p + (1. - self.timefac) * self.deltaP_p_old

    # Reynolds number: ratio of inertial to viscous forces
    #self.Re = sqrt(dot(self.vf.f_inert(self.acc,self.v,self.rho), self.vf.f_inert(self.acc,self.v,self.rho))) / sqrt(dot(self.vf.f_viscous(self.ma[0].sigma(self.v, self.p)), self.vf.f_viscous(self.ma[0].sigma(self.v, self.p))))

    # equal-order interpolation is not supported without stabilization
    if self.order_vel == self.order_pres:
        raise ValueError(
            "Equal order velocity and pressure interpolation requires stabilization! Not yet implemented! Use order_vel > order_pres."
        )

    #dx1_stab = dx(subdomain_data=self.io.mt_d, subdomain_id=1, metadata={'quadrature_degree': 2*3})

    ## stabilization stuff - TODO: FIXME and finish!!!
    #res_v_strong = self.vf.residual_v_strong(self.acc, self.v, self.rho, self.ma[0].sigma(self.v, self.p))
    #res_v_strong_old = self.vf.residual_v_strong(self.a_old, self.v_old, self.rho, self.ma[0].sigma(self.v_old, self.p_old))

    #res_p_strong = self.vf.residual_p_strong(self.v)
    #res_p_strong_old = self.vf.residual_p_strong(self.v_old)

    #vnorm, vnorm_old = sqrt(dot(self.v, self.v)), sqrt(dot(self.v_old, self.v_old))

    #nu = 0.004/self.rho
    #Cinv = 16.*self.Re
    #tau_SUPG = Min(self.io.h0**2./(Cinv*nu), self.io.h0/(2.*vnorm))
    ##tau = ( (2.*self.dt)**2. + (2.0*vnorm_/self.io.h0)**2 + (4.0*nu/self.io.h0**2.)**2. )**(-0.5)
    ##delta = conditional(ge(vnorm,1.0e-8), self.io.h0/(2.*vnorm), 0.)
    ##delta_old = conditional(ge(vnorm_old,1.0e-8), self.io.h0/(2.*vnorm_old), 0.)

    #stab_v = tau_SUPG * dot(dot(self.v, grad(self.var_v)),res_v_strong)*dx1_stab
    ##stab_p = tau_PSPG * dot(dot(self.v, grad(self.var_v)),res_p_strong)*dx1_stab

    #self.weakform_u += self.timefac * stab_v #+ (1.-self.timefac) * stab_old

    ##self.weakform_p += tau_SUPG*inner(grad(self.var_p), res_strong)*self.dx1

    ### Jacobians
    self.jac_uu = derivative(self.weakform_u, self.v, self.dv)
    self.jac_up = derivative(self.weakform_u, self.p, self.dp)
    self.jac_pu = derivative(self.weakform_p, self.v, self.dv)

    # for saddle-point block-diagonal preconditioner - TODO: Doesn't work very well...
    self.a_p11 = as_ufl(0)
    for n in range(self.num_domains):
        self.a_p11 += inner(self.dp, self.var_p) * self.dx_[n]
def __init__(self, io_params, time_params, fem_params, constitutive_models, bc_dict, time_curves, io, comm=None):
    '''Set up function spaces, functions and helper objects for a fluid mechanics problem.

    Parameters:
    io_params           : I/O settings; 'simname' is read here, the rest is consumed by problem_base
    time_params         : time integration settings, passed to problem_base and the time-integration class
    fem_params          : FEM settings; reads 'order_vel', 'order_pres' and 'quad_degree'
    constitutive_models : dict with one entry per domain, keyed 'MAT1', 'MAT2', ...
    bc_dict             : boundary condition specification, handed to the boundary condition class
    time_curves         : time curves, passed to the time-integration class
    comm                : MPI communicator (optional), forwarded to problem_base
    io                  : I/O object; provides the mesh, the domain meshtags (mt_d) and a normal field (n0)
    '''
    problem_base.__init__(self, io_params, time_params, comm)
    self.problem_physics = 'fluid'
    self.simname = io_params['simname']
    self.io = io
    # number of distinct domains (each one has to be assigned a own material model)
    self.num_domains = len(constitutive_models)
    self.order_vel = fem_params['order_vel']
    self.order_pres = fem_params['order_pres']
    self.quad_degree = fem_params['quad_degree']
    # collect domain data: one integration measure and one density per domain
    self.dx_, self.rho = [], []
    for n in range(self.num_domains):
        # integration domains (subdomain ids in the meshtags are 1-based)
        self.dx_.append( dx(subdomain_data=self.io.mt_d, subdomain_id=n + 1, metadata={'quadrature_degree': self.quad_degree}))
        # data for inertial forces: density
        self.rho.append(constitutive_models['MAT' + str(n + 1) + '']['inertia']['rho'])
    self.incompressible_2field = True  # always true!
    self.localsolve = False  # no idea what might have to be solved locally...
    self.prestress_initial = False  # guess prestressing in fluid is somehow senseless...
    self.p11 = as_ufl(
        0
    )  # can't think of a fluid case with non-zero 11-block in system matrix...
    # type of discontinuous function spaces (for output projections) depends on the cell type;
    # higher-order meshes need a sufficient quadrature degree
    if str(self.io.mesh.ufl_cell()) == 'tetrahedron' or str(
            self.io.mesh.ufl_cell()) == 'triangle3D':
        dg_type = "DG"
        if (self.order_vel > 1 or self.order_pres > 1) and self.quad_degree < 3:
            raise ValueError(
                "Use at least a quadrature degree of 3 or more for higher-order meshes!"
            )
    elif str(self.io.mesh.ufl_cell()) == 'hexahedron' or str(
            self.io.mesh.ufl_cell()) == 'quadrilateral3D':
        dg_type = "DQ"
        if (self.order_vel > 1 or self.order_pres > 1) and self.quad_degree < 5:
            raise ValueError(
                "Use at least a quadrature degree of 5 or more for higher-order meshes!"
            )
    else:
        raise NameError("Unknown cell/element type!")
    # create finite element objects for v and p (continuous Lagrange)
    self.P_v = VectorElement("CG", self.io.mesh.ufl_cell(), self.order_vel)
    self.P_p = FiniteElement("CG", self.io.mesh.ufl_cell(), self.order_pres)
    # function spaces for v and p
    self.V_v = FunctionSpace(self.io.mesh, self.P_v)
    self.V_p = FunctionSpace(self.io.mesh, self.P_p)
    # a discontinuous tensor, vector, and scalar function space (used for output projections)
    self.Vd_tensor = TensorFunctionSpace(self.io.mesh, (dg_type, self.order_vel - 1))
    self.Vd_vector = VectorFunctionSpace(self.io.mesh, (dg_type, self.order_vel - 1))
    self.Vd_scalar = FunctionSpace(self.io.mesh, (dg_type, self.order_vel - 1))
    # functions
    self.dv = TrialFunction(self.V_v)  # Incremental velocity
    self.var_v = TestFunction(self.V_v)  # Test function
    self.dp = TrialFunction(self.V_p)  # Incremental pressure
    self.var_p = TestFunction(self.V_p)  # Test function
    self.v = Function(self.V_v, name="Velocity")
    self.p = Function(self.V_p, name="Pressure")
    # values of previous time step
    self.v_old = Function(self.V_v)
    self.a_old = Function(self.V_v)
    self.p_old = Function(self.V_p)
    # total number of degrees of freedom (velocity + pressure)
    self.ndof = self.v.vector.getSize() + self.p.vector.getSize()
    # initialize fluid time-integration class
    self.ti = timeintegration.timeintegration_fluid(
        time_params, fem_params, time_curves, self.t_init, self.comm)
    # initialize kinematics_constitutive class
    self.ki = fluid_kinematics_constitutive.kinematics()
    # initialize material/constitutive class (one per domain)
    self.ma = []
    for n in range(self.num_domains):
        self.ma.append(
            fluid_kinematics_constitutive.constitutive(
                self.ki, constitutive_models['MAT' + str(n + 1) + '']))
    # initialize fluid variational form class
    self.vf = fluid_variationalform.variationalform(
        self.var_v, self.dv, self.var_p, self.dp, self.io.n0)
    # initialize boundary condition class
    self.bc = boundaryconditions.boundary_cond_fluid(
        bc_dict, fem_params, self.io, self.ki, self.vf, self.ti)
    self.bc_dict = bc_dict
    # Dirichlet boundary conditions
    if 'dirichlet' in self.bc_dict.keys():
        self.bc.dirichlet_bcs(self.V_v)
    # assemble the weak forms and their Jacobians
    self.set_variational_forms_and_jacobians()
def build_uflacs_ir(cell, integral_type, entitytype, integrands, tensor_shape,
                    coefficient_numbering, quadrature_rules, parameters):
    """Build the uflacs intermediate representation (IR) for the given integrands.

    For each quadrature rule (num_points) this factorizes the scalar expression
    graph with respect to the arguments, builds/optimizes element tables, and
    records per-block contribution descriptors (preintegrated / premultiplied /
    partial / full / safe) that drive downstream code generation.

    NOTE(review): `integrands` is a dict {num_points: integrand}, except for
    integral_type == "expression" where it is a list/tuple of expressions
    (see the asserts below).

    Returns the IR dict with keys such as "params", "coefficient_numbering",
    "unique_tables", "unique_table_types", "piecewise_ir", "varying_irs",
    "all_num_points".
    """
    # The intermediate representation dict we're building and returning here
    ir = {}

    # Extract uflacs specific optimization and code generation parameters
    p = parse_uflacs_optimization_parameters(parameters, integral_type)

    # Pass on parameters for consumption in code generation
    ir["params"] = p

    # { ufl coefficient: count }
    ir["coefficient_numbering"] = coefficient_numbering

    # Shared unique tables for all quadrature loops
    ir["unique_tables"] = {}
    ir["unique_table_types"] = {}

    # Shared piecewise expr_ir for all quadrature loops
    ir["piecewise_ir"] = empty_expr_ir()

    # { num_points: expr_ir for one integrand }
    ir["varying_irs"] = {}

    # Temporary data structures to build shared piecewise data
    pe2i = {}
    piecewise_modified_argument_indices = {}

    # Whether we expect the quadrature weight to be applied or not
    # (in some cases it's just set to 1 in ufl integral scaling)
    tdim = cell.topological_dimension()
    expect_weight = (
        integral_type not in ("expression",) + point_integral_types and (
            entitytype == "cell"
            or (entitytype == "facet" and tdim > 1)
            or (integral_type in custom_integral_types)
        )
    )

    if integral_type == "expression":
        # TODO: Figure out how to get non-integrand expressions in here, this is just a draft:
        # Analyse all expressions in one list
        assert isinstance(integrands, (tuple, list))
        all_num_points = [None]
        cases = [(None, integrands)]
    else:
        # Analyse each num_points/integrand separately
        assert isinstance(integrands, dict)
        all_num_points = sorted(integrands.keys())
        cases = [(num_points, [integrands[num_points]])
                 for num_points in all_num_points]
    ir["all_num_points"] = all_num_points

    for num_points, expressions in cases:
        # Rebalance order of nested terminal modifiers
        expressions = [balance_modifiers(expr) for expr in expressions]

        # Build initial scalar list-based graph representation
        V, V_deps, V_targets = build_scalar_graph(expressions)

        # Build terminal_data from V here before factorization.
        # Then we can use it to derive table properties for all modified terminals,
        # and then use that to rebuild the scalar graph more efficiently before
        # argument factorization. We can build terminal_data again after factorization
        # if that's necessary.
        initial_terminal_indices = [i for i, v in enumerate(V)
                                    if is_modified_terminal(v)]
        initial_terminal_data = [analyse_modified_terminal(V[i])
                                 for i in initial_terminal_indices]
        unique_tables, unique_table_types, unique_table_num_dofs, mt_unique_table_reference = \
            build_optimized_tables(num_points, quadrature_rules,
                                   cell, integral_type, entitytype,
                                   initial_terminal_data,
                                   ir["unique_tables"],
                                   p["enable_table_zero_compression"],
                                   rtol=p["table_rtol"],
                                   atol=p["table_atol"])

        # Replace some scalar modified terminals before reconstructing expressions
        # (could possibly use replace() on target expressions instead)
        z = as_ufl(0.0)
        one = as_ufl(1.0)
        for i, mt in zip(initial_terminal_indices, initial_terminal_data):
            if isinstance(mt.terminal, QuadratureWeight):
                # Replace quadrature weight with 1.0, will be added back later
                V[i] = one
            else:
                # Set modified terminals with zero tables to zero
                tr = mt_unique_table_reference.get(mt)
                if tr is not None and tr.ttype == "zeros":
                    V[i] = z

        # Propagate expression changes using dependency list
        for i in range(len(V)):
            deps = [V[j] for j in V_deps[i]]
            if deps:
                V[i] = V[i]._ufl_expr_reconstruct_(*deps)

        # Rebuild scalar target expressions and graph
        # (this may be overkill and possible to optimize
        # away if it turns out to be costly)
        expressions = [V[i] for i in V_targets]

        # Rebuild scalar list-based graph representation
        SV, SV_deps, SV_targets = build_scalar_graph(expressions)
        assert all(i < len(SV) for i in SV_targets)

        # Compute factorization of arguments
        (argument_factorizations, modified_arguments,
         FV, FV_deps, FV_targets) = \
            compute_argument_factorization(SV, SV_deps, SV_targets, len(tensor_shape))
        assert len(SV_targets) == len(argument_factorizations)

        # TODO: Still expecting one target variable in code generation
        assert len(argument_factorizations) == 1
        argument_factorization, = argument_factorizations

        # Store modified arguments in analysed form
        for i in range(len(modified_arguments)):
            modified_arguments[i] = analyse_modified_terminal(modified_arguments[i])

        # Build set of modified_terminal indices into factorized_vertices
        modified_terminal_indices = [i for i, v in enumerate(FV)
                                     if is_modified_terminal(v)]

        # Build set of modified terminal ufl expressions
        modified_terminals = [analyse_modified_terminal(FV[i])
                              for i in modified_terminal_indices]

        # Make it easy to get mt object from FV index
        FV_mts = [None]*len(FV)
        for i, mt in zip(modified_terminal_indices, modified_terminals):
            FV_mts[i] = mt

        # Mark active modified arguments
        #active_modified_arguments = numpy.zeros(len(modified_arguments), dtype=int)
        #for ma_indices in argument_factorization:
        #    for j in ma_indices:
        #        active_modified_arguments[j] = 1

        # Dependency analysis
        inv_FV_deps, FV_active, FV_piecewise, FV_varying = \
            analyse_dependencies(FV, FV_deps, FV_targets,
                                 modified_terminal_indices,
                                 modified_terminals,
                                 mt_unique_table_reference)

        # Extend piecewise V with unique new FV_piecewise vertices
        pir = ir["piecewise_ir"]
        for i, v in enumerate(FV):
            if FV_piecewise[i]:
                j = pe2i.get(v)
                if j is None:
                    j = len(pe2i)
                    pe2i[v] = j
                    pir["V"].append(v)
                    pir["V_active"].append(1)
                    mt = FV_mts[i]
                    if mt is not None:
                        pir["mt_tabledata"][mt] = mt_unique_table_reference.get(mt)
                    pir["V_mts"].append(mt)

        # Extend piecewise modified_arguments list with unique new items
        for mt in modified_arguments:
            ma = piecewise_modified_argument_indices.get(mt)
            if ma is None:
                ma = len(pir["modified_arguments"])
                pir["modified_arguments"].append(mt)
                piecewise_modified_argument_indices[mt] = ma

        # Loop over factorization terms
        block_contributions = defaultdict(list)
        for ma_indices, fi in sorted(argument_factorization.items()):
            # Get a bunch of information about this term
            rank = len(ma_indices)
            trs = tuple(mt_unique_table_reference[modified_arguments[ai]] for ai in ma_indices)
            unames = tuple(tr.name for tr in trs)
            ttypes = tuple(tr.ttype for tr in trs)
            assert not any(tt == "zeros" for tt in ttypes)
            blockmap = tuple(tr.dofmap for tr in trs)

            block_is_uniform = all(tr.is_uniform for tr in trs)

            # Collect relevant restrictions to identify blocks
            # correctly in interior facet integrals
            block_restrictions = []
            for i, ma in enumerate(ma_indices):
                if trs[i].is_uniform:
                    r = None
                else:
                    r = modified_arguments[ma].restriction
                block_restrictions.append(r)
            block_restrictions = tuple(block_restrictions)

            # Store piecewise status for fi and translate
            # index to piecewise scope if relevant
            factor_is_piecewise = FV_piecewise[fi]
            if factor_is_piecewise:
                factor_index = pe2i[FV[fi]]
            else:
                factor_index = fi

            # TODO: Add separate block modes for quadrature
            # Both arguments in quadrature elements
            """
            for iq
                fw = f*w
                #for i
                #    for j
                #        B[i,j] = fw*U[i]*V[j] = 0 if i != iq or j != iq
                BQ[iq] = B[iq,iq] = fw
            for (iq)
                A[iq+offset0, iq+offset1] = BQ[iq]
            """
            # One argument in quadrature element
            """
            for iq
                fw[iq] = f*w
                #for i
                #    for j
                #        B[i,j] = fw*UQ[i]*V[j] = 0 if i != iq
                for j
                    BQ[iq,j] = fw[iq]*V[iq,j]
            for (iq) for (j)
                A[iq+offset, j+offset] = BQ[iq,j]
            """

            # Decide how to handle code generation for this block
            if p["enable_preintegration"] and (factor_is_piecewise
                    and rank > 0 and "quadrature" not in ttypes):
                # - Piecewise factor is an absolute prerequisite
                # - Could work for rank 0 as well but currently doesn't
                # - Haven't considered how quadrature elements work out
                block_mode = "preintegrated"
            elif p["enable_premultiplication"] and (rank > 0
                    and all(tt in piecewise_ttypes for tt in ttypes)):
                # Integrate functional in quadloop, scale block after quadloop
                block_mode = "premultiplied"
            elif p["enable_sum_factorization"]:
                if (rank == 2 and any(tt in piecewise_ttypes for tt in ttypes)):
                    # Partial computation in quadloop of f*u[i],
                    # compute (f*u[i])*v[i] outside quadloop,
                    # (or with u,v swapped)
                    block_mode = "partial"
                else:
                    # Full runtime integration of f*u[i]*v[j],
                    # can still do partial computation in quadloop of f*u[i]
                    # but must compute (f*u[i])*v[i] as well inside quadloop.
                    # (or with u,v swapped)
                    block_mode = "full"
            else:
                # Use full runtime integration with nothing fancy going on
                block_mode = "safe"

            # Carry out decision
            if block_mode == "preintegrated":
                # Add to contributions:
                # P = sum_q weight*u*v; preintegrated here
                # B[...] = f * P[...]; generated after quadloop
                # A[blockmap] += B[...]; generated after quadloop

                cache = ir["piecewise_ir"]["preintegrated_blocks"]

                block_is_transposed = False
                pname = cache.get(unames)

                # Reuse transpose to save memory
                if p["enable_block_transpose_reuse"] and pname is None and len(unames) == 2:
                    pname = cache.get((unames[1], unames[0]))
                    if pname is not None:
                        # Cache hit on transpose
                        block_is_transposed = True

                if pname is None:
                    # Cache miss, precompute block
                    weights = quadrature_rules[num_points][1]
                    if integral_type == "interior_facet":
                        ptable = integrate_block_interior_facets(weights, unames, ttypes, unique_tables, unique_table_num_dofs)
                    else:
                        ptable = integrate_block(weights, unames, ttypes, unique_tables, unique_table_num_dofs)
                    ptable = clamp_table_small_numbers(ptable, rtol=p["table_rtol"], atol=p["table_atol"])
                    pname = "PI%d" % (len(cache,))
                    cache[unames] = pname
                    unique_tables[pname] = ptable
                    unique_table_types[pname] = "preintegrated"

                assert factor_is_piecewise
                block_unames = (pname,)
                blockdata = preintegrated_block_data_t(block_mode, ttypes,
                                                      factor_index, factor_is_piecewise,
                                                      block_unames, block_restrictions,
                                                      block_is_transposed, block_is_uniform,
                                                      pname)
                block_is_piecewise = True

            elif block_mode == "premultiplied":
                # Add to contributions:
                # P = u*v; computed here
                # FI = sum_q weight * f; generated inside quadloop
                # B[...] = FI * P[...]; generated after quadloop
                # A[blockmap] += B[...]; generated after quadloop

                cache = ir["piecewise_ir"]["premultiplied_blocks"]

                block_is_transposed = False
                pname = cache.get(unames)

                # Reuse transpose to save memory
                if p["enable_block_transpose_reuse"] and pname is None and len(unames) == 2:
                    pname = cache.get((unames[1], unames[0]))
                    if pname is not None:
                        # Cache hit on transpose
                        block_is_transposed = True

                if pname is None:
                    # Cache miss, precompute block
                    if integral_type == "interior_facet":
                        ptable = multiply_block_interior_facets(0, unames, ttypes, unique_tables, unique_table_num_dofs)
                    else:
                        ptable = multiply_block(0, unames, ttypes, unique_tables, unique_table_num_dofs)
                    pname = "PM%d" % (len(cache,))
                    cache[unames] = pname
                    unique_tables[pname] = ptable
                    unique_table_types[pname] = "premultiplied"

                block_unames = (pname,)
                blockdata = premultiplied_block_data_t(block_mode, ttypes,
                                                       factor_index, factor_is_piecewise,
                                                       block_unames, block_restrictions,
                                                       block_is_transposed, block_is_uniform,
                                                       pname)
                block_is_piecewise = False

            elif block_mode == "scaled":
                # TODO: Add mode, block is piecewise but choose not to be premultiplied
                # Add to contributions:
                # FI = sum_q weight * f; generated inside quadloop
                # B[...] = FI * u * v; generated after quadloop
                # A[blockmap] += B[...]; generated after quadloop
                raise NotImplementedError("scaled block mode not implemented.")
                # (probably need mostly the same data as premultiplied, except no P table name or values)
                block_is_piecewise = False

            elif block_mode in ("partial", "full", "safe"):
                # Translate indices to piecewise context if necessary
                block_is_piecewise = factor_is_piecewise and not expect_weight
                ma_data = []
                for i, ma in enumerate(ma_indices):
                    if trs[i].is_piecewise:
                        ma_index = piecewise_modified_argument_indices[modified_arguments[ma]]
                    else:
                        block_is_piecewise = False
                        ma_index = ma
                    ma_data.append(ma_data_t(ma_index, trs[i]))

                block_is_transposed = False  # FIXME: Handle transposes for these block types

                if block_mode == "partial":
                    # Add to contributions:
                    # P[i] = sum_q weight * f * u[i]; generated inside quadloop
                    # B[i,j] = P[i] * v[j]; generated after quadloop (where v is the piecewise ma)
                    # A[blockmap] += B[...]; generated after quadloop

                    # Find first piecewise index TODO: Is last better? just reverse range here
                    for i in range(rank):
                        if trs[i].is_piecewise:
                            piecewise_ma_index = i
                            break
                    assert rank == 2
                    not_piecewise_ma_index = 1 - piecewise_ma_index
                    block_unames = (unames[not_piecewise_ma_index],)
                    blockdata = partial_block_data_t(block_mode, ttypes,
                                                     factor_index, factor_is_piecewise,
                                                     block_unames, block_restrictions,
                                                     block_is_transposed,
                                                     tuple(ma_data), piecewise_ma_index)
                elif block_mode in ("full", "safe"):
                    # Add to contributions:
                    # B[i] = sum_q weight * f * u[i] * v[j]; generated inside quadloop
                    # A[blockmap] += B[i]; generated after quadloop
                    block_unames = unames
                    blockdata = full_block_data_t(block_mode, ttypes,
                                                  factor_index, factor_is_piecewise,
                                                  block_unames, block_restrictions,
                                                  block_is_transposed,
                                                  tuple(ma_data))
            else:
                error("Invalid block_mode %s" % (block_mode,))

            if block_is_piecewise:
                # Insert in piecewise expr_ir
                ir["piecewise_ir"]["block_contributions"][blockmap].append(blockdata)
            else:
                # Insert in varying expr_ir for this quadrature loop
                block_contributions[blockmap].append(blockdata)

        # Figure out which table names are referenced in unstructured partition
        active_table_names = set()
        for i, mt in zip(modified_terminal_indices, modified_terminals):
            tr = mt_unique_table_reference.get(mt)
            if tr is not None and FV_active[i]:
                active_table_names.add(tr.name)

        # Figure out which table names are referenced in blocks
        for blockmap, contributions in chain(block_contributions.items(),
                                             ir["piecewise_ir"]["block_contributions"].items()):
            for blockdata in contributions:
                if blockdata.block_mode in ("preintegrated", "premultiplied"):
                    active_table_names.add(blockdata.name)
                elif blockdata.block_mode in ("partial", "full", "safe"):
                    for mad in blockdata.ma_data:
                        active_table_names.add(mad.tabledata.name)

        # Record all table types before dropping tables
        ir["unique_table_types"].update(unique_table_types)

        # Drop tables not referenced from modified terminals
        # and tables of zeros and ones
        unused_ttypes = ("zeros", "ones", "quadrature")
        keep_table_names = set()
        for name in active_table_names:
            ttype = ir["unique_table_types"][name]
            if ttype not in unused_ttypes:
                if name in unique_tables:
                    keep_table_names.add(name)
        unique_tables = { name: unique_tables[name] for name in keep_table_names }

        # Add to global set of all tables
        for name, table in unique_tables.items():
            tbl = ir["unique_tables"].get(name)
            if tbl is not None and not numpy.allclose(tbl, table, rtol=p["table_rtol"], atol=p["table_atol"]):
                error("Table values mismatch with same name.")
        ir["unique_tables"].update(unique_tables)

        # Analyse active terminals to check what we'll need to generate code for
        active_mts = []
        for i, mt in zip(modified_terminal_indices, modified_terminals):
            if FV_active[i]:
                active_mts.append(mt)

        # Figure out if we need to access CellCoordinate to
        # avoid generating quadrature point table otherwise
        if integral_type == "cell":
            need_points = any(isinstance(mt.terminal, CellCoordinate)
                              for mt in active_mts)
        elif integral_type in facet_integral_types:
            need_points = any(isinstance(mt.terminal, FacetCoordinate)
                              for mt in active_mts)
        elif integral_type in custom_integral_types:
            need_points = True  # TODO: Always?
        else:
            need_points = False

        # Figure out if we need to access QuadratureWeight to
        # avoid generating quadrature point table otherwise
        #need_weights = any(isinstance(mt.terminal, QuadratureWeight)
        #                   for mt in active_mts)

        # Count blocks of each mode
        block_modes = defaultdict(int)
        for blockmap, contributions in block_contributions.items():
            for blockdata in contributions:
                block_modes[blockdata.block_mode] += 1
        # Debug output
        summary = "\n".join(" %d\t%s" % (count, mode)
                            for mode, count in sorted(block_modes.items()))
        debug("Blocks of each mode: \n" + summary)

        # If there are any blocks other than preintegrated we need weights
        if expect_weight and any(mode != "preintegrated" for mode in block_modes):
            need_weights = True
        elif integral_type in custom_integral_types:
            need_weights = True  # TODO: Always?
        else:
            need_weights = False

        # Build IR dict for the given expressions
        expr_ir = {}

        # (array) FV-index -> UFL subexpression
        expr_ir["V"] = FV

        # (array) V indices for each input expression component in flattened order
        expr_ir["V_targets"] = FV_targets

        ### Result of factorization:
        # (array) MA-index -> UFL expression of modified arguments
        expr_ir["modified_arguments"] = modified_arguments

        # (dict) tuple(MA-indices) -> FV-index of monomial factor
        #expr_ir["argument_factorization"] = argument_factorization

        expr_ir["block_contributions"] = block_contributions

        ### Modified terminals
        # (array) list of FV-indices to modified terminals
        #expr_ir["modified_terminal_indices"] = modified_terminal_indices

        # Dependency structure of graph:
        # (CRSArray) FV-index -> direct dependency FV-index list
        #expr_ir["dependencies"] = FV_deps

        # (CRSArray) FV-index -> direct dependee FV-index list
        #expr_ir["inverse_dependencies"] = inv_FV_deps

        # Metadata about each vertex
        #expr_ir["active"] = FV_active  # (array) FV-index -> bool
        #expr_ir["V_piecewise"] = FV_piecewise  # (array) FV-index -> bool
        expr_ir["V_varying"] = FV_varying  # (array) FV-index -> bool
        expr_ir["V_mts"] = FV_mts

        # Store mapping from modified terminal object to
        # table data, this is used in integralgenerator
        expr_ir["mt_tabledata"] = mt_unique_table_reference

        # To emit quadrature rules only if needed
        expr_ir["need_points"] = need_points
        expr_ir["need_weights"] = need_weights

        # Store final ir for this num_points
        ir["varying_irs"][num_points] = expr_ir

    return ir
def assertEqualValues(self, A, B):
    '''Assert that A and B have the same UFL shape and equal values.

    B may be a bare constant (or nested list of constants); it is converted
    to a UFL expression first. Equality of values is checked by evaluating
    the squared difference inner(A-B, A-B), which must be exactly zero.
    '''
    other = as_ufl(B)
    self.assertEqual(A.ufl_shape, other.ufl_shape)
    diff = A - other
    self.assertEqual(inner(diff, diff)(None), 0)
def write_output(self, pb, writemesh=False, N=1, t=0):
    '''Write solid mechanics results and/or restart checkpoints.

    With writemesh=True, one XDMF file per requested output quantity is
    opened, the mesh is written into it, and the method returns. Otherwise,
    every `write_results_every` steps the requested quantities are appended
    to these files (quantities that are UFL expressions rather than
    functions are first projected onto a suitable output space), and every
    `write_restart_every` steps a restart checkpoint is written.

    Parameters:
    pb        : problem object holding solution functions, kinematics (ki) and materials (ma)
    writemesh : if True, only initialize the result files and write the mesh
    N         : current time step index
    t         : current time
    '''
    if writemesh:
        if self.write_results_every > 0:
            self.resultsfiles = {}
            for res in self.results_to_write:
                outfile = XDMFFile(self.comm, self.output_path+'/results_'+pb.simname+'_'+res+'.xdmf', 'w')
                outfile.write_mesh(self.mesh)
                self.resultsfiles[res] = outfile
        return
    else:
        # write results every write_results_every steps
        if self.write_results_every > 0 and N % self.write_results_every == 0:
            # save solution to XDMF format
            for res in self.results_to_write:
                if res=='displacement':
                    self.resultsfiles[res].write_function(pb.u, t)
                elif res=='velocity': # passed in v is not a function but form, so we have to project
                    v_proj = project(pb.vel, pb.V_u, pb.dx_, nm="Velocity")
                    self.resultsfiles[res].write_function(v_proj, t)
                elif res=='acceleration': # passed in a is not a function but form, so we have to project
                    a_proj = project(pb.acc, pb.V_u, pb.dx_, nm="Acceleration")
                    self.resultsfiles[res].write_function(a_proj, t)
                elif res=='pressure':
                    self.resultsfiles[res].write_function(pb.p, t)
                elif res=='cauchystress':
                    # one stress expression per domain, projected together onto the output space
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        stressfuncs.append(pb.ma[n].sigma(pb.u,pb.p,ivar=pb.internalvars))
                    cauchystress = project(stressfuncs, pb.Vd_tensor, pb.dx_, nm="CauchyStress")
                    self.resultsfiles[res].write_function(cauchystress, t)
                elif res=='trmandelstress':
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        stressfuncs.append(tr(pb.ma[n].M(pb.u,pb.p,ivar=pb.internalvars)))
                    trmandelstress = project(stressfuncs, pb.Vd_scalar, pb.dx_, nm="trMandelStress")
                    self.resultsfiles[res].write_function(trmandelstress, t)
                elif res=='trmandelstress_e':
                    # elastic part only defined for growth materials; zero elsewhere
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        if pb.mat_growth[n]: stressfuncs.append(tr(pb.ma[n].M_e(pb.u,pb.p,pb.ki.C(pb.u),ivar=pb.internalvars)))
                        else: stressfuncs.append(as_ufl(0))
                    trmandelstress_e = project(stressfuncs, pb.Vd_scalar, pb.dx_, nm="trMandelStress_e")
                    self.resultsfiles[res].write_function(trmandelstress_e, t)
                elif res=='vonmises_cauchystress':
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        stressfuncs.append(pb.ma[n].sigma_vonmises(pb.u,pb.p,ivar=pb.internalvars))
                    vonmises_cauchystress = project(stressfuncs, pb.Vd_scalar, pb.dx_, nm="vonMises_CauchyStress")
                    self.resultsfiles[res].write_function(vonmises_cauchystress, t)
                elif res=='pk1stress':
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        stressfuncs.append(pb.ma[n].P(pb.u,pb.p,ivar=pb.internalvars))
                    pk1stress = project(stressfuncs, pb.Vd_tensor, pb.dx_, nm="PK1Stress")
                    self.resultsfiles[res].write_function(pk1stress, t)
                elif res=='pk2stress':
                    stressfuncs=[]
                    for n in range(pb.num_domains):
                        stressfuncs.append(pb.ma[n].S(pb.u,pb.p,ivar=pb.internalvars))
                    pk2stress = project(stressfuncs, pb.Vd_tensor, pb.dx_, nm="PK2Stress")
                    self.resultsfiles[res].write_function(pk2stress, t)
                elif res=='jacobian':
                    jacobian = project(pb.ki.J(pb.u), pb.Vd_scalar, pb.dx_, nm="Jacobian")
                    self.resultsfiles[res].write_function(jacobian, t)
                elif res=='glstrain':
                    glstrain = project(pb.ki.E(pb.u), pb.Vd_tensor, pb.dx_, nm="GreenLagrangeStrain")
                    self.resultsfiles[res].write_function(glstrain, t)
                elif res=='eastrain':
                    eastrain = project(pb.ki.e(pb.u), pb.Vd_tensor, pb.dx_, nm="EulerAlmansiStrain")
                    self.resultsfiles[res].write_function(eastrain, t)
                elif res=='fiberstretch':
                    fiberstretch = project(pb.ki.fibstretch(pb.u,pb.fib_func[0]), pb.Vd_scalar, pb.dx_, nm="FiberStretch")
                    self.resultsfiles[res].write_function(fiberstretch, t)
                elif res=='fiberstretch_e':
                    # elastic fiber stretch only defined for growth materials; zero elsewhere
                    stretchfuncs=[]
                    for n in range(pb.num_domains):
                        if pb.mat_growth[n]: stretchfuncs.append(pb.ma[n].fibstretch_e(pb.ki.C(pb.u),pb.theta,pb.fib_func[0]))
                        else: stretchfuncs.append(as_ufl(0))
                    fiberstretch_e = project(stretchfuncs, pb.Vd_scalar, pb.dx_, nm="FiberStretch_e")
                    self.resultsfiles[res].write_function(fiberstretch_e, t)
                elif res=='theta':
                    self.resultsfiles[res].write_function(pb.theta, t)
                elif res=='phi_remod':
                    # remodeling fraction only defined for remodeling materials; zero elsewhere
                    phifuncs=[]
                    for n in range(pb.num_domains):
                        if pb.mat_remodel[n]: phifuncs.append(pb.ma[n].phi_remod(pb.theta))
                        else: phifuncs.append(as_ufl(0))
                    phiremod = project(phifuncs, pb.Vd_scalar, pb.dx_, nm="phiRemodel")
                    self.resultsfiles[res].write_function(phiremod, t)
                elif res=='tau_a':
                    self.resultsfiles[res].write_function(pb.tau_a, t)
                elif res=='fiber1':
                    fiber1 = project(pb.fib_func[0], pb.Vd_vector, pb.dx_, nm="Fiber1")
                    self.resultsfiles[res].write_function(fiber1, t)
                elif res=='fiber2':
                    fiber2 = project(pb.fib_func[1], pb.Vd_vector, pb.dx_, nm="Fiber2")
                    self.resultsfiles[res].write_function(fiber2, t)
                else:
                    raise NameError("Unknown output to write for solid mechanics!")

        # write restart checkpoint every write_restart_every steps
        if self.write_restart_every > 0 and N % self.write_restart_every == 0:
            self.writecheckpoint(pb, N)
def __init__(self, io_params, time_params, fem_params, constitutive_models, bc_dict, time_curves, io, comm=None): problem_base.__init__(self, io_params, time_params, comm) self.problem_physics = 'solid' self.simname = io_params['simname'] self.io = io # number of distinct domains (each one has to be assigned a own material model) self.num_domains = len(constitutive_models) self.order_disp = fem_params['order_disp'] try: self.order_pres = fem_params['order_pres'] except: self.order_pres = 1 self.quad_degree = fem_params['quad_degree'] self.incompressible_2field = fem_params['incompressible_2field'] self.fem_params = fem_params self.constitutive_models = constitutive_models # collect domain data self.dx_, self.rho0, self.rayleigh, self.eta_m, self.eta_k = [], [], [False]*self.num_domains, [], [] for n in range(self.num_domains): # integration domains self.dx_.append(dx(subdomain_data=self.io.mt_d, subdomain_id=n+1, metadata={'quadrature_degree': self.quad_degree})) # data for inertial and viscous forces: density and damping if self.timint != 'static': self.rho0.append(constitutive_models['MAT'+str(n+1)+'']['inertia']['rho0']) if 'rayleigh_damping' in constitutive_models['MAT'+str(n+1)+''].keys(): self.rayleigh[n] = True self.eta_m.append(constitutive_models['MAT'+str(n+1)+'']['rayleigh_damping']['eta_m']) self.eta_k.append(constitutive_models['MAT'+str(n+1)+'']['rayleigh_damping']['eta_k']) try: self.prestress_initial = fem_params['prestress_initial'] except: self.prestress_initial = False # type of discontinuous function spaces if str(self.io.mesh.ufl_cell()) == 'tetrahedron' or str(self.io.mesh.ufl_cell()) == 'triangle3D': dg_type = "DG" if (self.order_disp > 1 or self.order_pres > 1) and self.quad_degree < 3: raise ValueError("Use at least a quadrature degree of 3 or more for higher-order meshes!") elif str(self.io.mesh.ufl_cell()) == 'hexahedron' or str(self.io.mesh.ufl_cell()) == 'quadrilateral3D': dg_type = "DQ" if (self.order_disp > 1 or self.order_pres > 1) 
and self.quad_degree < 5: raise ValueError("Use at least a quadrature degree of 5 or more for higher-order meshes!") else: raise NameError("Unknown cell/element type!") # create finite element objects for u and p P_u = VectorElement("CG", self.io.mesh.ufl_cell(), self.order_disp) P_p = FiniteElement("CG", self.io.mesh.ufl_cell(), self.order_pres) # function spaces for u and p self.V_u = FunctionSpace(self.io.mesh, P_u) self.V_p = FunctionSpace(self.io.mesh, P_p) # Quadrature tensor, vector, and scalar elements Q_tensor = TensorElement("Quadrature", self.io.mesh.ufl_cell(), degree=1, quad_scheme="default") Q_vector = VectorElement("Quadrature", self.io.mesh.ufl_cell(), degree=1, quad_scheme="default") Q_scalar = FiniteElement("Quadrature", self.io.mesh.ufl_cell(), degree=1, quad_scheme="default") # not yet working - we cannot interpolate into Quadrature elements with the current dolfinx version currently! #self.Vd_tensor = FunctionSpace(self.io.mesh, Q_tensor) #self.Vd_vector = FunctionSpace(self.io.mesh, Q_vector) #self.Vd_scalar = FunctionSpace(self.io.mesh, Q_scalar) # Quadrature function spaces (currently not properly functioning for higher-order meshes!!!) 
# --- tail of the enclosing setup/__init__ method (its 'def' line lies above this excerpt) ---

# discontinuous (dg_type) spaces one polynomial order below the displacement interpolation;
# used for projected/element-wise quantities (defgrad history, fibers, growth, active stress)
self.Vd_tensor = TensorFunctionSpace(self.io.mesh, (dg_type, self.order_disp-1))
self.Vd_vector = VectorFunctionSpace(self.io.mesh, (dg_type, self.order_disp-1))
self.Vd_scalar = FunctionSpace(self.io.mesh, (dg_type, self.order_disp-1))

# functions
self.du = TrialFunction(self.V_u)      # incremental displacement
self.var_u = TestFunction(self.V_u)    # displacement test function
self.dp = TrialFunction(self.V_p)      # incremental pressure
self.var_p = TestFunction(self.V_p)    # pressure test function
self.u = Function(self.V_u, name="Displacement")
self.p = Function(self.V_p, name="Pressure")

# values of previous time step
self.u_old = Function(self.V_u)
self.v_old = Function(self.V_u)
self.a_old = Function(self.V_u)
self.p_old = Function(self.V_p)

# a setpoint displacement for multiscale analysis
self.u_set = Function(self.V_u)
self.p_set = Function(self.V_p)
self.tau_a_set = Function(self.Vd_scalar)

# initial (zero) functions for initial stiffness evaluation (e.g. for Rayleigh damping)
self.u_ini, self.p_ini, self.theta_ini, self.tau_a_ini = Function(self.V_u), Function(self.V_p), Function(self.Vd_scalar), Function(self.Vd_scalar)
self.theta_ini.vector.set(1.0)
self.theta_ini.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD)

# growth stretch
self.theta = Function(self.Vd_scalar, name="theta")
self.theta_old = Function(self.Vd_scalar)
self.growth_thres = Function(self.Vd_scalar)
# initialize to one (theta = 1 means no growth)
self.theta.vector.set(1.0), self.theta_old.vector.set(1.0)
self.theta.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD), self.theta_old.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD)

# active stress
self.tau_a = Function(self.Vd_scalar, name="tau_a")
self.tau_a_old = Function(self.Vd_scalar)
self.amp_old, self.amp_old_set = Function(self.Vd_scalar), Function(self.Vd_scalar)
self.amp_old.vector.set(1.0), self.amp_old_set.vector.set(1.0)
self.amp_old.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD), self.amp_old_set.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD)

# prestressing history defgrad and spring prestress displacement (only allocated if needed)
if self.prestress_initial:
    self.F_hist = Function(self.Vd_tensor, name="Defgrad_hist")
    self.u_pre = Function(self.V_u)
else:
    self.F_hist = None
    self.u_pre = None

# internal-variable dictionaries handed to the material evaluation (current and old step)
self.internalvars = {"theta" : self.theta, "tau_a" : self.tau_a}
self.internalvars_old = {"theta" : self.theta_old, "tau_a" : self.tau_a_old}

# reference coordinates
self.x_ref = Function(self.V_u)
self.x_ref.interpolate(self.x_ref_expr)

# total number of degrees of freedom (displacement, plus pressure for the 2-field case)
if self.incompressible_2field:
    self.ndof = self.u.vector.getSize() + self.p.vector.getSize()
else:
    self.ndof = self.u.vector.getSize()

# initialize solid time-integration class
self.ti = timeintegration.timeintegration_solid(time_params, fem_params, time_curves, self.t_init, self.comm)

# check for materials that need extra treatment (anisotropic, active stress, growth, ...)
have_fiber1, have_fiber2 = False, False
self.have_active_stress, self.active_stress_trig, self.have_frank_starling, self.have_growth = False, 'ode', False, False
# per-domain material flags.
# NOTE(review): []*self.num_domains evaluates to the empty list — mat_growth_thres is
# filled via append in the loop below, so its length only matches num_domains afterwards
self.mat_active_stress, self.mat_growth, self.mat_remodel, self.mat_growth_dir, self.mat_growth_trig, self.mat_growth_thres = [False]*self.num_domains, [False]*self.num_domains, [False]*self.num_domains, [None]*self.num_domains, [None]*self.num_domains, []*self.num_domains

self.localsolve, growth_dir = False, None
self.actstress = []
for n in range(self.num_domains):

    # these anisotropic material models require fiber (and sheet) data
    if 'holzapfelogden_dev' in self.constitutive_models['MAT'+str(n+1)+''].keys() or 'guccione_dev' in self.constitutive_models['MAT'+str(n+1)+''].keys():
        have_fiber1, have_fiber2 = True, True

    # active stress along a fiber direction
    if 'active_fiber' in self.constitutive_models['MAT'+str(n+1)+''].keys():
        have_fiber1 = True
        self.mat_active_stress[n], self.have_active_stress = True, True
        # if one mat has a prescribed active stress, all have to be!
        if 'prescribed_curve' in self.constitutive_models['MAT'+str(n+1)+'']['active_fiber']:
            self.active_stress_trig = 'prescribed'
        if 'prescribed_multiscale' in self.constitutive_models['MAT'+str(n+1)+'']['active_fiber']:
            self.active_stress_trig = 'prescribed_multiscale'
        if self.active_stress_trig == 'ode':
            act_curve = self.ti.timecurves(self.constitutive_models['MAT'+str(n+1)+'']['active_fiber']['activation_curve'])
            self.actstress.append(activestress_activation(self.constitutive_models['MAT'+str(n+1)+'']['active_fiber'], act_curve))
            if self.actstress[-1].frankstarling:
                self.have_frank_starling = True
        if self.active_stress_trig == 'prescribed':
            self.ti.funcs_to_update.append({self.tau_a : self.ti.timecurves(self.constitutive_models['MAT'+str(n+1)+'']['active_fiber']['prescribed_curve'])})

    # isotropic active stress (no fiber data needed)
    if 'active_iso' in self.constitutive_models['MAT'+str(n+1)+''].keys():
        self.mat_active_stress[n], self.have_active_stress = True, True
        # if one mat has a prescribed active stress, all have to be!
        if 'prescribed_curve' in self.constitutive_models['MAT'+str(n+1)+'']['active_iso']:
            self.active_stress_trig = 'prescribed'
        if 'prescribed_multiscale' in self.constitutive_models['MAT'+str(n+1)+'']['active_iso']:
            self.active_stress_trig = 'prescribed_multiscale'
        if self.active_stress_trig == 'ode':
            act_curve = self.ti.timecurves(self.constitutive_models['MAT'+str(n+1)+'']['active_iso']['activation_curve'])
            self.actstress.append(activestress_activation(self.constitutive_models['MAT'+str(n+1)+'']['active_iso'], act_curve))
        if self.active_stress_trig == 'prescribed':
            self.ti.funcs_to_update.append({self.tau_a : self.ti.timecurves(self.constitutive_models['MAT'+str(n+1)+'']['active_iso']['prescribed_curve'])})

    # growth material
    if 'growth' in self.constitutive_models['MAT'+str(n+1)+''].keys():
        self.mat_growth[n], self.have_growth = True, True
        self.mat_growth_dir[n] = self.constitutive_models['MAT'+str(n+1)+'']['growth']['growth_dir']
        self.mat_growth_trig[n] = self.constitutive_models['MAT'+str(n+1)+'']['growth']['growth_trig']
        # need to have fiber fields for the following growth options
        if self.mat_growth_dir[n] == 'fiber' or self.mat_growth_trig[n] == 'fibstretch':
            have_fiber1 = True
        if self.mat_growth_dir[n] == 'radial':
            have_fiber1, have_fiber2 = True, True
        # in this case, we have a theta that is (nonlinearly) dependent on the deformation, theta = theta(C(u)),
        # therefore we need a local Newton iteration to solve for equilibrium theta (return mapping) prior to entering
        # the global Newton scheme - so flag localsolve to true
        if self.mat_growth_trig[n] != 'prescribed' and self.mat_growth_trig[n] != 'prescribed_multiscale':
            self.localsolve = True
            self.mat_growth_thres.append(self.constitutive_models['MAT'+str(n+1)+'']['growth']['growth_thres'])
        else:
            self.mat_growth_thres.append(as_ufl(0))
        # for the case that we have a prescribed growth stretch over time, append curve to functions that need time updates
        # if one mat has a prescribed growth model, all have to be!
        if self.mat_growth_trig[n] == 'prescribed':
            self.ti.funcs_to_update.append({self.theta : self.ti.timecurves(self.constitutive_models['MAT'+str(n+1)+'']['growth']['prescribed_curve'])})
        if 'remodeling_mat' in self.constitutive_models['MAT'+str(n+1)+'']['growth'].keys():
            self.mat_remodel[n] = True
    else:
        # no growth in this domain: zero threshold placeholder keeps list aligned with domain index
        self.mat_growth_thres.append(as_ufl(0))

# full linearization of our remodeling law can lead to excessive compiler times for ffcx... :-/
# let's try whether we can go without one of the critical terms (derivative of remodeling fraction w.r.t. C)
# NOTE(review): bare except also hides non-KeyError failures; default is full linearization
try:
    self.lin_remod_full = fem_params['lin_remodeling_full']
except:
    self.lin_remod_full = True

# growth threshold (as function, since in multiscale approach, it can vary element-wise)
# NOTE(review): ghostUpdate is issued before interpolate fills growth_thres — confirm intended order
if self.have_growth and self.localsolve:
    growth_thres_proj = project(self.mat_growth_thres, self.Vd_scalar, self.dx_)
    self.growth_thres.vector.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    self.growth_thres.interpolate(growth_thres_proj)

# read in fiber data
if have_fiber1:
    fibarray = ['fiber']
    if have_fiber2:
        fibarray.append('sheet')
    # fiber function space - vector defined on quadrature points
    V_fib = self.Vd_vector
    self.fib_func = self.io.readin_fibers(fibarray, V_fib, self.dx_)
else:
    self.fib_func = None

# for multiscale G&R analysis
self.tol_stop_large = 0

# initialize kinematics class
self.ki = solid_kinematics_constitutive.kinematics(fib_funcs=self.fib_func, F_hist=self.F_hist)

# initialize material/constitutive class (one instance per domain)
self.ma = []
for n in range(self.num_domains):
    self.ma.append(solid_kinematics_constitutive.constitutive(self.ki, self.constitutive_models['MAT'+str(n+1)+''], self.incompressible_2field, mat_growth=self.mat_growth[n], mat_remodel=self.mat_remodel[n]))

# initialize solid variational form class
self.vf = solid_variationalform.variationalform(self.var_u, self.du, self.var_p, self.dp, self.io.n0, self.x_ref)

# initialize boundary condition class
self.bc = boundaryconditions.boundary_cond_solid(bc_dict, self.fem_params, self.io, self.ki, self.vf, self.ti)

if self.prestress_initial:
    # initialize prestressing history deformation gradient to the identity
    Id_proj = project(Identity(len(self.u)), self.Vd_tensor, self.dx_)
    self.F_hist.interpolate(Id_proj)

self.bc_dict = bc_dict

# Dirichlet boundary conditions
if 'dirichlet' in self.bc_dict.keys():
    self.bc.dirichlet_bcs(self.V_u)

self.set_variational_forms_and_jacobians()
def set_variational_forms_and_jacobians(self):
    '''Assemble all UFL weak forms and Jacobian forms of the solid problem.

    Builds, per domain: kinetic, internal, Rayleigh-damping and pressure virtual
    work (current and old time step), external Neumann/Robin virtual work (plus a
    prestressing variant with dashpots removed and true loads turned into reference
    ones), the time-discretized weak forms, local Gauss-point growth residuals, and
    the Jacobian blocks jac_uu / jac_up / jac_pu (and p11 / a_p11 for the 2-field
    incompressible saddle-point system). Results are stored as attributes; nothing
    is returned.
    '''

    # set forms for acceleration and velocity
    self.acc, self.vel = self.ti.set_acc_vel(self.u, self.u_old, self.v_old, self.a_old)

    # kinetic, internal, and pressure virtual work (accumulated over domains below)
    self.deltaW_kin, self.deltaW_kin_old = as_ufl(0), as_ufl(0)
    self.deltaW_int, self.deltaW_int_old = as_ufl(0), as_ufl(0)
    self.deltaW_damp, self.deltaW_damp_old = as_ufl(0), as_ufl(0)
    self.deltaW_p, self.deltaW_p_old = as_ufl(0), as_ufl(0)

    for n in range(self.num_domains):

        if self.timint != 'static':
            # kinetic virtual work
            self.deltaW_kin += self.vf.deltaW_kin(self.acc, self.rho0[n], self.dx_[n])
            self.deltaW_kin_old += self.vf.deltaW_kin(self.a_old, self.rho0[n], self.dx_[n])
            # Rayleigh damping virtual work, built on the initial (zero-state) stiffness
            if self.rayleigh[n]:
                self.deltaW_damp += self.vf.deltaW_damp(self.eta_m[n], self.eta_k[n], self.rho0[n], self.ma[n].S(self.u_ini, self.p_ini, ivar={"theta" : self.theta_ini, "tau_a" : self.tau_a_ini}, tang=True), self.vel, self.dx_[n])
                self.deltaW_damp_old += self.vf.deltaW_damp(self.eta_m[n], self.eta_k[n], self.rho0[n], self.ma[n].S(self.u_ini, self.p_ini, ivar={"theta" : self.theta_ini, "tau_a" : self.tau_a_ini}, tang=True), self.v_old, self.dx_[n])

        # internal virtual work
        self.deltaW_int += self.vf.deltaW_int(self.ma[n].S(self.u, self.p, ivar=self.internalvars), self.ki.F(self.u), self.dx_[n])
        self.deltaW_int_old += self.vf.deltaW_int(self.ma[n].S(self.u_old, self.p_old, ivar=self.internalvars_old), self.ki.F(self.u_old), self.dx_[n])

        # pressure virtual work (for incompressible formulation)
        # this has to be treated like the evaluation of a volumetric material, hence with the elastic part of J
        if self.mat_growth[n]:
            J, J_old = self.ma[n].J_e(self.u, self.theta), self.ma[n].J_e(self.u_old, self.theta_old)
        else:
            J, J_old = self.ki.J(self.u), self.ki.J(self.u_old)
        self.deltaW_p += self.vf.deltaW_int_pres(J, self.dx_[n])
        self.deltaW_p_old += self.vf.deltaW_int_pres(J_old, self.dx_[n])

    # external virtual work (from Neumann or Robin boundary conditions, body forces, ...)
    w_neumann, w_neumann_old, w_robin, w_robin_old = as_ufl(0), as_ufl(0), as_ufl(0), as_ufl(0)
    if 'neumann' in self.bc_dict.keys():
        w_neumann, w_neumann_old = self.bc.neumann_bcs(self.V_u, self.Vd_scalar, self.u, self.u_old)
    if 'robin' in self.bc_dict.keys():
        w_robin, w_robin_old = self.bc.robin_bcs(self.u, self.vel, self.u_old, self.v_old, self.u_pre)

    # for (quasi-static) prestressing, we need to eliminate dashpots and replace true with reference
    # Neumann loads in our external virtual work
    w_neumann_prestr, w_robin_prestr = as_ufl(0), as_ufl(0)
    if self.prestress_initial:
        bc_dict_prestr = copy.deepcopy(self.bc_dict)
        # get rid of dashpots
        if 'robin' in bc_dict_prestr.keys():
            for r in bc_dict_prestr['robin']:
                if r['type'] == 'dashpot':
                    r['visc'] = 0.
        # replace true Neumann loads by reference ones
        if 'neumann' in bc_dict_prestr.keys():
            for n in bc_dict_prestr['neumann']:
                if n['type'] == 'true':
                    n['type'] = 'pk1'
        # separate BC object built from the modified dictionary
        bc_prestr = boundaryconditions.boundary_cond_solid(bc_dict_prestr, self.fem_params, self.io, self.ki, self.vf, self.ti)
        if 'neumann' in bc_dict_prestr.keys():
            w_neumann_prestr, _ = bc_prestr.neumann_bcs(self.V_u, self.Vd_scalar, self.u, self.u_old)
        if 'robin' in bc_dict_prestr.keys():
            w_robin_prestr, _ = bc_prestr.robin_bcs(self.u, self.vel, self.u_old, self.v_old, self.u_pre)
        self.deltaW_prestr_ext = w_neumann_prestr + w_robin_prestr

    # TODO: Body forces!
    self.deltaW_ext = w_neumann + w_robin
    self.deltaW_ext_old = w_neumann_old + w_robin_old

    # time-integration weighting factors (mass-type and stiffness-type)
    self.timefac_m, self.timefac = self.ti.timefactors()

    ### full weakforms

    # quasi-static weak form: internal minus external virtual work
    if self.timint == 'static':
        self.weakform_u = self.deltaW_int - self.deltaW_ext
        if self.incompressible_2field:
            self.weakform_p = self.deltaW_p
    # full dynamic weak form: kinetic plus internal (plus damping) minus external virtual work
    else:
        self.weakform_u = self.timefac_m * self.deltaW_kin + (1.-self.timefac_m) * self.deltaW_kin_old + \
                          self.timefac * self.deltaW_damp + (1.-self.timefac) * self.deltaW_damp_old + \
                          self.timefac * self.deltaW_int + (1.-self.timefac) * self.deltaW_int_old - \
                          self.timefac * self.deltaW_ext - (1.-self.timefac) * self.deltaW_ext_old
        if self.incompressible_2field:
            self.weakform_p = self.timefac * self.deltaW_p + (1.-self.timefac) * self.deltaW_p_old

    ### local weak forms at Gauss points for inelastic materials
    self.r_growth, self.del_theta = [], []
    for n in range(self.num_domains):
        if self.mat_growth[n] and self.mat_growth_trig[n] != 'prescribed' and self.mat_growth_trig[n] != 'prescribed_multiscale':
            # growth residual and increment for the local (return-mapping) Newton iteration
            a, b = self.ma[n].res_dtheta_growth(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt, 'res_del')
            self.r_growth.append(a), self.del_theta.append(b)
        else:
            self.r_growth.append(as_ufl(0)), self.del_theta.append(as_ufl(0))

    ### Jacobians

    # kinetic virtual work linearization (deltaW_kin already has contributions from all domains)
    self.jac_uu = self.timefac_m * derivative(self.deltaW_kin, self.u, self.du)

    # internal virtual work linearization treated differently: since we want to be able to account for nonlinear materials at Gauss
    # point level with deformation-dependent internal variables (i.e. growth or plasticity), we make use of a more explicit formulation
    # of the linearization which involves the fourth-order material tangent operator Ctang ("derivative" cannot take care of the
    # dependence of the internal variables on the deformation if this dependence is nonlinear and cannot be expressed analytically)
    for n in range(self.num_domains):

        # material tangent operator
        Cmat = self.ma[n].S(self.u, self.p, ivar=self.internalvars, tang=True)

        if self.mat_growth[n] and self.mat_growth_trig[n] != 'prescribed' and self.mat_growth_trig[n] != 'prescribed_multiscale':
            # growth tangent operator
            Cgrowth = self.ma[n].Cgrowth(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
            if self.mat_remodel[n] and self.lin_remod_full:
                # remodeling tangent operator
                Cremod = self.ma[n].Cremod(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
                Ctang = Cmat + Cgrowth + Cremod
            else:
                Ctang = Cmat + Cgrowth
        else:
            Ctang = Cmat

        self.jac_uu += self.timefac * self.vf.Lin_deltaW_int_du(self.ma[n].S(self.u, self.p, ivar=self.internalvars), self.ki.F(self.u), self.u, Ctang, self.dx_[n])

    # Rayleigh damping virtual work contribution to stiffness
    self.jac_uu += self.timefac * derivative(self.deltaW_damp, self.u, self.du)

    # external virtual work contribution to stiffness (from nonlinear follower loads or Robin boundary tractions)
    self.jac_uu += -self.timefac * derivative(self.deltaW_ext, self.u, self.du)

    # pressure contributions
    if self.incompressible_2field:

        self.jac_up, self.jac_pu, self.a_p11, self.p11 = as_ufl(0), as_ufl(0), as_ufl(0), as_ufl(0)

        for n in range(self.num_domains):
            # this has to be treated like the evaluation of a volumetric material, hence with the elastic part of J
            if self.mat_growth[n]:
                J = self.ma[n].J_e(self.u, self.theta)
                Jmat = self.ma[n].dJedC(self.u, self.theta)
            else:
                J = self.ki.J(self.u)
                Jmat = self.ki.dJdC(self.u)

            # pressure part of the material tangent
            Cmat_p = diff(self.ma[n].S(self.u, self.p, ivar=self.internalvars), self.p)

            if self.mat_growth[n] and self.mat_growth_trig[n] != 'prescribed' and self.mat_growth_trig[n] != 'prescribed_multiscale':
                # NOTE(review): Cmat is recomputed here but not referenced again in this branch
                Cmat = self.ma[n].S(self.u, self.p, ivar=self.internalvars, tang=True)
                # growth tangent operators - keep in mind that we have theta = theta(C(u),p) in general!
                # for stress-mediated growth, we get a contribution to the pressure material tangent operator
                Cgrowth_p = self.ma[n].Cgrowth_p(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
                if self.mat_remodel[n] and self.lin_remod_full:
                    # remodeling tangent operator
                    Cremod_p = self.ma[n].Cremod_p(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
                    Ctang_p = Cmat_p + Cgrowth_p + Cremod_p
                else:
                    Ctang_p = Cmat_p + Cgrowth_p
                # for all types of deformation-dependent growth, we need to add the growth contributions to the Jacobian tangent operator
                Jgrowth = diff(J,self.theta) * self.ma[n].dtheta_dC(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
                Jtang = Jmat + Jgrowth
                # ok... for stress-mediated growth, we actually get a non-zero right-bottom (11) block in our saddle-point system matrix,
                # since Je = Je(C,theta(C,p)) ---> dJe/dp = dJe/dtheta * dtheta/dp
                # TeX: D_{\Delta p}\!\int\limits_{\Omega_0} (J^{\mathrm{e}}-1)\delta p\,\mathrm{d}V = \int\limits_{\Omega_0} \frac{\partial J^{\mathrm{e}}}{\partial p}\Delta p \,\delta p\,\mathrm{d}V,
                # with \frac{\partial J^{\mathrm{e}}}{\partial p} = \frac{\partial J^{\mathrm{e}}}{\partial \vartheta}\frac{\partial \vartheta}{\partial p}
                dthetadp = self.ma[n].dtheta_dp(self.u, self.p, self.internalvars, self.theta_old, self.growth_thres, self.dt)
                if not isinstance(dthetadp, constantvalue.Zero):
                    self.p11 += diff(J,self.theta) * dthetadp * self.dp * self.var_p * self.dx_[n]
            else:
                Ctang_p = Cmat_p
                Jtang = Jmat

            # off-diagonal saddle-point blocks
            self.jac_up += self.timefac * self.vf.Lin_deltaW_int_dp(self.ki.F(self.u), Ctang_p, self.dx_[n])
            self.jac_pu += self.timefac * self.vf.Lin_deltaW_int_pres_du(self.ki.F(self.u), Jtang, self.u, self.dx_[n])

            # for saddle-point block-diagonal preconditioner
            self.a_p11 += inner(self.dp, self.var_p) * self.dx_[n]

    if self.prestress_initial:
        # quasi-static weak forms (don't dare to use fancy growth laws or other inelastic stuff during prestressing...)
        self.weakform_prestress_u = self.deltaW_int - self.deltaW_prestr_ext
        self.jac_prestress_uu = derivative(self.weakform_prestress_u, self.u, self.du)
        if self.incompressible_2field:
            self.weakform_prestress_p = self.deltaW_p
            self.jac_prestress_up = derivative(self.weakform_prestress_u, self.p, self.dp)
            self.jac_prestress_pu = derivative(self.weakform_prestress_p, self.u, self.du)