def _apply_inverse(matrix, r, v, options=None):
    options = options or _solver_options()
    solver = options.get('solver')
    preconditioner = options.get('preconditioner')
    # The preconditioner argument may only be specified for iterative solvers:
    options = (solver, preconditioner) if preconditioner else (solver,)
    df.solve(matrix, r, v, *options)
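# Hypothetical usage sketch (not from the original source): solve A x = b with an
# explicit options dict instead of the module-level `_solver_options()` default.
# `a`, `L` and `V` are assumed to be an existing bilinear form, linear form and
# function space.
A = df.assemble(a)
b = df.assemble(L)
x = df.Function(V).vector()
_apply_inverse(A, x, b, options={'solver': 'cg', 'preconditioner': 'amg'})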
def computeVelocityField(mesh):
    Xh = dl.VectorFunctionSpace(mesh, 'Lagrange', 2)
    Wh = dl.FunctionSpace(mesh, 'Lagrange', 1)
    if dlversion() <= (1, 6, 0):
        XW = dl.MixedFunctionSpace([Xh, Wh])
    else:
        mixed_element = dl.MixedElement([Xh.ufl_element(), Wh.ufl_element()])
        XW = dl.FunctionSpace(mesh, mixed_element)

    Re = 1e2

    g = dl.Expression(('0.0', '(x[0] < 1e-14) - (x[0] > 1 - 1e-14)'),
                      element=Xh.ufl_element())
    bc1 = dl.DirichletBC(XW.sub(0), g, v_boundary)
    bc2 = dl.DirichletBC(XW.sub(1), dl.Constant(0), q_boundary, 'pointwise')
    bcs = [bc1, bc2]

    vq = dl.Function(XW)
    (v, q) = dl.split(vq)
    (v_test, q_test) = dl.TestFunctions(XW)

    def strain(v):
        return dl.sym(dl.nabla_grad(v))

    F = ((2. / Re) * dl.inner(strain(v), strain(v_test))
         + dl.inner(dl.nabla_grad(v) * v, v_test)
         - (q * dl.div(v_test))
         + (dl.div(v) * q_test)) * dl.dx

    dl.solve(F == 0, vq, bcs,
             solver_parameters={"newton_solver":
                                {"relative_tolerance": 1e-4,
                                 "maximum_iterations": 100,
                                 "linear_solver": "default"}})

    return v
def get_distance_function(config, domains):
    V = dolfin.FunctionSpace(config.domain.mesh, "CG", 1)
    v = dolfin.TestFunction(V)
    d = dolfin.TrialFunction(V)
    sol = dolfin.Function(V)
    s = dolfin.interpolate(dolfin.Constant(1.0), V)
    domains_func = dolfin.Function(dolfin.FunctionSpace(config.domain.mesh, "DG", 0))
    domains_func.vector().set_local(domains.array().astype(float))

    def boundary(x):
        eps_x = config.params["turbine_x"]
        eps_y = config.params["turbine_y"]
        min_val = 1
        for e_x, e_y in [(-eps_x, 0), (eps_x, 0), (0, -eps_y), (0, eps_y)]:
            try:
                min_val = min(min_val, domains_func((x[0] + e_x, x[1] + e_y)))
            except RuntimeError:
                pass
        return min_val == 1.0

    bc = dolfin.DirichletBC(V, 0.0, boundary)

    # Solve the diffusion problem with a constant source term
    log(INFO, "Solving diffusion problem to identify feasible area ...")
    a = dolfin.inner(dolfin.grad(d), dolfin.grad(v)) * dolfin.dx
    L = dolfin.inner(s, v) * dolfin.dx
    dolfin.solve(a == L, sol, bc)

    return sol
def __init__(self, config, feasible_area, attraction_center):
    '''Generates the inequality constraints that enforce the turbines to be
    within the feasible area. If a turbine is outside the domain, the
    constraint is equal to the distance between the turbine and the
    attraction center.'''
    self.config = config
    self.feasible_area = feasible_area

    # Compute the gradient of the feasible area
    fs = dolfin.FunctionSpace(feasible_area.function_space().mesh(),
                              "DG",
                              feasible_area.function_space().ufl_element().degree() - 1)
    feasible_area_grad = (dolfin.Function(fs), dolfin.Function(fs))
    t = dolfin.TestFunction(fs)
    log(INFO, "Solving for gradient of feasible area")
    for i in range(2):
        form = dolfin.inner(feasible_area_grad[i], t) * dolfin.dx \
            - dolfin.inner(feasible_area.dx(i), t) * dolfin.dx
        if dolfin.NonlinearVariationalSolver.default_parameters().has_parameter("linear_solver"):
            dolfin.solve(form == 0, feasible_area_grad[i],
                         solver_parameters={"linear_solver": "cg",
                                            "preconditioner": "amg"})
        else:
            dolfin.solve(form == 0, feasible_area_grad[i],
                         solver_parameters={"newton_solver":
                                            {"linear_solver": "cg",
                                             "preconditioner": "amg"}})
    self.feasible_area_grad = feasible_area_grad

    self.attraction_center = attraction_center
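# Side note (an assumption, not part of the original source): component-wise,
# the variational solves in __init__ above perform an L2 projection of
# feasible_area.dx(i) onto the DG space, so a shorter, less configurable
# equivalent sketch is:
def _project_feasible_area_gradient(feasible_area, fs):
    """Hypothetical helper: L2-project both partial derivatives onto `fs`."""
    return tuple(dolfin.project(feasible_area.dx(i), fs) for i in range(2))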
def time_step(self, rhs):
    dfn.begin("Computing velocity correction")
    if params['all_steps_adaptive']:
        self.adaptive_step(rhs)
    else:
        for bc in self.bcs:
            bc.apply(self.A, rhs)
        dfn.solve(self.A, self.cur_vel.vector(), rhs, "cg", "amg")
    dfn.end()
def apply_inverse(self, V, ind=None, mu=None, options=None):
    assert V in self.range
    assert options is None  # for now, simply use the default solver options set in FEniCS
    vectors = (V._list if ind is None
               else [V._list[ind]] if isinstance(ind, Number)
               else [V._list[i] for i in ind])
    R = self.source.zeros(len(vectors))
    for r, v in zip(R._list, vectors):
        df.solve(self.matrix, r.impl, v.impl)
    return R
def apply_inverse(self, V, ind=None, mu=None, least_squares=False):
    assert V in self.range
    if least_squares:
        raise NotImplementedError
    vectors = (V._list if ind is None
               else [V._list[ind]] if isinstance(ind, Number)
               else [V._list[i] for i in ind])
    R = self.source.zeros(len(vectors))
    for r, v in zip(R._list, vectors):
        df.solve(self.matrix, r.impl, v.impl)
    return R
def compute_direct_sample_solution_old(pde, RV_samples, coeff_field, A, maxm, proj_basis):
    a = coeff_field.mean_func
    for m in range(maxm):
        a_m = RV_samples[m] * coeff_field[m][0]
        a = a + a_m

    A = pde.assemble_lhs(basis=proj_basis, coeff=a)
    b = pde.assemble_rhs(basis=proj_basis, coeff=a)
    X = 0 * b
    solve(A, X, b)
    return FEniCSVector(Function(proj_basis._fefs, X)), a
def eval_poisson(vec=None):
    if vec is None:
        # set default vector for new indices
        # mesh0 = refine(Mesh(lshape_xml))
        mesh0 = UnitSquare(4, 4)
        fs = FunctionSpace(mesh0, "CG", 1)
        vec = FEniCSVector(Function(fs))
    pde = FEMPoisson()
    fem_A = pde.assemble_lhs(diffcoeff, vec.basis)
    fem_b = pde.assemble_rhs(f, vec.basis)
    solve(fem_A, vec.coeffs, fem_b)
    return vec
def solve(self):
    """Solve the equation. This calls the DOLFIN solve function."""
    if self.is_linear() and form_rank(self.__eq.lhs) == 1:
        raise NotImplementedException("Solve for linear variational problem with rank 1 LHS not implemented")
    if self.__J is None:
        dolfin.solve(self.__eq, self.__x, self.__bcs,
                     solver_parameters=self.__solver_parameters)
    else:
        dolfin.solve(self.__eq, self.__x, self.__bcs, J=self.__J,
                     solver_parameters=self.__solver_parameters)
    return
def compute_Nij(Nij, G_matr, G_under, tensdim, Sijmats, Sijfcomps, delta_CG1_sq, alphaval=None, u_f=None, **NS_namespace): """ Function for computing Nij in ScaleDepLagrangian """ Sijf = Sijfcomps alpha = alphaval deltasq = 2 * delta_CG1_sq.vector().array() # Need to compute F(F(Sij)), set up right hand sides if tensdim == 3: Ax, Ay = Sijmats uf = u_f[0].vector() vf = u_f[1].vector() # Filtered rhs buf = [Ax * uf, 0.5 * (Ay * uf + Ax * vf), Ay * vf] else: Ax, Ay, Az = Sijmats uf = u_f[0].vector() vf = u_f[1].vector() wf = u_f[2].vector() buf = [ Ax * uf, 0.5 * (Ay * uf + Ax * vf), 0.5 * (Az * uf + Ax * wf), Ay * vf, 0.5 * (Az * vf + Ay * wf), Az * wf ] for i in xrange(tensdim): # Solve for the diff. components of F(F(Sij))) solve(G_matr, Sijf[i].vector(), buf[i], "cg", "default") # Compute magSf magSf = mag(Sijf, tensdim) for i in xrange(tensdim): # Filter Nij = F(|S|Sij) --> F(F(|S|Sij)) tophatfilter(unfiltered=Nij[i], filtered=Nij[i], weight=1, **vars()) # Compute 2*delta**2*(F(F(|S|Sij)) - alpha**2*F(F(|S))F(F(Sij))) Nij[i].vector().set_local( deltasq * (Nij[i].vector().array() - (alpha**2) * magSf * Sijf[i].vector().array())) Nij[i].vector().apply("insert")
def advance_one_timestep(self, f, u_1):
    """
    Solve the PDE for one time step.

    f: the source term in the PDE.
    u_1: the solution at the previous time step.
    """
    from dolfin import TestFunction, dx, solve
    V, a, dt = self.V, self.a, self.dt  # strip off self prefix
    v = TestFunction(V)
    L = (u_1 + dt * f) * v * dx
    solve(a == L, self.U)
    return self.U
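# Hypothetical driving loop (an assumption, not from the original source):
# `stepper` is an instance of the class containing advance_one_timestep, and
# `f` is a dolfin Expression/Function for the source term; `num_steps` is
# assumed to be defined elsewhere.
u = stepper.U
for n in range(num_steps):
    u = stepper.advance_one_timestep(f, u)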
def computeObservation(self, u_o):
    """Compute the synthetic observation."""
    mt = dl.interpolate(self.mtrue, Vh[PARAMETER])
    x = [self.generate_vector(STATE), mt.vector(), None]
    A, b = self.assembleA(x, assemble_rhs=True)

    A.init_vector(u_o, 1)
    dl.solve(A, u_o, b, "cg", amg_method())

    # Create noisy data, ud
    MAX = u_o.norm("linf")
    parRandom.normal_perturb(.01 * MAX, u_o)
def compute_velocity_correction(
    ui, p0, p1, u_bcs, rho, mu, dt, rotational_form, my_dx, tol, verbose
):
    """Compute the velocity correction according to

    .. math::

        U = u_0 - \\frac{dt}{\\rho} \\nabla (p_1-p_0).
    """
    W = ui.function_space()
    P = p1.function_space()

    u = TrialFunction(W)
    v = TestFunction(W)
    a3 = dot(u, v) * my_dx

    phi = Function(P)
    phi.assign(p1)
    if p0:
        phi -= p0

    if rotational_form:
        r = SpatialCoordinate(W.mesh())[0]
        div_ui = 1 / r * (r * ui[0]).dx(0) + ui[1].dx(1)
        phi += mu * div_ui

    L3 = dot(ui, v) * my_dx - dt / rho * (phi.dx(0) * v[0] + phi.dx(1) * v[1]) * my_dx

    u1 = Function(W)
    solve(
        a3 == L3,
        u1,
        bcs=u_bcs,
        solver_parameters={
            "linear_solver": "iterative",
            "symmetric": True,
            "preconditioner": "hypre_amg",
            "krylov_solver": {
                "relative_tolerance": tol,
                "absolute_tolerance": 0.0,
                "maximum_iterations": 100,
                "monitor_convergence": verbose,
            },
        },
    )

    # u = project(ui - k/rho * grad(phi), V)
    # div_u = 1/r * div(r*u)
    r = SpatialCoordinate(W.mesh())[0]
    div_u1 = 1.0 / r * (r * u1[0]).dx(0) + u1[1].dx(1)
    info("||u||_div = {:e}".format(sqrt(assemble(div_u1 * div_u1 * my_dx))))

    return u1
def field(self, state, x=None, lump=True, rhs_func=None):
    """
    Returns the effective-field contribution for a given state.

    This method uses a projection method to retrieve the field from the
    RHS-form given by the :code:`form_rhs` method. It should be overridden
    for better performance.

    *Arguments*
        state (:class:`State`)
            the simulation state
        x (:class:`dolfin.Vector`)
            the vector to store the result or :code:`None`

    *Returns*
        :class:`dolfin.Function`
            the effective-field contribution
    """
    # TODO set particular solver
    # TODO use caching for mass matrix
    if rhs_func is None:
        w = TestFunction(state.VectorFunctionSpace())
        b = assemble(self.form_rhs(state, w) / Constant(Constants.gamma) * state.dx('magnetic'))
    else:
        b = rhs_func(state)

    if x is None:
        result = Function(state.VectorFunctionSpace())
    else:
        result = Function(state.VectorFunctionSpace(), x)

    # Optional mass lumping
    if lump:
        A = state.M_inv_diag('magnetic')
        A.mult(b, result.vector())
    else:
        w = TestFunction(state.VectorFunctionSpace())
        h = TrialFunction(state.VectorFunctionSpace())
        A = assemble(inner(w, h) * state.dx('magnetic'))
        solve(A, result.vector(), b)

    return result
def __invert_mass_matrix(self, u):
    """
    Helper routine to invert the mass matrix.

    Args:
        u: current values
    Returns:
        inv(M)*u
    """
    me = fenics_mesh(self.V)
    A = 1.0 * self.M
    b = fenics_mesh(u)

    df.solve(A, me.values.vector(), b.values.vector())

    return me
def _error_estimator(dx, phi, mu, sigma, omega, conv, voltages):
    '''Simple error estimator from

        A posteriori error estimation and adaptive mesh-refinement techniques;
        R. Verfürth;
        Journal of Computational and Applied Mathematics;
        Volume 50, Issues 1-3, 20 May 1994, Pages 67-83;
        <https://www.sciencedirect.com/science/article/pii/0377042794902909>.

    The strong PDE is

        - div(1/(mu r) grad(rphi)) + <u, 1/r grad(rphi)>
            + i sigma omega phi = sigma v_k / (2 pi r).
    '''
    from dolfin import cells

    mesh = phi.function_space().mesh()
    # Assemble the cell-wise residual in DG space
    DG = FunctionSpace(mesh, 'DG', 0)
    v = TestFunction(DG)
    R = _residual_strong(dx, v, phi, mu, sigma, omega, conv, voltages)
    r_r = assemble(R[0])
    r_i = assemble(R[1])
    r = r_r * r_r + r_i * r_i

    visualize = True
    if visualize:
        # Plot the cell-wise residual
        u = TrialFunction(DG)
        a = zero() * dx(0)
        subdomain_indices = mu.keys()
        for i in subdomain_indices:
            a += u * v * dx(i)
        A = assemble(a)
        R2 = Function(DG)
        solve(A, R2.vector(), r)
        plot(R2, title='||R||^2')
        interactive()

    K = r.array()
    info('%r' % K)
    h = numpy.array([c.diameter() for c in cells(mesh)])
    eta = h * numpy.sqrt(K)
    return eta
def solve_system(self, rhs, factor, u0, t):
    """
    Dolfin's linear solver for (M - factor*K) u = rhs.

    Args:
        rhs: right-hand side for the nonlinear system
        factor: abbrev. for the node-to-node stepsize (or any other factor required)
        u0: initial guess for the iterative solver (not used here so far)
        t: current time

    Returns:
        solution as mesh
    """
    A = self.M - factor * self.K
    b = fenics_mesh(rhs)
    u = fenics_mesh(u0)

    df.solve(A, u.values.vector(), b.values.vector())

    return u
def amr(mesh, m, DirichletBoundary, g, mesh2d, s0=1, alpha=1):
    """Compute the Anisotropic MagnetoResistance (AMR) for a given
    magnetisation configuration."""
    # Scalar and vector function spaces.
    V = df.FunctionSpace(mesh, "CG", 1)
    VV = df.VectorFunctionSpace(mesh, 'CG', 1, 3)

    # Define boundary conditions.
    bcs = df.DirichletBC(V, g, DirichletBoundary())

    # Nonlinear conductivity.
    def sigma(u):
        E = -df.grad(u)
        costheta = df.dot(m, E) / (df.sqrt(df.dot(E, E)) * df.sqrt(df.dot(m, m)))
        return s0 / (1 + alpha * costheta**2)

    # Define variational problem for Picard iteration.
    u = df.TrialFunction(V)  # electric potential
    v = df.TestFunction(V)
    u_k = df.interpolate(df.Expression('x[0]', degree=1), V)  # previous (known) u
    a = df.inner(sigma(u_k) * df.grad(u), df.grad(v)) * df.dx

    # RHS to mimic linear problem.
    f = df.Constant(0.0)  # set to 0 -> nonlinear Poisson equation.
    L = f * v * df.dx

    u = df.Function(V)  # new unknown function
    eps = 1.0  # error measure ||u - u_k||
    tol = 1.0e-20  # tolerance
    iter = 0  # iteration counter
    maxiter = 50  # maximum number of iterations allowed
    while eps > tol and iter < maxiter:
        iter += 1
        df.solve(a == L, u, bcs)
        diff = u.vector().array() - u_k.vector().array()
        eps = np.linalg.norm(diff, ord=np.Inf)
        print('iter=%d: norm=%g' % (iter, eps))
        u_k.assign(u)  # update for next iteration

    j = df.project(-sigma(u) * df.grad(u), VV)

    return u, j, compute_flux(j, mesh2d)
def __eval_fexpl(self, u, t):
    """
    Helper routine to evaluate the explicit part of the RHS.

    Args:
        u: current values (not used here)
        t: current time

    Returns:
        explicit part of RHS
    """
    A = 1.0 * self.K
    b = self.apply_mass_matrix(u)
    psi = fenics_mesh(self.V)
    df.solve(A, psi.values.vector(), b.values.vector())

    fexpl = fenics_mesh(self.V)
    fexpl.values = df.project(
        df.Dx(psi.values, 1) * df.Dx(u.values, 0)
        - df.Dx(psi.values, 0) * df.Dx(u.values, 1), self.V)

    return fexpl
def __init__(self, config, feasible_area, attraction_center):
    '''Generates the inequality constraints that enforce the turbines to be
    within the feasible area. If a turbine is outside the domain, the
    constraint is equal to the distance between the turbine and the
    attraction center.'''
    self.config = config
    self.feasible_area = feasible_area

    # Compute the gradient of the feasible area
    fs = dolfin.FunctionSpace(feasible_area.function_space().mesh(),
                              "DG",
                              feasible_area.function_space().ufl_element().degree() - 1)
    feasible_area_grad = (dolfin.Function(fs), dolfin.Function(fs))
    t = dolfin.TestFunction(fs)
    info_blue("Solving for gradient of feasible area")
    for i in range(2):
        form = dolfin.inner(feasible_area_grad[i], t) * dolfin.dx \
            - dolfin.inner(feasible_area.dx(i), t) * dolfin.dx
        dolfin.solve(form == 0, feasible_area_grad[i])
    self.feasible_area_grad = feasible_area_grad

    self.attraction_center = attraction_center
def compute_direct_sample_solution(pde, RV_samples, coeff_field, A, maxm, proj_basis, cache=None):
    try:
        A0 = cache.A
        A_m = cache.A_m
        b = cache.b
        logger.debug("compute_direct_sample_solution: CACHE USED")
        print("CACHE USED")
    except AttributeError:
        with timing(msg="direct_sample_sol: compute A_0, b", logfunc=logger.info):
            a = coeff_field.mean_func
            A0 = pde.assemble_lhs(basis=proj_basis, coeff=a, withDirichletBC=False)
            b = pde.assemble_rhs(basis=proj_basis, coeff=a, withDirichletBC=False)
            A_m = [None] * maxm
        logger.debug("compute_direct_sample_solution: CACHE NOT USED")
        print("CACHE NOT USED")
        if cache is not None:
            cache.A = A0
            cache.A_m = A_m
            cache.b = b

    with timing(msg="direct_sample_sol: compute A_m", logfunc=logger.info):
        A = A0.copy()
        for m in range(maxm):
            if A_m[m] is None:
                a_m = coeff_field[m][0]
                A_m[m] = pde.assemble_lhs(basis=proj_basis, coeff=a_m, withDirichletBC=False)
            A += RV_samples[m] * A_m[m]

    with timing(msg="direct_sample_sol: apply BCs", logfunc=logger.info):
        A, b = pde.apply_dirichlet_bc(proj_basis._fefs, A, b)

    with timing(msg="direct_sample_sol: solve linear system", logfunc=logger.info):
        X = 0 * b
        logger.info("compute_direct_sample_solution with %i dofs" % b.size())
        solve(A, X, b)

    return FEniCSVector(Function(proj_basis._fefs, X))
F = u * v * dx + dt * dot(grad(u), grad(v)) * dx - (u_n + dt * f) * v * dx
a, L = lhs(F), rhs(F)

# Time-stepping
u = Function(V)
t = 0
for n in range(num_steps):
    # Update current time
    t += dt
    u_D.t = t

    # Compute solution
    solve(a == L, u, bc)

    # Plot solution
    # plot(u)
    # axi.triplot(u)
    sol_file << u

    # Compute error at vertices
    # u_e = interpolate(u_D, V)
    # error = np.abs(u_e.vector().array() - u.vector().array()).max()
    # print('t = %.2f: error = %.3g' % (t, error))

    # Update previous solution
    u_n.assign(u)

# Compute error in L2 norm
try: # If the solvers don't converge, reduce the time step and try again. bmelt = -20.0 bdot = df.conditional(df.gt(H, np.abs(bmelt)), bmelt, -H) * (1 - grounded) P = None if climate in "ltop": P = get_adot_from_orog_precip(ltop_constants) adot.vector().set_local(P) print(t, dt_float, H0.vector().max(), df.assemble(h_s0 * df.dx)) assigner_s.assign(T, [B0, Qs0, h_s0, h_s_0, h_eff0]) assigner_g.assign(U, [ubar0, udef0, H0, H0_]) # Solve for water flux df.solve(A_Qw == b_Qw, Qw) # Solve for sediment variables print("solving sed") sed_solver = df.NonlinearVariationalSolver(sed_problem) sed_solver.parameters["nonlinear_solver"] = "newton" sed_solver.parameters["newton_solver"]["relative_tolerance"] = 1e-2 sed_solver.parameters["newton_solver"]["absolute_tolerance"] = 1e-2 sed_solver.parameters["newton_solver"][ "error_on_nonconvergence"] = True sed_solver.parameters["newton_solver"]["linear_solver"] = "gmres" sed_solver.parameters["newton_solver"]["maximum_iterations"] = 10 sed_solver.parameters["newton_solver"]["report"] = True sed_solver.parameters["newton_solver"]["relaxation_parameter"] = 0.7 sed_solver.parameters["newton_solver"]["krylov_solver"][
def solve(tstep, w_, w_1, w_tmp, solvers, enable_PF, enable_EC, enable_NS, **namespace): """ Solve equations. """ timer_outer = df.Timer("Solve system") for subproblem, enable in zip(["PF", "EC"], [enable_PF, enable_EC]): timer_inner = df.Timer("Solve subproblem " + subproblem) df.mpi_comm_world().barrier() solvers[subproblem].solve() timer_inner.stop() if enable_NS: # timer = df.Timer("NS: Assemble matrices") # A1 = df.assemble(solvers["NSu"]["a1"]) # A2 = df.assemble(solvers["NSp"]["a2"]) # A3 = df.assemble(solvers["NSu"]["a3"]) # timer.stop() # timer = df.Timer("NS: Apply BCs 1") # [bc.apply(A1) for bc in solvers["NSu"]["bcs"]] # [bc.apply(A2) for bc in solvers["NSp"]["bcs"]] # timer.stop() du = np.array([1e9]) tol = 1e-6 max_num_iterations = 1 i_iter = 0 Fu = solvers["NSu"]["Fu"] bcs_u = solvers["NSu"]["bcs"] while du > tol and i_iter < max_num_iterations: print du[0] i_iter += 1 du[0] = 0. # Step 1: Tentative velocity timer = df.Timer("NS: Tentative velocity") # b1 = df.assemble(solvers["NSu"]["L1"]) # [bc.apply(b1) for bc in solvers["NSu"]["bcs"]] # df.solve(A1, w_["NSu"].vector(), b1) w_tmp["NSu"].vector()[:] = w_["NSu"].vector() # A, L = df.system(Fu) # df.solve(A == L, w_["NSu"], bcs_u) df.solve(df.lhs(Fu) == df.rhs(Fu), w_["NSu"], bcs_u) du[0] += df.norm(w_["NSu"].vector() - w_tmp["NSu"].vector()) timer.stop() # Step 2: Pressure correction timer = df.Timer("NS: Pressure correction") # b2 = df.assemble(solvers["NSp"]["L2"]) # [bc.apply(b2) for bc in solvers["NSp"]["bcs"]] # df.solve(A2, w_["NSp"].vector(), b2) Fp = solvers["NSp"]["Fp"] bcs_p = solvers["NSp"]["bcs"] df.solve(df.lhs(Fp) == df.rhs(Fp), w_["NSp"], bcs_p) w_1["NSp"].assign(w_["NSp"]) timer.stop() # Step 3: Velocity correction timer = df.Timer("NS: Velocity correction") # b3 = df.assemble(solvers["NSu"]["L3"]) # df.solve(A3, w_["NSu"].vector(), b3) Fu_corr = solvers["NSu"]["Fu_corr"] df.solve(df.lhs(Fu_corr) == df.rhs(Fu_corr), w_["NSu"], bcs_u) timer.stop() timer_outer.stop()
    deltaH*rob*Kinetic_oxy(u_Tn)*u_A*u_B*(pow(R*u_T, 2)/Uc1)*v_T*dx + \
    (rof*cpf*Vz)*(u_T - T_in)*v_T*ds_in + \
    hw*(u_T - Twall)*v_T*ds_wall

F = F_A + F_B + F_C + F_T

for n in range(num_steps):
    print('{} out of {}'.format(n, num_steps))
    t += delta_t  # Update current time

    # Solve variational problem for time step.
    # Note: both Newton settings must live in a single "newton_solver" dict;
    # a duplicate "newton_solver" key would silently discard the first entry.
    solve(F == 0, u,
          solver_parameters={
              "newton_solver": {
                  "relative_tolerance": 1e-6,
                  "maximum_iterations": 60
              }
          })
    print('solver done')

    # Save solution to files for visualization and postprocessing (HDF5)
    _u_A, _u_B, _u_C, _u_T = u_n.split()
    _u_D = _u_C * 3
    if Writting_xdmf:
        xdmffile_A.write(_u_A, t)
        xdmffile_B.write(_u_B, t)
        xdmffile_C.write(_u_C, t)
def compute_pressure( P, p0, mu, ui, u, my_dx, p_bcs=None, rotational_form=False, tol=1.0e-10, verbose=True, ): """Solve the pressure Poisson equation .. math:: \\begin{align} -\\frac{1}{r} \\div(r \\nabla (p_1-p_0)) = -\\frac{1}{r} \\div(r u),\\\\ \\text{(with boundary conditions)}, \\end{align} for :math:`\\nabla p = u`. The pressure correction is based on the update formula .. math:: \\frac{\\rho}{dt} (u_{n+1}-u^*) + \\begin{pmatrix} \\text{d}\\phi/\\text{d}r\\\\ \\text{d}\\phi/\\text{d}z\\\\ \\frac{1}{r} \\text{d}\\phi/\\text{d}\\theta \\end{pmatrix} = 0 with :math:`\\phi = p_{n+1} - p^*` and .. math:: \\frac{1}{r} \\frac{\\text{d}}{\\text{d}r} (r u_r^{(n+1)}) + \\frac{\\text{d}}{\\text{d}z} (u_z^{(n+1)}) + \\frac{1}{r} \\frac{\\text{d}}{\\text{d}\\theta} (u_{\\theta}^{(n+1)}) = 0 With the assumption that u does not change in the direction :math:`\\theta`, one derives .. math:: - \\frac{1}{r} \\div(r \\nabla \\phi) = \\frac{1}{r} \\frac{\\rho}{dt} \\div(r (u_{n+1} - u^*))\\\\ - \\frac{1}{r} \\langle n, r \\nabla \\phi\\rangle = \\frac{1}{r} \\frac{\\rho}{dt} \\langle n, r (u_{n+1} - u^*)\\rangle In its weak form, this is .. math:: \\int r \\langle\\nabla\\phi, \\nabla q\\rangle \\,2 \\pi = - \\frac{\\rho}{dt} \\int \\div(r u^*) q \\, 2 \\pi - \\frac{\\rho}{dt} \\int_{\\Gamma} \\langle n, r (u_{n+1}-u^*)\\rangle q \\, 2\\pi. (The terms :math:`1/r` cancel with the volume elements :math:`2\\pi r`.) If the Dirichlet boundary conditions are applied to both :math:`u^*` and :math:`u_n` (the latter in the velocity correction step), the boundary integral vanishes. If no Dirichlet conditions are given (which is the default case), the system has no unique solution; one eigenvalue is 0. This however, does not hurt CG convergence if the system is consistent, cf. :cite:`vdV03`. And indeed it is consistent if and only if .. math:: \\int_\\Gamma r \\langle n, u\\rangle = 0. This condition makes clear that for incompressible Navier-Stokes, one either needs to make sure that inflow and outflow always add up to 0, or one has to specify pressure boundary conditions. Note that, when using a multigrid preconditioner as is done here, the coarse solver must be chosen such that it preserves the nullspace of the problem. """ W = ui.function_space() r = SpatialCoordinate(W.mesh())[0] p = TrialFunction(P) q = TestFunction(P) a2 = dot(r * grad(p), grad(q)) * 2 * pi * my_dx # The boundary conditions # n.(p1-p0) = 0 # are implicitly included. # # L2 = -div(r*u) * q * 2*pi*my_dx div_u = 1 / r * (r * u[0]).dx(0) + u[1].dx(1) L2 = -div_u * q * 2 * pi * r * my_dx if p0: L2 += r * dot(grad(p0), grad(q)) * 2 * pi * my_dx # In the Cartesian variant of the rotational form, one makes use of the # fact that # # curl(curl(u)) = grad(div(u)) - div(grad(u)). # # The same equation holds true in cylindrical form. Hence, to get the # rotational form of the splitting scheme, we need to # # rotational form if rotational_form: # If there is no dependence of the angular coordinate, what is # div(grad(div(u))) in Cartesian coordinates becomes # # 1/r div(r * grad(1/r div(r*u))) # # in cylindrical coordinates (div and grad are in cylindrical # coordinates). Unfortunately, we cannot write it down that # compactly since u_phi is in the game. # When using P2 elements, this value will be 0 anyways. 
div_ui = 1 / r * (r * ui[0]).dx(0) + ui[1].dx(1) grad_div_ui = as_vector((div_ui.dx(0), div_ui.dx(1))) L2 -= r * mu * dot(grad_div_ui, grad(q)) * 2 * pi * my_dx # div_grad_div_ui = 1/r * (r * grad_div_ui[0]).dx(0) \ # + (grad_div_ui[1]).dx(1) # L2 += mu * div_grad_div_ui * q * 2*pi*r*dx # n = FacetNormal(Q.mesh()) # L2 -= mu * (n[0] * grad_div_ui[0] + n[1] * grad_div_ui[1]) \ # * q * 2*pi*r*ds p1 = Function(P) if p_bcs: solve( a2 == L2, p1, bcs=p_bcs, solver_parameters={ "linear_solver": "iterative", "symmetric": True, "preconditioner": "hypre_amg", "krylov_solver": { "relative_tolerance": tol, "absolute_tolerance": 0.0, "maximum_iterations": 100, "monitor_convergence": verbose, }, }, ) else: # If we're dealing with a pure Neumann problem here (which is the # default case), this doesn't hurt CG if the system is consistent, # cf. :cite:`vdV03`. And indeed it is consistent if and only if # # \int_\Gamma r n.u = 0. # # This makes clear that for incompressible Navier-Stokes, one # either needs to make sure that inflow and outflow always add up # to 0, or one has to specify pressure boundary conditions. # # If the right-hand side is very small, round-off errors may impair # the consistency of the system. Make sure the system we are # solving remains consistent. A = assemble(a2) b = assemble(L2) # Assert that the system is indeed consistent. e = Function(P) e.interpolate(Constant(1.0)) evec = e.vector() evec /= norm(evec) alpha = b.inner(evec) normB = norm(b) # Assume that in every component of the vector, a round-off error # of the magnitude DOLFIN_EPS is present. This leads to the # criterion # |<b,e>| / (||b||*||e||) < DOLFIN_EPS # as a check whether to consider the system consistent up to # round-off error. # # TODO think about condition here # if abs(alpha) > normB * DOLFIN_EPS: if abs(alpha) > normB * 1.0e-12: # divu = 1 / r * (r * u[0]).dx(0) + u[1].dx(1) adivu = assemble(((r * u[0]).dx(0) + u[1].dx(1)) * 2 * pi * my_dx) info("\\int 1/r * div(r*u) * 2*pi*r = {:e}".format(adivu)) n = FacetNormal(P.mesh()) boundary_integral = assemble((n[0] * u[0] + n[1] * u[1]) * 2 * pi * r * ds) info("\\int_Gamma n.u * 2*pi*r = {:e}".format(boundary_integral)) message = ( "System not consistent! " "<b,e> = {:g}, ||b|| = {:g}, <b,e>/||b|| = {:e}.".format( alpha, normB, alpha / normB ) ) info(message) # # Plot the stuff, and project it to a finer mesh with linear # # elements for the purpose. # plot(divu, title='div(u_tentative)') # # Vp = FunctionSpace(Q.mesh(), 'CG', 2) # # Wp = MixedFunctionSpace([Vp, Vp]) # # up = project(u, Wp) # fine_mesh = Q.mesh() # for k in range(1): # fine_mesh = refine(fine_mesh) # V = FunctionSpace(fine_mesh, 'CG', 1) # W = V * V # # uplot = Function(W) # # uplot.interpolate(u) # uplot = project(u, W) # plot(uplot[0], title='u_tentative[0]') # plot(uplot[1], title='u_tentative[1]') # # plot(u, title='u_tentative') # interactive() # exit() raise RuntimeError(message) # Project out the roundoff error. b -= alpha * evec # # In principle, the ILU preconditioner isn't advised here since it # might destroy the semidefiniteness needed for CG. # # The system is consistent, but the matrix has an eigenvalue 0. # This does not harm the convergence of CG, but when # preconditioning one has to make sure that the preconditioner # preserves the kernel. ILU might destroy this (and the # semidefiniteness). With AMG, the coarse grid solves cannot be LU # then, so try Jacobi here. 
# <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html> # prec = PETScPreconditioner("hypre_amg") from dolfin import PETScOptions PETScOptions.set("pc_hypre_boomeramg_relax_type_coarse", "jacobi") solver = PETScKrylovSolver("cg", prec) solver.parameters["absolute_tolerance"] = 0.0 solver.parameters["relative_tolerance"] = tol solver.parameters["maximum_iterations"] = 100 solver.parameters["monitor_convergence"] = verbose # Create solver and solve system A_petsc = as_backend_type(A) b_petsc = as_backend_type(b) p1_petsc = as_backend_type(p1.vector()) solver.set_operator(A_petsc) solver.solve(p1_petsc, b_petsc) return p1
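# Standalone sketch (an assumption, not part of the original source) of the
# consistency fix used in compute_pressure above for a pure-Neumann problem:
# remove the component of the assembled right-hand side b along the constant
# vector before handing the singular but consistent system to CG.
def _project_out_constant_nullspace(b, P):
    """Hypothetical helper; P is the pressure function space, b the RHS vector."""
    e = Function(P)
    e.interpolate(Constant(1.0))
    evec = e.vector()
    evec /= norm(evec)
    b -= b.inner(evec) * evec   # afterwards <b, e> = 0 up to round-off
    return b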
bval = lambda lv, rv: df.Expression(
    "({0}*(1-x[0])+{1}*(1+x[0]))/2".format(lv, rv))
bcs = df.DirichletBC(V, bval(1, -1), u0_boundary)

dx = df.Measure("dx")

# Define variational problem
u = df.Function(V)
u_x = u.dx(0)
v = df.TestFunction(V)
v_x = v.dx(0)
mu = df.Constant(0.1)
F = (mu * u_x * v_x + v * u * u_x) * dx
df.solve(F == 0, u, bcs,
         solver_parameters={"newton_solver": {"relative_tolerance": 1e-6}})

# plot solution
df.plot(u, title="Velocity")
# hold plot
df.interactive()
def run_with_params(Tb, mu_value, k_s, path): run_time_init = clock() mesh = BoxMesh(Point(0.0, 0.0, 0.0), Point(mesh_width, mesh_width, mesh_height), nx, ny, nz) pbc = PeriodicBoundary() WE = VectorElement('CG', mesh.ufl_cell(), 2) SE = FiniteElement('CG', mesh.ufl_cell(), 1) WSSS = FunctionSpace(mesh, MixedElement(WE, SE, SE, SE), constrained_domain=pbc) # W = FunctionSpace(mesh, WE, constrained_domain=pbc) # S = FunctionSpace(mesh, SE, constrained_domain=pbc) W = WSSS.sub(0).collapse() S = WSSS.sub(1).collapse() temperature_vals = [27.0 + 273, Tb + 273, 1300.0 + 273, 1305.0 + 273] temp_prof = TemperatureProfile(temperature_vals, element=S.ufl_element()) mu_a = mu_value # this was taken from the Blankenbach paper, can change Ep = b / temp_prof.delta mu_bot = exp(-Ep * (temp_prof.bottom * temp_prof.delta - 1573.0) + cc) * mu_a # TODO: verify exponentiation Ra = rho_0 * alpha * g * temp_prof.delta * h**3 / (kappa_0 * mu_a) w0 = rho_0 * alpha * g * temp_prof.delta * h**2 / mu_a tau = h / w0 p0 = mu_a * w0 / h log(mu_a, mu_bot, Ra, w0, p0) slip_vx = 1.6E-09 / w0 # Non-dimensional slip_velocity = Constant((slip_vx, 0.0, 0.0)) zero_slip = Constant((0.0, 0.0, 0.0)) time_step = 3.0E11 / tau * 2 dt = Constant(time_step) t_end = 3.0E15 / tau / 5.0 # Non-dimensional times u = Function(WSSS) # Instead of TrialFunctions, we use split(u) for our non-linear problem v, p, T, Tf = split(u) v_t, p_t, T_t, Tf_t = TestFunctions(WSSS) T0 = interpolate(temp_prof, S) mu_exp = Expression( 'exp(-Ep * (T_val * dTemp - 1573.0) + cc * x[2] / mesh_height)', Ep=Ep, dTemp=temp_prof.delta, cc=cc, mesh_height=mesh_height, T_val=T0, element=S.ufl_element()) Tf0 = interpolate(temp_prof, S) mu = Function(S) v0 = Function(W) v_theta = (1.0 - theta) * v0 + theta * v T_theta = (1.0 - theta) * T0 + theta * T Tf_theta = (1.0 - theta) * Tf0 + theta * Tf # TODO: Verify forms r_v = (inner(sym(grad(v_t)), 2.0 * mu * sym(grad(v))) - div(v_t) * p - T * v_t[2]) * dx r_p = p_t * div(v) * dx heat_transfer = Constant(k_s) * (Tf_theta - T_theta) * dt r_T = ( T_t * ((T - T0) + dt * inner(v_theta, grad(T_theta))) # TODO: Inner vs dot + (dt / Ra) * inner(grad(T_t), grad(T_theta)) - T_t * heat_transfer) * dx v_melt = Function(W) z_hat = Constant((0.0, 0.0, 1.0)) # TODO: inner -> dot, take out Tf_t r_Tf = (Tf_t * ((Tf - Tf0) + dt * inner(v_melt, grad(Tf_theta))) + Tf_t * heat_transfer) * dx r = r_v + r_p + r_T + r_Tf bcv0 = DirichletBC(WSSS.sub(0), zero_slip, top) bcv1 = DirichletBC(WSSS.sub(0), slip_velocity, bottom) bcv2 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), back) bcv3 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), front) bcp0 = DirichletBC(WSSS.sub(1), Constant(0.0), bottom) bct0 = DirichletBC(WSSS.sub(2), Constant(temp_prof.surface), top) bct1 = DirichletBC(WSSS.sub(2), Constant(temp_prof.bottom), bottom) bctf1 = DirichletBC(WSSS.sub(3), Constant(temp_prof.bottom), bottom) bcs = [bcv0, bcv1, bcv2, bcv3, bcp0, bct0, bct1, bctf1] t = 0 count = 0 files = DefaultDictByKey(partial(create_xdmf, path)) while t < t_end: mu.interpolate(mu_exp) rhosolid = rho_0 * (1.0 - alpha * (T0 * temp_prof.delta - 1573.0)) deltarho = rhosolid - rho_melt # TODO: project (accuracy) vs interpolate assign( v_melt, project( v0 - darcy * (grad(p) * p0 / h - deltarho * z_hat * g) / w0, W)) # TODO: Written out one step later? # v_melt.assign(v0 - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0) # TODO: use nP after to avoid projection? solve(r == 0, u, bcs) nV, nP, nT, nTf = u.split() # TODO: write with Tf, ... 
etc if count % output_every == 0: time_left(count, t_end / time_step, run_time_init) # TODO: timestep vs dt # TODO: Make sure all writes are to the same function for each time step files['T_fluid'].write(nTf, t) files['p'].write(nP, t) files['v_solid'].write(nV, t) files['T_solid'].write(nT, t) files['mu'].write(mu, t) files['v_melt'].write(v_melt, t) files['gradp'].write(project(grad(nP), W), t) files['rho'].write(project(rhosolid, S), t) files['Tf_grad'].write(project(grad(Tf), W), t) files['advect'].write(project(dt * dot(v_melt, grad(nTf))), t) files['ht'].write(project(heat_transfer, S), t) assign(T0, nT) assign(v0, nV) assign(Tf0, nTf) t += time_step count += 1 log('Case mu={}, Tb={}, k={} complete. Run time = {:.2f} minutes'.format( mu_a, Tb, k_s, (clock() - run_time_init) / 60.0))
# Loop over all layers in y-direction
for j in range(ny + 1):
    y_.assign(ydofs[j])
    L = assemble(L_)
    # This is ordinary matrix multiplication
    # np.max(np.abs(M*Vtilde[3,:] - M.array() @ Vtilde[3,:]))
    Mvtmp.vector().set_local(M * Vtilde[j, :])
    Lfull = Mvtmp.vector() - dt[i] * L
    bc_Vx.apply(Lfull)
    # Solve problem in j'th layer
    solve(Sfull, vsol.vector(), Lfull)
    # Store solution for j'th layer
    vold[j, :] = vsol.vector().get_local()

if storeV:
    Vsol[i + 1, :, :] = vold

if plotSol:
    Z = np.abs(vold - u_numpy(trange[i], X, Y))
    if plotMode == 'contour':
        plotViContour(
            X[:, ix], Y[:, ix], Z[:, ix],
def stokes_solve( up_out, mu, u_bcs, p_bcs, f, dx=dx, verbose=True, tol=1.0e-10, maxiter=1000 ): # Some initial sanity checks. assert mu > 0.0 WP = up_out.function_space() # Translate the boundary conditions into the product space. new_bcs = [] for k, bcs in enumerate([u_bcs, p_bcs]): for bc in bcs: space = bc.function_space() C = space.component() if len(C) == 0: new_bcs.append(DirichletBC(WP.sub(k), bc.value(), bc.domain_args[0])) elif len(C) == 1: new_bcs.append(DirichletBC(WP.sub(k).sub(int(C[0])), bc.value(), bc.domain_args[0])) else: raise RuntimeError('Illegal number of subspace components.') # TODO define p*=-1 and reverse sign in the end to get symmetric system? # Define variational problem (u, p) = TrialFunctions(WP) (v, q) = TestFunctions(WP) r = Expression('x[0]', degree=1, domain=WP.mesh()) print("mu = %e" % mu) # build system a = mu * inner(r * grad(u), grad(v)) * 2 * pi * dx \ - ((r * v[0]).dx(0) + (r * v[1]).dx(1)) * p * 2 * pi * dx \ + ((r * u[0]).dx(0) + (r * u[1]).dx(1)) * q * 2 * pi * dx #- div(r*v)*p* 2*pi*dx \ #+ q*div(r*u)* 2*pi*dx L = inner(f, v) * 2 * pi * r * dx A, b = assemble_system(a, L, new_bcs) mode = 'lu' if mode == 'lu': solve(A, up_out.vector(), b, 'lu') elif mode == 'gmres': # For preconditioners for the Stokes system, see # # Fast iterative solvers for discrete Stokes equations; # J. Peters, V. Reichelt, A. Reusken. # prec = mu * inner(r * grad(u), grad(v)) * 2 * pi * dx \ - p * q * 2 * pi * r * dx P, btmp = assemble_system(prec, L, new_bcs) solver = KrylovSolver('tfqmr', 'amg') #solver = KrylovSolver('gmres', 'amg') solver.set_operators(A, P) solver.parameters['monitor_convergence'] = verbose solver.parameters['report'] = verbose solver.parameters['absolute_tolerance'] = 0.0 solver.parameters['relative_tolerance'] = tol solver.parameters['maximum_iterations'] = maxiter # Solve solver.solve(up_out.vector(), b) elif mode == 'fieldsplit': raise NotImplementedError('Fieldsplit solver not yet implemented.') # For an assortment of preconditioners, see # # Performance and analysis of saddle point preconditioners # for the discrete steady-state Navier-Stokes equations; # H.C. Elman, D.J. Silvester, A.J. Wathen; # Numer. Math. (2002) 90: 665-688; # <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.145.3554>. # # Set up field split. W = SubSpace(WP, 0) P = SubSpace(WP, 1) u_dofs = W.dofmap().dofs() p_dofs = P.dofmap().dofs() prec = PETScPreconditioner() prec.set_fieldsplit([u_dofs, p_dofs], ['u', 'p']) PETScOptions.set('pc_type', 'fieldsplit') PETScOptions.set('pc_fieldsplit_type', 'additive') PETScOptions.set('fieldsplit_u_pc_type', 'lu') PETScOptions.set('fieldsplit_p_pc_type', 'jacobi') # Create Krylov solver with custom preconditioner. solver = PETScKrylovSolver('gmres', prec) solver.set_operator(A) return
def solve_regular(a, L, bcs, u, solver_parameters):
    if solver_parameters is None:
        solver_parameters = {"linear_solver": "gmres"}
    df.solve(a == L, u, bcs, solver_parameters=solver_parameters)
def _evaluateGlobalMixedEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, vectorspace_type='BDM'): """Evaluation of global mixed equilibrated estimator.""" # set quadrature degree # quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"] # parameters["form_compiler"]["quadrature_degree"] = quadrature_degree # logger.debug("residual quadrature order = " + str(quadrature_degree)) # prepare numerical flux and f sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f) # ################### # ## MIXED PROBLEM ## # ################### # get setup data for mixed problem V = w[mu]._fefunc.function_space() mesh = V.mesh() degree = element_degree(w[mu]._fefunc) # create function spaces DG0 = FunctionSpace(mesh, 'DG', 0) DG0_dofs = [DG0.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)] RT = FunctionSpace(mesh, vectorspace_type, degree) W = RT * DG0 # setup boundary conditions # bcs = pde.create_dirichlet_bcs(W.sub(1)) # debug === # from dolfin import DOLFIN_EPS, DirichletBC # def boundary(x): # return x[0] < DOLFIN_EPS or x[0] > 1.0 + DOLFIN_EPS or x[1] < DOLFIN_EPS or x[1] > 1.0 + DOLFIN_EPS # bcs = [DirichletBC(W.sub(1), Constant(0.0), boundary)] # === debug # create trial and test functions (sigma, u) = TrialFunctions(W) (tau, v) = TestFunctions(W) # define variational form a_eq = (dot(sigma, tau) + div(tau) * u + div(sigma) * v) * dx L_eq = (- f_mu * v + dot(sigma_mu, tau)) * dx # compute solution w_eq = Function(W) solve(a_eq == L_eq, w_eq) (sigma_mixed, u_mixed) = w_eq.split() # ############################# # ## EQUILIBRATION ESTIMATOR ## # ############################# # evaluate error estimator dg0 = TestFunction(DG0) eta_mu = inner(sigma_mu, sigma_mu) * dg0 * dx eta_T = assemble(eta_mu, form_compiler_parameters={'quadrature_degree': quadrature_degree}) eta_T = np.array([sqrt(e) for e in eta_T]) # evaluate global error eta = sqrt(sum(i**2 for i in eta_T)) # reorder array entries for local estimators eta_T = eta_T[DG0_dofs] # restore quadrature degree # parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old return eta, FlatVector(eta_T)
# We also need to create a :py:class:`Function
# <dolfin.cpp.function.Function>` to store the solution(s). The (full)
# solution will be stored in ``w``, which we initialize using the mixed
# function space ``W``. The actual computation is performed by calling
# solve with the arguments ``a``, ``L``, ``w`` and ``bcs``. The separate
# components ``u`` and ``p`` of the solution can then be extracted with
# ``sub``; calling ``collapse`` on each subfunction gives a deep copy
# (rather than a shallow view), which we want for further computations on
# the coefficient vectors::

# Compute solution
w = Function(W)
solve(a == L, w, bcs, petsc_options={"ksp_type": "preonly",
                                     "pc_type": "lu",
                                     "pc_factor_mat_solver_type": "mumps"})

# Split the mixed solution and collapse
u = w.sub(0).collapse()
p = w.sub(1).collapse()

# We can calculate the :math:`L^2` norms of u and p as follows::

print("Norm of velocity coefficient vector: %.15g" % u.vector.norm())
print("Norm of pressure coefficient vector: %.15g" % p.vector.norm())

# Check pressure norm
assert np.isclose(p.vector.norm(), 4147.69457577)

# Finally, we can save and plot the solutions::
def test_heat_equation_fenics(): # Define problem class Heat(object): ''' u' = \\Delta u + f ''' def __init__(self, V): self.V = V u = TrialFunction(V) v = TestFunction(V) self.M = assemble(u * v * dx) self.A = assemble(-dot(grad(u), grad(v)) * dx) self.b = assemble(1.0 * v * dx) self.bcs = DirichletBC(self.V, 0.0, 'on_boundary') return # pylint: disable=unused-argument def eval_alpha_M_beta_F(self, alpha, beta, u, t): # Evaluate alpha * M * u + beta * F(u, t). uvec = u.vector() return alpha * (self.M * uvec) + beta * (self.A * uvec + self.b) def solve_alpha_M_beta_F(self, alpha, beta, b, t): # Solve alpha * M * u + beta * F(u, t) = b for u. A = alpha * self.M + beta * self.A rhs = b - beta * self.b self.bcs.apply(A, rhs) solver = KrylovSolver('gmres', 'ilu') solver.parameters['relative_tolerance'] = 1.0e-13 solver.parameters['absolute_tolerance'] = 0.0 solver.parameters['maximum_iterations'] = 100 solver.parameters['monitor_convergence'] = True solver.set_operator(A) u = Function(self.V) solver.solve(u.vector(), rhs) return u # create initial guess mesh = UnitSquareMesh(20, 20, 'crossed') V = FunctionSpace(mesh, 'CG', 1) u = TrialFunction(V) v = TestFunction(V) u0 = Function(V) solve(u * v * dx == Constant(0.0) * v * dx, u0) u1 = Function(V) u1.assign(u0) # create time stepper # stepper = parabolic.Dummy(Heat(V)) # stepper = parabolic.ExplicitEuler(Heat(V)) stepper = parabolic.ImplicitEuler(Heat(V)) # stepper = parabolic.Trapezoidal(Heat(V)) # step t = 0.0 dt = 1.0e-3 with XDMFFile('heat.xdmf') as xf: xf.write(u1, t) for _ in range(10): u1.assign(stepper.step(u0, t, dt)) u0.assign(u1) t += dt xf.write(u1, t) return
def comp_full_model_heat_dirichlet(mat_obj, mesh_obj, bc, omega, save_path): # ====== # Parameters # ====== E = mat_obj.E rho = mat_obj.rho nu = mat_obj.nu mesh = mesh_obj.create() Rext = mesh_obj.Rext Rint = mesh_obj.Rint G = 1 # FAKE # ====== # Thermal load # ====== alpha = 1 T = 1 # ====== # Thickness profile # ====== h = 1 omega_velo = 1 #Fake # ====== # markers # ====== cell_markers, facet_markers = define_markers(mesh, Rext, Rint) # rename x[0], x[1] by x, y x, y = df.SpatialCoordinate(mesh) dim = mesh.topology().dim() coord = mesh.coordinates() # ====== # Create function space # ====== V = df.FunctionSpace(mesh, "CG", 1) degree = 1 fi_ele = FiniteElement("CG", mesh.ufl_cell(), degree) # CG: Continuous Galerkin vec_ele = VectorElement("CG", mesh.ufl_cell(), degree) total_ele = MixedElement([fi_ele, fi_ele]) W = df.FunctionSpace(mesh, total_ele) # ====== # Define boundary condition # ====== if bc == 'CC': u_Dbc = [ df.DirichletBC(W.sub(0), df.Constant(0.0), facet_markers, bc) for bc in [1, 2] ] v_Dbc = [ df.DirichletBC(W.sub(1), df.Constant(0.0), facet_markers, bc) for bc in [1, 2] ] elif bc == 'CF': u_Dbc = [ df.DirichletBC(W.sub(0), df.Constant(0.0), facet_markers, bc) for bc in [2] ] v_Dbc = [ df.DirichletBC(W.sub(1), df.Constant(0.0), facet_markers, bc) for bc in [2] ] Dbc = u_Dbc + v_Dbc # ====== # Define functions # ====== dunks = df.TrialFunction(W) tunks = df.TestFunction(W) unks = df.Function(W, name='displacement') # u(x,y): displacement in radial direction # v(x,y): displacement in tangential direction (du, dv) = df.split(dunks) (tu, tv) = df.split(tunks) (u, v) = df.split(unks) # ====== # Define variable # ====== class THETA(df.UserExpression): def eval(self, values, x): values[0] = math.atan2(x[1], x[0]) def value_shape(self): #return (1,) # vector return () # scalar theta = THETA(degree=1) #theta_int = df.interpolate(theta, df.FunctionSpace(mesh, "DG", 0)) #df.File(save_path + 'theta.pvd') << theta_int class RADIUS(df.UserExpression): def eval(self, values, x): values[0] = df.sqrt(x[0] * x[0] + x[1] * x[1]) def value_shape(self): return () # scalar r = RADIUS(degree=1) # ====== # Define week form # ====== def d_dr(du): return 1.0 / r * (x * df.Dx(du, 0) + y * df.Dx(du, 1)) def d_dtheta(du): return -y * df.Dx(du, 0) + x * df.Dx(du, 1) # strain radial def epsilon_r(du): return d_dr(du) # strain circumferential def epsilon_theta(du, dv): return du / r + 1.0 / r * d_dtheta(dv) # shear srain component def gamma_rtheta(du, dv): return d_dr(dv) - dv / r + 1.0 / r * d_dtheta(du) epsilon_r(du) epsilon_theta(du, dv) gamma_rtheta(du, dv) ''' S = [[1.0/E, -nu/E, 0.0], [-nu/E, 1.0/E, 0.0], [0.0, 0.0, 1.0/G]] C = df.inv(df.as_matrix(S)) eps_vector = df.as_vector([epsilon_r(du), epsilon_theta(du, dv), gamma_rtheta(du, dv)]) sig_vector = dot(C, eps_vector) ''' def sigma_r(du, dv): return E / (1.0 - nu**2) * ((epsilon_r(du) - alpha * T) + nu * (epsilon_theta(du, dv) - alpha * T)) def sigma_theta(du, dv): return E / (1.0 - nu**2) * ((epsilon_theta(du, dv) - alpha * T) + nu * (epsilon_r(du) - alpha * T)) def tau_rtheta(du, dv): return G * gamma_rtheta(du, dv) # week form dF_sigma = -sigma_r(du, dv) * h * r * d_dr(tu) * dx dF_sigma = dF_sigma - tau_rtheta(du, dv) * h * d_dtheta(tu) * dx dF_sigma = dF_sigma - sigma_theta(du, dv) * h * tu * dx dF_sigma = dF_sigma + rho * omega**2 * r**2 * h * tu * dx dF_tau = -tau_rtheta(du, dv) * h * r * d_dr(tv) * dx dF_tau = dF_tau - sigma_theta(du, dv) * h * d_dtheta(tv) * dx dF_tau = dF_tau + tau_rtheta(du, dv) * h * tv * dx dF_tau = dF_tau + rho * 
omega_velo * r**2 * h * tv * dx dF = dF_sigma + dF_tau # residual F = df.action(dF, unks) # solve df.solve(F == 0, unks, Dbc) # splits solution _u, _v = unks.split(True) # displacement df.File(save_path + 'u.pvd') << _u df.File(save_path + 'v.pvd') << _v # ====== # Analyze # ====== # compute stresses sigma_r_pro = df.project(sigma_r(_u, _v), V) sigma_r_pro.rename('sigma_r [Pa]', 'sigma_r [Pa]') df.File(save_path + 'sigma_r.pvd') << sigma_r_pro sigma_theta_pro = df.project(sigma_theta(_u, _v), V) sigma_theta_pro.rename('sigma_theta [Pa]', 'sigma_theta [Pa]') df.File(save_path + 'sigma_theta.pvd') << sigma_theta_pro # compute von Mises stress def von_mises_stress(sigma_r, sigma_theta): return df.sqrt(sigma_r**2 + sigma_theta**2 - sigma_r * sigma_theta) von_stress_pro = df.project( von_mises_stress(sigma_r(_u, _v), sigma_theta(_u, _v)), V) von_stress_pro.rename('von Mises Stress [Pa]', 'von Mises Stress [Pa]') df.File(save_path + 'von_mises_stress.pvd') << von_stress_pro tau_rtheta_pro = df.project(tau_rtheta(_u, _v), V) tau_rtheta_pro.rename('tau_rtheta [Pa]', 'tau_rtheta [Pa]') df.File(save_path + 'tau_rtheta.pvd') << tau_rtheta_pro # save results to h5 rfile = df.HDF5File(mesh.mpi_comm(), save_path + 'results.h5', "w") rfile.write(_u, "u") rfile.write(_v, "v") rfile.write(sigma_r_pro, "sigma_r") rfile.write(sigma_theta_pro, "sigma_theta") rfile.write(von_stress_pro, "von_mises_stress") rfile.write(tau_rtheta_pro, "tau_rtheta_pro") rfile.close()
def solve_linear(self, d_outputs, d_residuals, mode): linear_solver_ = self.options['linear_solver_'] pde_problem = self.options['pde_problem'] state_name = self.options['state_name'] state_function = pde_problem.states_dict[state_name]['function'] for argument_name, argument_function in iteritems(self.argument_functions_dict): density_func = argument_function mesh = state_function.function_space().mesh() sub_domains = df.MeshFunction('size_t', mesh, mesh.topology().dim() - 1) upper_edge = TractionBoundary() upper_edge.mark(sub_domains, 6) dss = df.Measure('ds')(subdomain_data=sub_domains) tractionBC = dss(6) residual_form = get_residual_form( state_function, df.TestFunction(state_function.function_space()), density_func, density_func.function_space(), tractionBC, # df.Constant((0.0, -9.e-1)) df.Constant((0.0, -9.e-1)), int(self.itr) ) A, _ = df.assemble_system(self.derivative_form, - residual_form, pde_problem.bcs_list) if linear_solver_=='fenics_direct': rhs_ = df.Function(state_function.function_space()) dR = df.Function(state_function.function_space()) rhs_.vector().set_local(d_outputs[state_name]) for bc in pde_problem.bcs_list: bc.apply(A) Am = df.as_backend_type(A).mat() ATm = Am.transpose() AT = df.PETScMatrix(ATm) df.solve(AT,dR.vector(),rhs_.vector()) d_residuals[state_name] = dR.vector().get_local() elif linear_solver_=='scipy_splu': for bc in pde_problem.bcs_list: bc.apply(A) Am = df.as_backend_type(A).mat() ATm = Am.transpose() ATm_csr = csr_matrix(ATm.getValuesCSR()[::-1], shape=Am.size) lu = splu(ATm_csr.tocsc()) d_residuals[state_name] = lu.solve(d_outputs[state_name],trans='T') elif linear_solver_=='fenics_Krylov': rhs_ = df.Function(state_function.function_space()) dR = df.Function(state_function.function_space()) rhs_.vector().set_local(d_outputs[state_name]) for bc in pde_problem.bcs_list: bc.apply(A) Am = df.as_backend_type(A).mat() ATm = Am.transpose() AT = df.PETScMatrix(ATm) solver = df.KrylovSolver('gmres', 'ilu') prm = solver.parameters prm["maximum_iterations"]=1000000 prm["divergence_limit"] = 1e2 solver.solve(AT,dR.vector(),rhs_.vector()) d_residuals[state_name] = dR.vector().get_local() elif linear_solver_=='petsc_gmres_ilu': ksp = PETSc.KSP().create() ksp.setType(PETSc.KSP.Type.GMRES) ksp.setTolerances(rtol=5e-11) for bc in pde_problem.bcs_list: bc.apply(A) Am = df.as_backend_type(A).mat() ksp.setOperators(Am) ksp.setFromOptions() pc = ksp.getPC() pc.setType("ilu") size = state_function.function_space().dim() dR = PETSc.Vec().create() dR.setSizes(size) dR.setType('seq') dR.setValues(range(size), d_residuals[state_name]) dR.setUp() du = PETSc.Vec().create() du.setSizes(size) du.setType('seq') du.setValues(range(size), d_outputs[state_name]) du.setUp() if mode == 'fwd': ksp.solve(dR,du) d_outputs[state_name] = du.getValues(range(size)) else: ksp.solveTranspose(du,dR) d_residuals[state_name] = dR.getValues(range(size))
def discretize(DIM, N, ORDER):
    # ### problem definition
    import dolfin as df

    if DIM == 2:
        mesh = df.UnitSquareMesh(N, N)
    elif DIM == 3:
        mesh = df.UnitCubeMesh(N, N, N)
    else:
        raise NotImplementedError

    V = df.FunctionSpace(mesh, "CG", ORDER)

    g = df.Constant(1.0)
    c = df.Constant(1.)

    class DirichletBoundary(df.SubDomain):
        def inside(self, x, on_boundary):
            return abs(x[0] - 1.0) < df.DOLFIN_EPS and on_boundary

    db = DirichletBoundary()
    bc = df.DirichletBC(V, g, db)

    u = df.Function(V)
    v = df.TestFunction(V)
    f = df.Expression("x[0]*sin(x[1])", degree=2)
    F = df.inner((1 + c * u**2) * df.grad(u), df.grad(v)) * df.dx - f * v * df.dx

    df.solve(F == 0, u, bc,
             solver_parameters={"newton_solver": {"relative_tolerance": 1e-6}})

    # ### pyMOR wrapping
    from pymor.bindings.fenics import FenicsVectorSpace, FenicsOperator, FenicsVisualizer
    from pymor.models.basic import StationaryModel
    from pymor.operators.constructions import VectorOperator
    from pymor.parameters.spaces import CubicParameterSpace

    space = FenicsVectorSpace(V)
    op = FenicsOperator(F, space, space, u, (bc,),
                        parameter_setter=lambda mu: c.assign(float(mu['c'])),
                        parameter_type={'c': ()},
                        solver_options={'inverse': {'type': 'newton', 'rtol': 1e-6}})
    rhs = VectorOperator(op.range.zeros())

    fom = StationaryModel(op, rhs,
                          visualizer=FenicsVisualizer(space),
                          parameter_space=CubicParameterSpace({'c': ()}, 0., 1000.))

    return fom
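# Hypothetical usage sketch (an assumption; exact pyMOR calls depend on the
# installed version, and the parameter value 100. is arbitrary):
fom = discretize(DIM=2, N=32, ORDER=1)
U = fom.solve({'c': 100.})
fom.visualize(U)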
def solve_nonlinear(self, inputs, outputs): pde_problem = self.options['pde_problem'] state_name = self.options['state_name'] problem_type = self.options['problem_type'] visualization = self.options['visualization'] state_function = pde_problem.states_dict[state_name]['function'] for argument_name, argument_function in iteritems(self.argument_functions_dict): density_func = argument_function mesh = state_function.function_space().mesh() sub_domains = df.MeshFunction('size_t', mesh, mesh.topology().dim() - 1) upper_edge = TractionBoundary() upper_edge.mark(sub_domains, 6) dss = df.Measure('ds')(subdomain_data=sub_domains) tractionBC = dss(6) self.itr = self.itr + 1 state_function = pde_problem.states_dict[state_name]['function'] residual_form = get_residual_form( state_function, df.TestFunction(state_function.function_space()), density_func, density_func.function_space(), tractionBC, # df.Constant((0.0, -9.e-1)) df.Constant((0.0, -9.e-1)), int(self.itr) ) self._set_values(inputs, outputs) self.derivative_form = df.derivative(residual_form, state_function) df.set_log_level(df.LogLevel.ERROR) df.set_log_active(True) # df.solve(residual_form==0, state_function, bcs=pde_problem.bcs_list, J=self.derivative_form) if problem_type == 'linear_problem': df.solve(residual_form==0, state_function, bcs=pde_problem.bcs_list, J=self.derivative_form, solver_parameters={"newton_solver":{"maximum_iterations":60, "error_on_nonconvergence":False}}) elif problem_type == 'nonlinear_problem': problem = df.NonlinearVariationalProblem(residual_form, state_function, pde_problem.bcs_list, self.derivative_form) solver = df.NonlinearVariationalSolver(problem) solver.parameters['nonlinear_solver_']='snes' solver.parameters["snes_solver"]["line_search"] = 'bt' solver.parameters["snes_solver"]["linear_solver_"]='mumps' # "cg" "gmres" solver.parameters["snes_solver"]["maximum_iterations"]=500 solver.parameters["snes_solver"]["relative_tolerance"]=5e-13 solver.parameters["snes_solver"]["absolute_tolerance"]=5e-13 # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000 solver.parameters["snes_solver"]["error_on_nonconvergence"] = False solver.solve() elif problem_type == 'nonlinear_problem_load_stepping': num_steps = 3 state_function.vector().set_local(np.zeros((state_function.function_space().dim()))) for i in range(num_steps): v = df.TestFunction(state_function.function_space()) if i < (num_steps-1): residual_form = get_residual_form( state_function, v, density_func, density_func.function_space(), tractionBC, # df.Constant((0.0, -9.e-1)) df.Constant((0.0, -9.e-1/num_steps*(i+1))), int(self.itr) ) else: residual_form = get_residual_form( state_function, v, density_func, density_func.function_space(), tractionBC, # df.Constant((0.0, -9.e-1)) df.Constant((0.0, -9.e-1/num_steps*(i+1))), int(self.itr) ) problem = df.NonlinearVariationalProblem(residual_form, state_function, pde_problem.bcs_list, self.derivative_form) solver = df.NonlinearVariationalSolver(problem) solver.parameters['nonlinear_solver_']='snes' solver.parameters["snes_solver"]["line_search"] = 'bt' solver.parameters["snes_solver"]["linear_solver_"]='mumps' # "cg" "gmres" solver.parameters["snes_solver"]["maximum_iterations"]=500 solver.parameters["snes_solver"]["relative_tolerance"]=1e-15 solver.parameters["snes_solver"]["absolute_tolerance"]=1e-15 # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000 solver.parameters["snes_solver"]["error_on_nonconvergence"] = False solver.solve() # option to store the 
visualization results if visualization == 'True': for argument_name, argument_function in iteritems(self.argument_functions_dict): df.File('solutions_iterations_3d/{}_{}.pvd'.format(argument_name, self.itr)) << argument_function self.L = -residual_form self.itr = self.itr+1 outputs[state_name] = state_function.vector().get_local()
mu = fac_avg * 0.5

# Define single-scale constitutive law
def sigma(u):
    return lamb * ufl.nabla_div(u) * df.Identity(2) + 2 * mu * symgrad(u)

# Define single-scale variational problem
a_single_scale = df.inner(sigma(uh), df.grad(vh)) * dx
f_single_scale = df.inner(traction, vh) * ds(2)

# Compute single-scale solution
uh_single_scale = df.Function(Uh)
df.solve(a_single_scale == f_single_scale, uh_single_scale, bcs=bcL,
         solver_parameters={"linear_solver": "mumps"})

# Save single-scale solution in XDMF format
file_results = df.XDMFFile("bar_single_scale.xdmf")
file_results.write(uh_single_scale)

# ~~~ PART II: multiscale constitutive law ~~~ #

# Define the mesh of the micro model. Note that this mesh is associated with
# the current processor only and is not partitioned across multiple processors.
Nx_micro = Ny_micro = 50
Lx_micro = Ly_micro = 1.0
mesh_micro = df.RectangleMesh(MPI.COMM_SELF, df.Point(0.0, 0.0),
                              df.Point(Lx_micro, Ly_micro),
                              Nx_micro, Ny_micro, "right/left")
def run_model(function_space, kappa, forcing, init_condition, dt, final_time, boundary_conditions=None, second_order_timestepping=False, exact_sol=None, velocity=None, point_sources=None): """ Use implicit euler to solve transient advection diffusion equation du/dt = grad (k* grad u) - vel*grad u + f WARNINGarningW: when point sources solution changes significantly when mesh is varied """ mesh = function_space.mesh() time_independent_boundaries = False if boundary_conditions == None: bndry_obj = dl.CompiledSubDomain("on_boundary") boundary_conditions = [['dirichlet', bndry_obj, dl.Constant(0)]] time_independent_boundaries = True num_bndrys = len(boundary_conditions) boundaries = mark_boundaries(mesh, boundary_conditions) dirichlet_bcs = collect_dirichlet_boundaries(function_space, boundary_conditions, boundaries) # To express integrals over the boundary parts using ds(i), we must first # redefine the measure ds in terms of our boundary markers: ds = dl.Measure('ds', domain=mesh, subdomain_data=boundaries) dx = dl.Measure('dx', domain=mesh) # Variational problem at each time u = dl.TrialFunction(function_space) v = dl.TestFunction(function_space) # Previous solution if hasattr(init_condition, 't'): assert init_condition.t == 0 u_1 = dl.interpolate(init_condition, function_space) if not second_order_timestepping: theta = 1 else: theta = 0.5 if hasattr(forcing, 't'): forcing_1 = copy_expression(forcing) else: forcing_1 = forcing def steady_state_form(u, v, f): F = kappa * dl.inner(dl.grad(u), dl.grad(v)) * dx F -= f * v * dx if velocity is not None: F += dl.dot(velocity, dl.grad(u)) * v * dx return F F = u*v*dx-u_1*v*dx + dt*theta*steady_state_form(u,v,forcing) + \ dt*(1.-theta)*steady_state_form(u_1,v,forcing_1) a, L = dl.lhs(F), dl.rhs(F) # a = u*v*dx + theta*dt*kappa*dl.inner(dl.grad(u), dl.grad(v))*dx # L = (u_1 + dt*theta*forcing)*v*dx # if velocity is not None: # a += theta*dt*v*dl.dot(velocity,dl.grad(u))*dx # if second_order_timestepping: # L -= (1-theta)*dt*dl.inner(kappa*dl.grad(u_1), dl.grad(v))*dx # L += (1-theta)*dt*forcing_1*v*dx # if velocity is not None: # L -= (1-theta)*dt*(v*dl.dot(velocity,dl.grad(u_1)))*dx beta_1_list = [] alpha_1_list = [] for ii in range(num_bndrys): if (boundary_conditions[ii][0] == 'robin'): alpha = boundary_conditions[ii][3] a += theta * dt * alpha * u * v * ds(ii) if second_order_timestepping: if hasattr(alpha, 't'): alpha_1 = copy_expression(alpha) alpha_1_list.append(alpha_1) else: alpha_1 = alpha L -= (1 - theta) * dt * alpha_1 * u_1 * v * ds(ii) if ((boundary_conditions[ii][0] == 'robin') or (boundary_conditions[ii][0] == 'neumann')): beta = boundary_conditions[ii][2] L -= theta * dt * beta * v * ds(ii) if second_order_timestepping: if hasattr(beta, 't'): beta_1 = copy_expression(beta) beta_1_list.append(beta_1) else: # boundary condition is constant in time beta_1 = beta L -= (1 - theta) * dt * beta_1 * v * ds(ii) if time_independent_boundaries: # TODO this can be used if dirichlet and robin conditions are not # time dependent. 
        A = dl.assemble(a)
        for bc in dirichlet_bcs:
            bc.apply(A)
        solver = dl.LUSolver(A)
        # solver.parameters["reuse_factorization"] = True
    else:
        solver = None

    u_2 = dl.Function(function_space)
    u_2.assign(u_1)
    t = 0.0
    dt_tol = 1e-12
    n_time_steps = 0
    while t < final_time - dt_tol:
        # Update current time
        t += dt
        forcing.t = t
        forcing_1.t = t - dt

        # set current time for time varying boundary conditions
        for ii in range(num_bndrys):
            if hasattr(boundary_conditions[ii][2], 't'):
                boundary_conditions[ii][2].t = t

        # set previous time for time varying boundary conditions when
        # using second order timestepping. lists will be empty if using
        # first order timestepping
        for jj in range(len(beta_1_list)):
            beta_1_list[jj].t = t - dt
        for jj in range(len(alpha_1_list)):
            alpha_1_list[jj].t = t - dt

        # A, b = dl.assemble_system(a, L, dirichlet_bcs)
        # for bc in dirichlet_bcs:
        #     bc.apply(A, b)
        if boundary_conditions is not None:
            A = dl.assemble(a)
            for bc in dirichlet_bcs:
                bc.apply(A)

        b = dl.assemble(L)
        for bc in dirichlet_bcs:
            bc.apply(b)

        if point_sources is not None:
            ps_list = []
            for ii in range(len(point_sources)):
                point, expr = point_sources[ii]
                ps_list.append((dl.Point(point[0], point[1]), expr(t)))
            ps = dl.PointSource(function_space, ps_list)
            ps.apply(b)

        if solver is None:
            dl.solve(A, u_2.vector(), b)
        else:
            solver.solve(u_2.vector(), b)

        # print("t =", t, "end t=", final_time)

        # Update previous solution
        u_1.assign(u_2)
        # import matplotlib.pyplot as plt
        # plt.subplot(131)
        # pp = dl.plot(u_1)
        # plt.subplot(132)
        # dl.plot(forcing, mesh=mesh)
        # plt.subplot(133)
        # dl.plot(forcing_1, mesh=mesh)
        # plt.colorbar(pp)
        # plt.show()

        # compute error
        if exact_sol is not None:
            exact_sol.t = t
            error = dl.errornorm(exact_sol, u_2)
            print('t = %.2f: error = %.3g' % (t, error))
            # dl.plot(exact_sol, mesh=mesh)
            # plt.show()

        t = min(t, final_time)
        n_time_steps += 1
    # print("t =", t, "end t=", final_time, "# time steps", n_time_steps)

    return u_2
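A minimal usage sketch for run_model, not taken from the original source: it assumes dolfin is imported as dl and that the helper routines referenced above (mark_boundaries, collect_dirichlet_boundaries, copy_expression) are importable from the same module. The default homogeneous Dirichlet boundary is used and all parameter values are illustrative.

mesh = dl.UnitSquareMesh(32, 32)
function_space = dl.FunctionSpace(mesh, "CG", 1)
kappa = dl.Constant(1.0)
# time-dependent forcing; the expression is a placeholder
forcing = dl.Expression("sin(pi*x[0])*sin(pi*x[1])*exp(-t)", t=0, degree=2)
init_condition = dl.Constant(0.0)
u_final = run_model(function_space, kappa, forcing, init_condition,
                    dt=0.01, final_time=0.1,
                    second_order_timestepping=True)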
# Galerkin least-squares stabilization term
stb_gls = L(tu) * tau * Res(du) * df.dx

# Weak form
dres = r * sigma_r(du) * epsilon_r(tu) * df.dx
dres = dres + sigma_theta(du) * tu * df.dx
dres = dres - Fc * tu * df.dx
dres = dres + 1e9 * stb_gls

# residual
res = df.action(dres, u)

# solve
df.solve(res == 0, u, bc)

# displacement
df.File(save_path + 'displacement.pvd') << u

# compute stresses
sigma_r_pro = df.project(sigma_r(u), V)
sigma_r_pro.rename('sigma_r [Pa]', 'sigma_r [Pa]')
df.File(save_path + 'sigma_r.pvd') << sigma_r_pro

sigma_theta_pro = df.project(sigma_theta(u), V)
sigma_theta_pro.rename('sigma_theta [Pa]', 'sigma_theta [Pa]')
df.File(save_path + 'sigma_theta.pvd') << sigma_theta_pro

# compute von Mises stress
def burgers2d(run, nu, ngx, ngy, dt, T, ngx_out, ngy_out, save_dir, save_every, save_pvd=False, save_vector=False, plot=False, order=4): """simulate 2D Burgers' equation https://www.firedrakeproject.org/demos/burgers.py.html Args: run (int): # run nu (float): viscosity ngx (int): # grid in x axis ngy (int): dt (float): time step for simulation T (float): simulation time from 0 to T ngx_out (int): output # grid in x axis ngy_out (int): output # grid in y axis save_dir (str): runs folder order (int): order for sampling initial U save_every (int): save frequency in terms of # dt save_pvd (bool): save the field as vtk file for paraview save_vector (bool): save fenics field vector for later operation plot (bool): plot fields """ assert not (save_pvd and save_vector), 'wasting memory to save pvd & vector' save_dir = save_dir + f'/run{run}' mkdirs(save_dir) mesh = df.UnitSquareMesh(ngx-1, ngy-1) mesh_out = df.UnitSquareMesh(ngx_out-1, ngy_out-1) V = df.VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=PeriodicBoundary()) Vout = df.VectorFunctionSpace(mesh_out, 'CG', 1, constrained_domain=PeriodicBoundary()) # initial vector field u0, lam, c = init_field_fenics(mesh, V, order=order, seed=run) np.savez(save_dir + '/init_lam_c.npz', lam=lam, c=c) u = df.Function(V) u_old = df.Function(V) v = df.TestFunction(V) u = df.project(u0, V) u_old.assign(u) # backward Euler F = (df.inner((u - u_old)/dt, v) \ + df.inner(df.dot(u, df.nabla_grad(u)), v) \ + nu*df.inner(df.grad(u), df.grad(v)))*df.dx t = 0 k = 0 vtkfile = df.File(save_dir + f'/soln{ngx_out}x{ngy_out}_.pvd') u_out = df.project(u, Vout) u_out.rename('u', 'u') # (2, ngy_out, ngx_out) ? u_out_vertex = u_out.compute_vertex_values(mesh_out).reshape(2, ngx_out, ngy_out) np.save(save_dir + f'/u{k}.npy', u_out_vertex) # if plot: # plot_row([u_out_vertex[0], u_out_vertex[1]], save_dir, f'u{k}', # same_range=False, plot_fn='imshow', cmap='jet') if save_pvd: vtkfile << (u_out, t) elif save_vector: u_out_vector = u_out.vector().get_local() np.save(save_dir + f'/u{k}_fenics_vec.npy', u_out_vector) # u_vec_load = np.load(save_dir + f'/u{k}.npy') # u_load = Function(Vout) # u_load.vector().set_local(u_vec_load) # not much log df.set_log_level(30) tic = time.time() while t < T: t += dt k += 1 df.solve(F == 0, u) u_old.assign(u) u_out = df.project(u, Vout) u_out.rename('u', 'u') if k % save_every == 0: u_out_vertex = u_out.compute_vertex_values(mesh_out).reshape(2, ngx_out, ngy_out) np.save(save_dir + f'/u{k}.npy', u_out_vertex) # if k % (10 * save_every) == 0 and plot: # plot_row([u_out_vertex[0], u_out_vertex[1]], save_dir, f'u{k}', # same_range=False, plot_fn='imshow', cmap='jet') if save_pvd: vtkfile << (u_out, t) elif save_vector: u_out_vector = u_out.vector().get_local() np.save(save_dir + f'/u{k}_fenics_vec.npy', u_out_vector) print(f'Run {run}: solved {k} steps with total {time.time()-tic:.3f} seconds') return time.time() - tic
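A hedged driver example for burgers2d; the argument values below are illustrative only, and mkdirs, PeriodicBoundary and init_field_fenics are assumed to be defined in the surrounding module.

elapsed = burgers2d(run=0, nu=0.005, ngx=65, ngy=65, dt=0.005, T=2.0,
                    ngx_out=33, ngy_out=33, save_dir='./runs',
                    save_every=10, save_pvd=False, save_vector=True,
                    plot=False, order=4)
print(f'run 0 finished in {elapsed:.1f} s')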
def comp_axisymmetric_heat_dirichlet(mat_obj, mesh_obj, bc, omega, save_path):
    E = mat_obj.E
    rho = mat_obj.rho
    nu = mat_obj.nu

    mesh = mesh_obj.create()
    Rext = mesh_obj.Rext
    Rint = mesh_obj.Rint
    G = 1  # FAKE

    # ======
    # Thermal load
    # ======
    alpha = 1
    T = 1

    # ======
    # Thickness profile
    # ======
    h = 1

    # ======
    # markers
    # ======
    cell_markers, facet_markers = define_markers(mesh, Rext, Rint)

    # rename x[0], x[1] by x, y
    x, y = df.SpatialCoordinate(mesh)
    dim = mesh.topology().dim()
    coord = mesh.coordinates()

    # ======
    # Create function space
    # ======
    # Create mesh and define function space
    V = df.FunctionSpace(mesh, "CG", 1)

    # Define boundary condition (homogeneous BC)
    u0 = df.Constant(0.0)
    if bc == 'CC':
        bc = [df.DirichletBC(V, u0, facet_markers, i) for i in [1, 2]]
    elif bc == 'CF':
        bc = df.DirichletBC(V, u0, facet_markers, 2)

    # Define variational problem
    du = df.TrialFunction(V)
    tu = df.TestFunction(V)
    # displacement in radial direction u(x,y)
    u = df.Function(V, name='displacement')

    class THETA(df.UserExpression):
        def eval(self, values, x):
            values[0] = math.atan2(x[1], x[0])

        def value_shape(self):
            # return (1,)  # vector
            return ()  # scalar

    theta = THETA(degree=1)
    # theta_int = df.interpolate(theta, df.FunctionSpace(mesh, "DG", 0))
    # df.File(save_path + 'theta.pvd') << theta_int

    class RADIUS(df.UserExpression):
        def eval(self, values, x):
            values[0] = math.sqrt(x[0] * x[0] + x[1] * x[1])

        def value_shape(self):
            return ()  # scalar

    r = RADIUS(degree=1)

    # ======
    # Define weak form
    # ======
    def d_dr(du):
        return 1.0 / r * (x * df.Dx(du, 0) + y * df.Dx(du, 1))

    def d_dtheta(du):
        return -y * df.Dx(du, 0) + x * df.Dx(du, 1)

    # strain radial
    def epsilon_r(du):
        return d_dr(du)

    # strain circumferential
    def epsilon_theta(du):
        return du / r

    def sigma_r(du):
        return E / (1.0 - nu**2) * ((epsilon_r(du) - alpha * T)
                                    + nu * (epsilon_theta(du) - alpha * T))

    def sigma_theta(du):
        return E / (1.0 - nu**2) * ((epsilon_theta(du) - alpha * T)
                                    + nu * (epsilon_r(du) - alpha * T))

    # weak form
    dF = -sigma_r(du) * r * h * epsilon_r(tu) * df.dx
    dF = -dF + sigma_theta(du) * h * tu * df.dx
    dF = dF + rho * omega**2 * r**2 * h * tu * df.dx

    # residual
    F = df.action(dF, u)

    # solve
    df.solve(F == 0, u, bc)

    # displacement
    df.File(save_path + 'u.pvd') << u

    # ======
    # Analyze
    # ======
    # compute stresses
    sigma_r_pro = df.project(sigma_r(u), V)
    sigma_r_pro.rename('sigma_r [Pa]', 'sigma_r [Pa]')
    df.File(save_path + 'sigma_r.pvd') << sigma_r_pro

    sigma_theta_pro = df.project(sigma_theta(u), V)
    sigma_theta_pro.rename('sigma_theta [Pa]', 'sigma_theta [Pa]')
    df.File(save_path + 'sigma_theta.pvd') << sigma_theta_pro

    # compute von Mises stress
    def von_mises_stress(sigma_r, sigma_theta):
        return df.sqrt(sigma_r**2 + sigma_theta**2 - sigma_r * sigma_theta)

    von_stress_pro = df.project(von_mises_stress(sigma_r(u), sigma_theta(u)), V)
    von_stress_pro.rename('von Mises Stress [Pa]', 'von Mises Stress [Pa]')
    df.File(save_path + 'von_mises_stress.pvd') << von_stress_pro

    # save results to h5
    rfile = df.HDF5File(mesh.mpi_comm(), save_path + 'results.h5', "w")
    rfile.write(u, "u")
    rfile.write(sigma_r_pro, "sigma_r")
    rfile.write(sigma_theta_pro, "sigma_theta")
    rfile.write(von_stress_pro, "von_mises_stress")
    rfile.close()
def run_model(kappa, forcing, function_space, boundary_conditions=None):
    """
    Solve the complex-valued Helmholtz equation as a coupled system:
    one equation for the real part of the solution and one for the
    imaginary part.
    """
    mesh = function_space.mesh()

    kappa_sq = kappa**2

    if boundary_conditions is None:
        bndry_obj = dl.CompiledSubDomain("on_boundary")
        boundary_conditions = [['dirichlet', bndry_obj, [0, 0]]]

    num_bndrys = len(boundary_conditions)
    boundaries = mark_boundaries(mesh, boundary_conditions)
    dirichlet_bcs = collect_dirichlet_boundaries(
        function_space, boundary_conditions, boundaries)

    # To express integrals over the boundary parts using ds(i), we must first
    # redefine the measure ds in terms of our boundary markers:
    ds = dl.Measure('ds', domain=mesh, subdomain_data=boundaries)
    # dx = dl.Measure('dx', domain=mesh)
    dx = dl.dx

    # TrialFunctions/TestFunctions (plural) are needed to split the
    # mixed real/imaginary space into its components
    (pr, pi) = dl.TrialFunctions(function_space)
    (vr, vi) = dl.TestFunctions(function_space)

    # real part
    bilinear_form = kappa_sq * (pr * vr - pi * vi) * dx
    bilinear_form += (-dl.inner(dl.nabla_grad(pr), dl.nabla_grad(vr)) +
                      dl.inner(dl.nabla_grad(pi), dl.nabla_grad(vi))) * dx
    # imaginary part
    bilinear_form += kappa_sq * (pr * vi + pi * vr) * dx
    bilinear_form += -(dl.inner(dl.nabla_grad(pr), dl.nabla_grad(vi)) +
                       dl.inner(dl.nabla_grad(pi), dl.nabla_grad(vr))) * dx

    for ii in range(num_bndrys):
        if (boundary_conditions[ii][0] == 'robin'):
            alpha_real, alpha_imag = boundary_conditions[ii][3]
            bilinear_form -= alpha_real * (pr * vr - pi * vi) * ds(ii)
            bilinear_form -= alpha_imag * (pr * vi + pi * vr) * ds(ii)

    forcing_real, forcing_imag = forcing
    rhs = (forcing_real * vr + forcing_real * vi +
           forcing_imag * vr - forcing_imag * vi) * dx

    for ii in range(num_bndrys):
        if ((boundary_conditions[ii][0] == 'robin') or
                (boundary_conditions[ii][0] == 'neumann')):
            beta_real, beta_imag = boundary_conditions[ii][2]
            # real part of robin boundary conditions
            rhs += (beta_real * vr - beta_imag * vi) * ds(ii)
            # imag part of robin boundary conditions
            rhs += (beta_real * vi + beta_imag * vr) * ds(ii)

    # compute solution
    p = dl.Function(function_space)
    # solve(a == L, p)
    dl.solve(bilinear_form == rhs, p, bcs=dirichlet_bcs)
    return p
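A usage sketch, not part of the original code: the coupled real/imaginary formulation needs a two-component function space, built here from a vector element, and a forcing supplied as a (real, imaginary) pair. It assumes dolfin is imported as dl and the module's helper routines are available.

mesh = dl.UnitSquareMesh(32, 32)
element = dl.VectorElement("CG", mesh.ufl_cell(), 1, dim=2)
function_space = dl.FunctionSpace(mesh, element)
kappa = dl.Constant(10.0)
forcing = (dl.Constant(1.0), dl.Constant(0.0))  # (real, imaginary) parts
p = run_model(kappa, forcing, function_space)
p_real, p_imag = p.split(deepcopy=True)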
def main(traction, outfile='displacement.json'): # Create the Beam geometry # Length L = 10 # Width W = 1 print('Got traction of {} kN'.format(traction)) # Create mesh mesh = dolfin.BoxMesh(dolfin.Point(0, 0, 0), dolfin.Point(L, W, W), 30, 3, 3) # Mark boundary subdomians left = dolfin.CompiledSubDomain("near(x[0], side) && on_boundary", side=0) bottom = dolfin.CompiledSubDomain("near(x[2], side) && on_boundary", side=0) boundary_markers = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim() - 1) boundary_markers.set_all(0) left_marker = 1 bottom_marker = 2 left.mark(boundary_markers, left_marker) bottom.mark(boundary_markers, bottom_marker) f = dolfin.File('boundary_markers.pvd') f << boundary_markers P2 = dolfin.VectorElement("Lagrange", mesh.ufl_cell(), 2) P1 = dolfin.FiniteElement("Lagrange", mesh.ufl_cell(), 1) state_space = dolfin.FunctionSpace(mesh, P2 * P1) state = dolfin.Function(state_space) state_test = dolfin.TestFunction(state_space) u, p = dolfin.split(state) v, q = dolfin.split(state_test) # Some mechanical quantities I = dolfin.Identity(3) gradu = dolfin.grad(u) F = dolfin.variable(I + gradu) J = dolfin.det(F) # Material properites mu = dolfin.Constant(100.0) lmbda = dolfin.Constant(1.0) epsilon = 0.5 * (gradu + gradu.T) # Strain energy W = lmbda / 2 * (dolfin.tr(epsilon)**2) \ + mu * dolfin.tr(epsilon * epsilon) internal_energy = W - p * (J - 1) # Neumann BC N = dolfin.FacetNormal(mesh) p_bottom = dolfin.Constant(traction) external_work = dolfin.inner(v, p_bottom * dolfin.cofac(F) * N) \ * dolfin.ds(bottom_marker, subdomain_data=boundary_markers) # Virtual work G = dolfin.derivative(internal_energy * dolfin.dx, state, state_test) + external_work # Anchor the left side bcs = dolfin.DirichletBC(state_space.sub(0), dolfin.Constant((0.0, 0.0, 0.0)), left) # Traction at the bottom of the beam dolfin.solve(G == 0, state, [bcs]) # Get displacement and hydrostatic pressure u, p = state.split(deepcopy=True) point = np.array([10.0, 0.5, 1.0]) disp = np.zeros(3) u.eval(disp, point) print(('Get z-position of point ({}): {:.4f} mm' '').format(', '.join(['{:.1f}'.format(p) for p in point]), point[2] + disp[2])) with open(outfile, 'w') as f: json.dump({ 'point': point.tolist(), 'displacement': disp.tolist() }, f, indent=4) print('Output saved to {}'.format(outfile)) V = dolfin.VectorFunctionSpace(mesh, "CG", 1) u_int = dolfin.interpolate(u, V) moved_mesh = dolfin.Mesh(mesh) dolfin.ALE.move(mesh, u_int) f = dolfin.File('mesh.pvd') f << mesh f = dolfin.File('bending_beam.pvd') f << moved_mesh
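A short, hypothetical driver for the beam example above; the traction value and output file name are placeholders. Design note: the pressure unknown p enters the energy only through the -p*(J - 1) term, i.e. it acts as a Lagrange multiplier enforcing incompressibility.

if __name__ == '__main__':
    main(traction=5.0, outfile='displacement_5kN.json')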
def femsolve():
    ''' Bilinear form: a(u,v) = L(v)

        a(u,v) = (inner(grad(u), grad(v)) + u*v)*dx
        L(v)   = f*v*dx - g*v*ds
        g(x)   = -du/dx = -u1, x = x1
        u(x0)  = u0
        Omega  = {x in R | x0 <= x <= x1}
    '''
    from dolfin import UnitInterval, FunctionSpace, DirichletBC, TrialFunction
    from dolfin import TestFunction, grad, Constant, Function, solve, inner, dx, ds
    from dolfin import MeshFunction, assemble
    import dolfin
    # from dolfin import set_log_level, PROCESS

    # Create mesh and define function space
    mesh = UnitInterval(30)
    V = FunctionSpace(mesh, 'Lagrange', 2)

    boundaries = MeshFunction('uint', mesh, mesh.topology().dim()-1)
    boundaries.set_all(0)

    class Left(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            tol = 1E-14  # tolerance for coordinate comparisons
            return on_boundary and abs(x[0]) < tol

    class Right(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            return dolfin.near(x[0], 1.0)

    left = Left()
    right = Right()
    left.mark(boundaries, 1)
    right.mark(boundaries, 2)

    # def u0_boundary(x):
    #     return abs(x[0]) < tol
    #
    # bc = DirichletBC(V, Constant(u0), lambda x: abs(x[0]) < tol)
    bcs = [DirichletBC(V, Constant(u0), boundaries, 1)]

    # Define variational problem
    u = TrialFunction(V)
    v = TestFunction(V)
    a = (inner(grad(u), grad(v)) + u*v)*dx
    g = Constant(-u1)
    L = Constant(f)*v*dx - g*v*ds(2)

    # set_log_level(PROCESS)

    # Compute solution
    A = assemble(a, exterior_facet_domains=boundaries)
    b = assemble(L, exterior_facet_domains=boundaries)
    for bc in bcs:
        bc.apply(A, b)
    u = Function(V)
    solve(A, u.vector(), b, 'lu')

    coor = mesh.coordinates()
    u_array = u.vector().array()
    a = []
    b = []
    for i in range(mesh.num_vertices()):
        a.append(coor[i])
        b.append(u_array[i])
        print('u(%3.2f) = %0.14E' % (coor[i], u_array[i]))

    import numpy as np
    np.savez('fem', a, b)
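The function above targets a legacy DOLFIN API (UnitInterval, MeshFunction('uint'), vector().array(), assemble(..., exterior_facet_domains=...)). Below is a rough equivalent for later legacy-FEniCS releases, sketched under the assumption that u0, u1 and f are plain floats; it is an illustration, not the original author's code.

from dolfin import (IntervalMesh, FunctionSpace, DirichletBC, TrialFunction,
                    TestFunction, Constant, Function, MeshFunction, Measure,
                    SubDomain, grad, inner, dx, near, solve)

u0, u1, f = 1.0, 2.0, 1.0          # illustrative data
mesh = IntervalMesh(30, 0.0, 1.0)  # replaces the removed UnitInterval
V = FunctionSpace(mesh, 'Lagrange', 2)

boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1, 0)

class Right(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 1.0)

Right().mark(boundaries, 2)
ds = Measure('ds', domain=mesh, subdomain_data=boundaries)

bc = DirichletBC(V, Constant(u0), 'near(x[0], 0.0)')
u, v = TrialFunction(V), TestFunction(V)
a = (inner(grad(u), grad(v)) + u*v)*dx
L = Constant(f)*v*dx - Constant(-u1)*v*ds(2)

uh = Function(V)
solve(a == L, uh, bc)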
def plot_bulk_electrostriction(E, direction, q_b, materials, mesh, power_opt): """ plots bulk electrostriction in 2D for the whole simulation space the solution is first projected into Lagrange basis """ from dolfin import (VectorElement, FunctionSpace, TrialFunction, Function, TestFunction, split, dot, inner, lhs, rhs, solve, plot) import matplotlib.pyplot as plt # bulk electrostriction V = VectorElement("Lagrange", mesh.ufl_cell(), 1, dim = 3) VComplex = FunctionSpace( mesh, V*V) u = TrialFunction(VComplex) (ur, ui) = split(u) v = TestFunction(VComplex) (vr, vi) = split(v) f_bulk = Function(VComplex) if not isinstance(materials, collections.Iterable): (fr_bulk, fi_bulk) = bulk_electrostriction(E, materials.em.e_r, materials.em.p, direction, q_b) F = dot(vr,fr_bulk)*materials.domain + dot(vi,fi_bulk)*materials.domain F -= inner(vr,ur)*materials.domain + inner(vi,ui)*materials.domain else: for idx, material in enumerate(materials): if idx == 0: (fr_bulk, fi_bulk) = bulk_electrostriction(E, material.em.e_r, material.em.p, direction, q_b) F = dot(vr,fr_bulk)*material.domain + dot(vi,fi_bulk)*material.domain F -= inner(vr,ur)*material.domain + inner(vi,ui)*material.domain else: (fr_bulk, fi_bulk) = bulk_electrostriction(E, material.em.e_r, material.em.p, direction, q_b) F += dot(vr,fr_bulk)*material.domain + dot(vi,fi_bulk)*material.domain F -= inner(vr,ur)*material.domain + inner(vi,ui)*material.domain scaling = 1.0/(power_opt*1e3)*1e12 ##pN/(um^2mW) a = lhs(F) L = rhs(F) solve(a==L, f_bulk) w0 = f_bulk.compute_vertex_values(mesh) nv = mesh.num_vertices() w0 = [w0[i * nv: (i + 1) * nv] for i in range(3)] U = w0[0]*scaling V = w0[1]*scaling #W = w0[2] XY = mesh.coordinates() X = XY[:,0] Y = XY[:,1] #Z = np.zeros(nv) # make a pretty plot fig = plt.figure() ax = fig.gca() Q1 = ax.quiver(X,Y, U,V, scale=4000, scale_units='inches') plt.quiverkey(Q1, 0.4, 0.9, 1000.0, r'$ 1000 \frac{pN}{\mu m^3 mW} Re(f_{x,y})$', labelpos='E', coordinates='figure', fontproperties={'size': 24}) plt.xlabel('x [$\mu$m]', fontsize=24, rotation = 0) plt.ylabel('y [$\mu$m]', fontsize=24) plt.tick_params(labelsize=24) plt.show() return fig
def compute_Mij(Mij, G_matr, G_under, Sijmats, Sijcomps, Sijfcomps, delta_CG1_sq, tensdim, alphaval=None, u_nf=None, u_f=None, Nij=None, **NS_namespace): """ Manually compute the tensor Mij = 2*delta**2*(F(|S|Sij)-alpha**2*F(|S|)F(Sij) """ Sij = Sijcomps Sijf = Sijfcomps alpha = alphaval deltasq = 2*delta_CG1_sq.vector().array() # Apply pre-assembled matrices and compute right hand sides if tensdim == 3: Ax, Ay = Sijmats u = u_nf[0].vector() v = u_nf[1].vector() uf = u_f[0].vector() vf = u_f[1].vector() # Unfiltered rhs bu = [Ax*u, 0.5*(Ay*u + Ax*v), Ay*v] # Filtered rhs buf = [Ax*uf, 0.5*(Ay*uf + Ax*vf), Ay*vf] else: Ax, Ay, Az = Sijmats u = u_nf[0].vector() v = u_nf[1].vector() w = u_nf[2].vector() uf = u_f[0].vector() vf = u_f[1].vector() wf = u_f[2].vector() bu = [Ax*u, 0.5*(Ay*u + Ax*v), 0.5*(Az*u + Ax*w), Ay*v, 0.5*(Az*v + Ay*w), Az*w] buf = [Ax*uf, 0.5*(Ay*uf + Ax*vf), 0.5*(Az*uf + Ax*wf), Ay*vf, 0.5*(Az*vf + Ay*wf), Az*wf] for i in xrange(tensdim): # Solve for the different components of Sij solve(G_matr, Sij[i].vector(), bu[i], "cg", "default") # Solve for the different components of F(Sij) solve(G_matr, Sijf[i].vector(), buf[i], "cg", "default") # Compute magnitudes of Sij and Sijf magS = mag(Sij, tensdim) magSf = mag(Sijf, tensdim) # Loop over components and add to Mij for i in xrange(tensdim): # Compute |S|*Sij Mij[i].vector().set_local(magS*Sij[i].vector().array()) Mij[i].vector().apply("insert") # Compute F(|S|*Sij) tophatfilter(unfiltered=Mij[i], filtered=Mij[i], **vars()) # Check if Nij, assign F(|S|Sij) if not None if Nij != None: Nij[i].vector().zero() Nij[i].vector().axpy(1.0, Mij[i].vector()) # Compute 2*delta**2*(F(|S|Sij) - alpha**2*F(|S|)F(Sij)) and add to Mij[i] Mij[i].vector().set_local(deltasq*(Mij[i].vector().array()-(alpha**2)*magSf*Sijf[i].vector().array())) Mij[i].vector().apply("insert") # Return magS for use when updating nut_ return magS
# Define Dirichlet boundary conditions at top and bottom boundaries bcs = [ DirichletBC(V, u5, boundaries.where_equal(boundary["TOP"][0])), DirichletBC(V, u0, boundaries.where_equal(boundary["BOTTOM"][0])), ] dx = dx(subdomain_data=domains) ds = ds(subdomain_data=boundaries) # Define variational form F = (inner(a0 * grad(u), grad(v)) * dx(boundary["DOMAIN"][0]) + inner(a1 * grad(u), grad(v)) * dx(boundary["OBSTACLE"][0]) - g_L * v * ds(boundary["LEFT"][0]) - g_R * v * ds(boundary["RIGHT"][0]) - f * v * dx(boundary["DOMAIN"][0]) - f * v * dx(boundary["OBSTACLE"][0])) # Separate left and right hand sides of equation a, L = lhs(F), rhs(F) # Solve problem u = Function(V) solve(a == L, u, bcs) bb_tree = cpp.geometry.BoundingBoxTree(mesh, 2) print(u([0.5, 0.5], bb_tree)[0]) # print((u.vector().array)) file = XDMFFile(MPI.comm_world, "input/saved_function.xdmf") file.write(u) pass
def teXXXst_fenics_vector(): # quad_degree = 13 # dolfin.parameters["form_compiler"]["quadrature_degree"] = quad_degree pi = 3.14159265358979323 k1, k2 = 2, 3 EV = pi * pi * (k1 * k1 + k2 * k2) N = 11 degree = 1 mesh = UnitSquare(N, N) fs = FunctionSpace(mesh, "CG", degree) ex = Expression("A*sin(k1*pi*x[0])*sin(k2*pi*x[1])", k1=k1, k2=k2, A=1.0) x = FEniCSVector(interpolate(ex, fs)) # print "x.coeff", x.coeffs.array() ex.A = EV b_ex = assemble_rhs(ex, fs) bexg = interpolate(ex, fs) # print b_ex.array() # print b_ex.array() / (2 * pi * pi * x.coeffs.array()) Afe = assemble_lhs(Expression('1'), fs) # apply discrete operator on (interpolated) x A = FEniCSOperator(Afe, x.basis) b = A * x # evaluate solution for eigenfunction rhs if False: b_num = Function(fs) solve(A, b_num.vector(), b_ex) bnv = A * b_num.vector() b3 = Function(fs, bnv / EV) np.set_printoptions(threshold='nan', suppress=True) print b.coeffs.array() print np.abs((b_ex.array() - b.coeffs.array()) / np.max(b_ex.array())) print np.max(np.abs((b_ex.array() - b.coeffs.array()) / np.max(b_ex.array()))) #print b_ex.array() / (M * interpolate(ex1, fs).vector()).array() # #assert_array_almost_equal(b.coeffs, b_ex.coeffs) b2 = Function(fs, b_ex.copy()) bg = Function(fs, b_ex.copy()) b2g = Function(fs, b_ex.copy()) G = assemble_gramian(x.basis) dolfin.solve(G, bg.vector(), b.coeffs) dolfin.solve(G, b2g.vector(), b2.vector()) # # compute eigenpairs numerically # eigensolver = evaluate_evp(FEniCSBasis(fs)) # # Extract largest (first) eigenpair # r, c, rx, cx = eigensolver.get_eigenpair(0) # print "Largest eigenvalue: ", r # # Initialize function and assign eigenvector # ef0 = Function(fs) # ef0.vector()[:] = rx if False: # export out_b = dolfin.File(__name__ + "_b.pvd", "compressed") out_b << b._fefunc out_b_ex = dolfin.File(__name__ + "_b_ex.pvd", "compressed") out_b_ex << b2 out_b_num = dolfin.File(__name__ + "_b_num.pvd", "compressed") out_b_num << b_num #dolfin.plot(x._fefunc, title="interpolant x", rescale=False, axes=True, legend=True) dolfin.plot(bg, title="b", rescale=False, axes=True, legend=True) dolfin.plot(b2g, title="b_ex (ass/G)", rescale=False, axes=True, legend=True) dolfin.plot(bexg, title="b_ex (dir)", rescale=False, axes=True, legend=True) #dolfin.plot(b_num, title="b_num", rescale=False, axes=True, legend=True) # dolfin.plot(b3, title="M*b_num", rescale=False, axes=True, legend=True) #dolfin.plot(ef0, title="ef0", rescale=False, axes=True, legend=True) print dolfin.errornorm(u=b._fefunc, uh=b2) #, norm_type, degree, mesh) dolfin.interactive()
def calculate_fiber_strain(fib, e_circ, e_rad, e_long, strain_markers, mesh, strains): import dolfin from dolfin import ( Measure, Function, TensorFunctionSpace, VectorFunctionSpace, TrialFunction, TestFunction, inner, assemble_system, solve, ) dX = dolfin.Measure("dx", subdomain_data=strain_markers, domain=mesh) fiber_space = fib.function_space() strain_space = dolfin.VectorFunctionSpace(mesh, "R", 0, dim=3) full_strain_space = dolfin.TensorFunctionSpace(mesh, "R", 0) fib1 = dolfin.Function(strain_space) e_c1 = dolfin.Function(strain_space) e_r1 = dolfin.Function(strain_space) e_l1 = dolfin.Function(strain_space) mean_coords, coords = get_regional_midpoints(strain_markers, mesh) # ax = plt.subplot(111, projection='3d') region = 1 fiber_strain = [] for region in range(1, 18): # For each region # Find the average unit normal in the fiber direction u = dolfin.TrialFunction(strain_space) v = TestFunction(strain_space) a = inner(u, v) * dX(region) L_fib = inner(fib, v) * dX(region) A, b = assemble_system(a, L_fib) solve(A, fib1.vector(), b) fib1_norm = np.linalg.norm(fib1.vector().array()) # Unit normal fib1_arr = fib1.vector().array() / fib1_norm # Find the average unit normal in Circumferential direction u = TrialFunction(strain_space) v = TestFunction(strain_space) a = inner(u, v) * dX(region) L_c = inner(e_circ, v) * dX(region) A, b = assemble_system(a, L_c) solve(A, e_c1.vector(), b) e_c1_norm = np.linalg.norm(e_c1.vector().array()) # Unit normal e_c1_arr = e_c1.vector().array() / e_c1_norm # Find the averag unit normal in Radial direction u = TrialFunction(strain_space) v = TestFunction(strain_space) a = inner(u, v) * dX(region) L_r = inner(e_rad, v) * dX(region) A, b = assemble_system(a, L_r) solve(A, e_r1.vector(), b) e_r1_norm = np.linalg.norm(e_r1.vector().array()) # Unit normal e_r1_arr = e_r1.vector().array() / e_r1_norm # Find the average unit normal in Longitudinal direction u = TrialFunction(strain_space) v = TestFunction(strain_space) a = inner(u, v) * dX(region) L_l = inner(e_long, v) * dX(region) A, b = assemble_system(a, L_l) solve(A, e_l1.vector(), b) e_l1_norm = np.linalg.norm(e_l1.vector().array()) # Unit normal e_l1_arr = e_l1.vector().array() / e_l1_norm # ax.plot([mean_coords[region][0], mean_coords[region][0]+e_c1_arr[0]],[mean_coords[region][1], mean_coords[region][1]+e_c1_arr[1]], [mean_coords[region][2],mean_coords[region][2]+e_c1_arr[2]], 'b', label = "circ") # ax.plot([mean_coords[region][0],mean_coords[region][0]+e_r1_arr[0]],[mean_coords[region][1], mean_coords[region][1]+e_r1_arr[1]], [mean_coords[region][2],mean_coords[region][2]+e_r1_arr[2]] , 'r',label = "rad") # ax.plot([mean_coords[region][0],mean_coords[region][0]+e_l1_arr[0]],[mean_coords[region][1], mean_coords[region][1]+e_l1_arr[1]], [mean_coords[region][2],mean_coords[region][2]+e_l1_arr[2]] , 'g',label = "long") # ax.plot([mean_coords[region][0],mean_coords[region][0]+fib1_arr[0]],[mean_coords[region][1], mean_coords[region][1]+fib1_arr[1]], [mean_coords[region][2],mean_coords[region][2]+fib1_arr[2]] , 'y', label = "fib") fiber_strain_region = [] for strain in strains[region]: mat = np.array([ strain[0] * e_c1_arr, strain[1] * e_r1_arr, strain[2] * e_l1_arr ]).T fiber_strain_region.append(np.linalg.norm(np.dot(mat, fib1_arr))) fiber_strain.append(fiber_strain_region) # for i in range(18): # ax.scatter3D(coords[i][0], coords[i][1], coords[i][2], s = 0.1) # plt.show() return fiber_strain
def solve_subdomains(self, **kwargs):
    # FIXME: do it in parallel
    for s in self.subdomains:
        solve(s.a == s.L, s.solution, s.bcs,
              solver_parameters=s.solver_params)
def bottom(x): return np.abs(x[0] + width / 2) < dolfin.DOLFIN_EPS print('ugly stuff took {:.2f} s'.format(time.time() - t_ini)) nodal_space = dolfin.FunctionSpace(mesh, 'Lagrange', 1) (L_i, ) = dolfin.TestFunctions(nodal_space) (L_j, ) = dolfin.TrialFunctions(nodal_space) bc_ground = dolfin.DirichletBC(nodal_space, dolfin.Constant(0.0), markers, other) bc_source = dolfin.DirichletBC(nodal_space, dolfin.Constant(1.0), markers, metal) rho = dolfin.Constant(0.0) A_ij = dolfin.inner(dolfin.grad(L_i), dolfin.grad(L_j)) * dolfin.dx b_ij = rho * L_j * dolfin.dx A = dolfin.assemble(A_ij) b = dolfin.assemble(b_ij) bc_ground.apply(A, b) bc_source.apply(A, b) phi = dolfin.Function(nodal_space) c = phi.vector() print('before solve took {:.2f} s'.format(time.time() - t_ini)) t_ini = time.time() dolfin.solve(A, c, b) file = dolfin.File('test.pvd') file << phi print('solve and write took {:.2f} s'.format(time.time() - t_ini))
fname_vel=vel_file, fname_pressure=p_file, fname_hdf5=hdf5_file, fname_xdmf=xdmf_file) solver.solve(save_freq=save_freq) else: problem = fm.SolidMechanicsProblem(config) solver = fm.SolidMechanicsSolver(problem, fname_disp=disp_file, fname_pressure=p_file, fname_hdf5=hdf5_file, fname_xdmf=xdmf_file) solver.full_solve(save_freq=save_freq) # Compute the final volume if args.compute_volume: W1 = dlf.VectorFunctionSpace(problem.mesh, 'CG', 1) xi1 = dlf.TestFunction(W1) du1 = dlf.TrialFunction(W1) u_move = dlf.Function(W1) move_bcs = dlf.DirichletBC(W1, dlf.Constant([0.0] * args.dim), problem.boundaries, CLIP) a = dlf.dot(xi1, du1) * dlf.dx L = dlf.dot(xi1, problem.displacement) * dlf.dx dlf.solve(a == L, u_move, move_bcs) ale = dlf.ALE() ale.move(problem.mesh, u_move) print ("Total volume after: ", \ dlf.assemble(dlf.Constant(1.0)*dlf.dx(domain=problem.mesh)))
if SUPG_stabilization:
    alpha = 2
    h = 1 / N
    magnitude = 1
    Pe = magnitude * h / (2.0 * mu)
    tau = h / (2.0 * magnitude) * (1.0 / np.tanh(Pe) - 1.0 / Pe)
    beta = df.Constant(tau * alpha)
    v = v + beta * h * v.dx(0)

F = df.Constant(mu) * inner(grad(u), grad(v)) * dx + inner(u.dx(0), v) * dx

bc1 = df.DirichletBC(V, df.Constant(0), left)
bc2 = df.DirichletBC(V, df.Constant(1), right)
# Neumann on y = 0 and y = 1 enforced implicitly

df.solve(df.lhs(F) == df.rhs(F), u_, bcs=[bc1, bc2])

# interpolate(u_e, VV)
# u_numerical = u_.vector().vec().array
# X = V.tabulate_dof_coordinates()
X = mesh.coordinates()
u_numerical = u_.compute_vertex_values(mesh)

# u_analytical = A*np.exp(B*X[:, 0]) + C
# u_analytical.shape = (N+1, N+1)
# u_numerical.shape = (N+1, N+1)
# e = (u_analytical-u_numerical)
# e.shape = (N+1, N+1)
# e = e[:, 1:-1]  # exclude Dirichlet BC
L1[i, j] = df.errornorm(u_, df.project(u_e, V), norm_type='H1')
L2[i, j] = df.errornorm(u_, df.project(u_e, V), norm_type='l2')

if N == 8:
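For reference, the stabilization parameter tau used in the fragment above is the standard streamline-upwind choice; a small, self-contained sketch of the same formula (the input values are illustrative) is:

import numpy as np

def supg_tau(h, vel_mag, mu):
    # element Peclet number and tau = h/(2|a|) * (coth(Pe) - 1/Pe)
    Pe = vel_mag * h / (2.0 * mu)
    return h / (2.0 * vel_mag) * (1.0 / np.tanh(Pe) - 1.0 / Pe)

print(supg_tau(h=1.0 / 64, vel_mag=1.0, mu=1e-3))  # e.g. an N = 64 grid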
def compute_dw(self, dm):
    """ Compute dw """
    setfct(self.dm, dm)
    b = assemble(self.rhswwk)
    solve(self.Mw, self.dw.vector(), b)
Ms.assign(df.Constant(1)) # just assembling it LLG = -gamma / (1 + alpha * alpha) * df.cross(m, Heff) - alpha * gamma / ( 1 + alpha * alpha) * df.cross(m, df.cross(m, Heff)) L = df.dot(LLG, df.TestFunction(S3)) * df.dP dmdt = df.Function(S3) start = time.time() for i in xrange(1000): df.assemble(L, tensor=dmdt.vector()) stop = time.time() print "delta = ", stop - start print dmdt.vector().array() # more linear algebra, same problem... still need to assemble the cross product # we're doing even more work than before a = df.dot(u, v) * df.dP A = df.assemble(a) b = df.Function(S3) dmdt = df.Function(S3) start = time.time() for i in xrange(1000): df.assemble(L, tensor=b.vector()) # this is what should go out of the loop df.solve(A, dmdt.vector(), b.vector()) # some variation of this could stay in stop = time.time() print "delta = ", stop - start print dmdt.vector().array()
def solve_cycle(state): print('Solving state:', state['name']) # u1.assign(state['u_prev']) u0.assign(state['u_last']) p0.assign(state['pressure']) rhs.assign(project(dt*(- u0.dx(0)*u0 - nu*u0.dx(0).dx(0)/2.0 - p0.dx(0)), Q)) plot(rhs, title='RHS') # rhs_nonlinear.assign(project(dt*(- u0.dx(0)*u0), Q)) # rhs_visc.assign(project(dt*(-nu*u0.dx(0).dx(0)/2.0), Q)) # rhs_pressure.assign(project(dt*(-p0.dx(0)), Q)) # plot(rhs_nonlinear, title='RHS nonlin') # plot(rhs_visc, title='RHS visc') # plot(rhs_pressure, title='RHS pressure') solve(a_tent == L_tent, u_tent_computed, bcu) if state['rot']: if state['null']: b = assemble(L_p_rot) null_space.orthogonalize(b) solve(A_p_rot, p_computed.vector(), b, 'cg') else: solve(a_p_rot == L_p_rot, p_computed, bcp) solve(a_cor_rot == L_cor_rot, u_cor_computed) div_u_tent.assign(project(-nu*u_tent_computed.dx(0), Vplot)) plot(div_u_tent, title=state['name']+'_div u_tent (pressure correction), t = ' +str(t)) # div_u_tent.assign(project(p0+p_computed-nu*state['p_tent'].dx(0), Q)) # plot(div_u_tent, title=state['name']+'_RHS (pressure correction), t = ' +str(t)) solve(a_rot == L_rot, p_cor_computed) p_correction.assign(p_cor_computed-p_computed-p0) plot(p_correction, title=state['name']+'_(computed pressure correction), t = ' +str(t)) print(' updating state') state['u_prev'].assign(state['u_last']) state['u_tent'].assign(u_tent_computed) state['u_last'].assign(u_cor_computed) state['pressure'].assign(p_cor_computed) state['p_tent'].assign(p_computed+p0) else: if state['null']: b = assemble(L_p) null_space.orthogonalize(b) print('new:', assemble((v_in_expr-u_tent_computed)*ds(2))) # plot(interpolate((v_in_expr-u_tent_computed)*ds(2), Q), title='new') # print(A_p.array()) # print(b.array()) solve(A_p, p_computed.vector(), b, 'gmres') else: solve(a_p == L_p, p_computed, bcp) solve(a_cor == L_cor, u_cor_computed) print(' updating state') # state['u_prev'].assign(state['u_last']) state['u_tent'].assign(u_tent_computed) state['u_last'].assign(u_cor_computed) state['pressure'].assign(p_computed)
def test_DoGIP_vs_FEniCS(self): print( '\n== testing DoGIP vs. FEniCS for problem of weighted projection ====' ) for dim, pol_order in itertools.product([2, 3], [1, 2]): print('dim={}; pol_order={}'.format(dim, pol_order)) N = 2 # no. of elements # creating MESH, defining MATERIAL and SOURCE if dim == 2: mesh = UnitSquareMesh(N, N) m = Expression("1+10*16*x[0]*(1-x[0])*x[1]*(1-x[1])", degree=4) f = Expression("80*x[0]*(0.5-x[0])*(1.-x[0])*x[1]*(1.-x[1])", degree=5) elif dim == 3: mesh = UnitCubeMesh(N, N, N) m = Expression("1+10*16*x[0]*(1-x[0])*(1-x[1])*x[2]", degree=4) f = Expression("80*x[0]*(0.5-x[0])*(1.-x[0])*x[1]*(1.-x[1])", degree=5) mesh.coordinates()[:] += 0.1 * np.random.random( mesh.coordinates().shape) # mesh perturbation ## standard approach with FEniCS ############################################# V = FunctionSpace(mesh, "CG", pol_order) # original FEM space bc = DirichletBC(V, Constant(0.0), lambda x, on_boundary: on_boundary) u, v = TrialFunction(V), TestFunction(V) u_fenics = Function(V) # the vector for storing the solution solve(m * inner(grad(u), grad(v)) * dx == f * v * dx, u_fenics, bc) # solution by FEniCS ## DoGIP - double-grid integration with interpolation-projection ############# W = FunctionSpace(mesh, "DG", 2 * (pol_order - 1)) # double-grid space Wvector = VectorFunctionSpace( mesh, "DG", 2 * (pol_order - 1)) # vector variant of double-grid space w = TestFunction(W) A_dogip = assemble( m * w * dx).get_local() # diagonal matrix of material coefficients A_dogip_full = np.einsum( 'i,jk->ijk', A_dogip, np.eye(dim)) # block-diagonal mat. for non-isotropic mat. bv = assemble(f * v * dx) bc.apply(bv) b = bv.get_local() # vector of right-hand side # assembling global interpolation-projection matrix B B = get_B(V, Wvector, problem=1) # solution to DoGIP problem def Afun(x): Axd = np.einsum('...jk,...j', A_dogip_full, B.dot(x).reshape((-1, dim))) Afunx = B.T.dot(Axd.ravel()) Afunx[list(bc.get_boundary_values() )] = 0 # application of Dirichlet BC return Afunx Alinoper = linalg.LinearOperator((b.size, b.size), matvec=Afun, dtype=np.float) # system matrix x, info = linalg.cg(Alinoper, b, x0=np.zeros_like(b), tol=1e-8, maxiter=1e2) # conjugate gradients # testing the difference between DoGIP and FEniCS self.assertAlmostEqual( 0, np.linalg.norm(u_fenics.vector().get_local() - x)) print('...ok')