def __init__(self, V, parameters=None):
    """Set up an L^2 projecter onto the function space ``V``.

    Assembles the mass matrix once and configures a reusable linear
    solver (direct LU or CG/ILU Krylov, chosen by the
    ``linear_solver_type`` parameter).

    Parameters
    ----------
    V : dolfin.FunctionSpace
        Target space of the projection.
    parameters : dict, optional
        Overrides merged into ``self.default_parameters()``.

    Raises
    ------
    ValueError
        If ``linear_solver_type`` is neither ``"lu"`` nor ``"cg"``.
    """
    # Set parameters (defaults first, then user overrides)
    self.parameters = self.default_parameters()
    if parameters is not None:
        self.parameters.update(parameters)

    # Set-up mass matrix for L^2 projection
    self.V = V
    self.u = df.TrialFunction(self.V)
    self.v = df.TestFunction(self.V)
    self.m = df.inner(self.u, self.v) * df.dx()
    self.M = df.assemble(self.m)
    self.b = df.Vector(V.mesh().mpi_comm(), V.dim())

    solver_type = self.parameters["linear_solver_type"]
    # Validate with an explicit exception rather than `assert`, which
    # is silently stripped when Python runs with -O.
    if solver_type not in ("lu", "cg"):
        raise ValueError(
            "Expecting 'linear_solver_type' to be 'lu' or 'cg'")

    if solver_type == "lu":
        df.debug("Setting up direct solver for projecter")
        # Customize LU solver (reuse everything)
        solver = df.LUSolver(self.M)
        solver.parameters["same_nonzero_pattern"] = True
        solver.parameters["reuse_factorization"] = True
    else:
        df.debug("Setting up iterative solver for projecter")
        # Customize Krylov solver (reuse everything)
        solver = df.KrylovSolver("cg", "ilu")
        solver.set_operator(self.M)
        solver.parameters["preconditioner"]["structure"] = "same"
        # solver.parameters["nonzero_initial_guess"] = True
    self.solver = solver
def __init__(self, V, kappa):
    '''Build a Helmholtz-type smoothing filter on the space `V`.

    Parameters
    ----------
    V : dolfin.FunctionSpace
        Function space.
    kappa : float
        Filter diffusivity constant (scaled by the squared smallest
        bounding-box extent of the mesh).
    '''
    if not isinstance(V, dolfin.FunctionSpace):
        raise TypeError(
            'Parameter `V` must be of type `dolfin.FunctionSpace`.')

    test_fn = dolfin.TestFunction(V)
    trial_fn = dolfin.TrialFunction(V)

    # Scale the diffusivity by the smallest extent of the mesh
    # bounding box so `kappa` is dimensionless.
    coords = V.mesh().coordinates()
    min_extent = (coords.max(0) - coords.min(0)).min()
    diffusivity = Constant(float(kappa) * min_extent**2)

    mass_form = trial_fn * test_fn * dx
    diffusion_form = diffusivity * dot(grad(trial_fn), grad(test_fn)) * dx

    system_matrix = assemble(mass_form + diffusion_form)
    self._M = assemble(mass_form)

    self.solver = dolfin.LUSolver(system_matrix, "mumps")
    self.solver.parameters["symmetric"] = True
def _create_linear_solver(self):
    """Helper function for creating linear solver based on parameters.

    Returns
    -------
    df.LUSolver or df.PETScKrylovSolver
        The configured solver. (The original annotation said ``-> None``,
        which contradicted the ``return solver`` at the end.)
    """
    solver_type = self._parameters["linear_solver_type"]

    if solver_type == "direct":
        solver = df.LUSolver(self._lhs_matrix)
    elif solver_type == "iterative":
        # Initialize KrylovSolver with matrix
        alg = self._parameters["algorithm"]
        prec = self._parameters["preconditioner"]

        solver = df.PETScKrylovSolver(alg, prec)
        solver.set_operator(self._lhs_matrix)
        solver.parameters["nonzero_initial_guess"] = True

        # Important! Attach the nullspace so the singular system is
        # solvable (pure-Neumann style problem).
        A = df.as_backend_type(self._lhs_matrix)
        A.set_nullspace(self.nullspace)
    else:
        df.error(
            "Unknown linear_solver_type given: {}".format(solver_type))

    return solver
def __init__(self, adjoint_a, L, z, bcs):
    '''Prepare a solver for the adjoint problem ``adjoint_a z = L``.

    "Adjoint" means the Dirichlet boundary conditions must be
    homogenized (zero-valued); both the type and the values of `bcs`
    are validated here.
    '''
    if bcs is None:
        bcs = ()
    elif not isinstance(bcs, (list, tuple)):
        bcs = (bcs, )

    # All boundary conditions must be homogenized DirichletBC's.
    if not all(isinstance(bc, DirichletBC) for bc in bcs):
        raise TypeError('Parameter `bcs` must contain '
                        'homogenized `DirichletBC`(\'s)')

    if any(any(bc.get_boundary_values().values()) for bc in bcs):
        raise ValueError('Parameter `bcs` must contain '
                         'homogenized `DirichletBC`(\'s)')

    V = z.function_space()
    v0 = dolfin.TestFunction(V)

    if adjoint_a is not None:
        # A zero dummy RHS is enough to let `assemble_system`
        # symmetrically apply the boundary conditions to the matrix.
        L_dummy = dot(v0, Constant((0.0, ) * len(z))) * dx
        K, _ = assemble_system(adjoint_a, L_dummy, bcs)
        self._solver = dolfin.LUSolver(K, "mumps")

    # Sorted tuple of all constrained dof indices.
    self._dof_bcs = tuple(sorted(
        dof for bc in bcs for dof in bc.get_boundary_values().keys()))

    self._x = z.vector()
    self._L = L
def __init__(self, a, L, u, bcs):
    '''Prepare a direct (MUMPS) solver for the problem ``a u = L``.'''
    if bcs is None:
        bcs = ()
    elif not isinstance(bcs, (list, tuple)):
        bcs = (bcs, )

    # All boundary conditions must be homogenized DirichletBC's.
    if not all(isinstance(bc, DirichletBC) for bc in bcs):
        raise TypeError('Parameter `bcs` must contain '
                        'homogenized `DirichletBC`(\'s)')

    V = u.function_space()
    v0 = dolfin.TestFunction(V)

    # A zero dummy RHS lets `assemble_system` symmetrically apply the
    # boundary conditions; keep the resulting RHS contribution.
    L_dummy = dot(v0, Constant((0.0, ) * len(u))) * dx
    K, self._rhs_bcs = assemble_system(a, L_dummy, bcs)
    self._solver = dolfin.LUSolver(K, "mumps")

    # Sorted tuple of all constrained dof indices.
    self._dof_bcs = tuple(sorted(
        dof for bc in bcs for dof in bc.get_boundary_values().keys()))

    self._x = u.vector()
    self._L = L
def setupSolver(self):
    """Inspect the form(s) in ``self.f`` and pick an appropriate solver.

    Sets ``self.n`` (number of forms), ``self.rank`` (1 for nonlinear
    residual forms whose lhs is zero, 2 for bilinear forms),
    ``self.linear`` and ``self.solver`` (LU for linear systems, Newton
    otherwise).
    """
    # Determine the number of forms and pick one representative form
    # to inspect; the previous version duplicated the rank-detection
    # logic in both branches.
    if isinstance(self.f, list):
        self.n = len(self.f)
        representative = self.f[0]
    else:
        self.n = 1
        representative = self.f

    # A zero lhs integrand means `f` is a residual (nonlinear) form.
    # Not particularly elegant. Should check for another solution.
    integrand = d.lhs(representative).integrals()[0].integrand()
    if type(integrand) is UFL.constantvalue.Zero:
        self.rank = 1
    else:
        self.rank = 2

    # Set appropriate solver
    if self.rank == 2:
        d.info("Using LU-solver to solve linear systems.")
        self.linear = True
        self.solver = d.LUSolver()
    else:
        d.info("Using Newton-solver to solve nonlinear systems.")
        self.linear = False
        self.solver = d.NewtonSolver()
def setup_NS(w_NS, u, p, v, q, p0, q0, dx, ds, normal, dirichlet_bcs_NS,
             neumann_bcs, boundary_to_mark, u_1, rho_, rho_1, mu_, c_1,
             grad_g_c_, dt, grav, enable_EC, trial_functions,
             use_iterative_solvers, p_lagrange, mesh, q_rhs,
             density_per_concentration, K, **namespace):
    """ Set up the Navier-Stokes subproblem.

    Returns (solver, a, L, dirichlet_bcs_NS). Fixes from review:
    `.iteritems()` (Python 2 only) replaced by `.items()`, and the
    unreachable trailing `return solver` removed.
    """
    # Momentum from the previous time step, optionally corrected by
    # concentration-driven density fluxes.
    mom_1 = rho_1 * u_1
    if enable_EC and density_per_concentration is not None:
        for drhodci, ci_1, grad_g_ci_, Ki in zip(
                density_per_concentration, c_1, grad_g_c_, K):
            if drhodci > 0.:
                mom_1 += -drhodci*Ki*ci_1*grad_g_ci_

    # Weak form: time derivative, advection, viscous stress, a
    # skew-symmetrizing stabilization term, pressure coupling and gravity.
    F = (1./dt * rho_1 * df.dot(u - u_1, v) * dx
         + df.inner(df.nabla_grad(u), df.outer(mom_1, v)) * dx
         + 2*mu_*df.inner(df.sym(df.nabla_grad(u)),
                          df.sym(df.nabla_grad(v))) * dx
         + 0.5*(
             1./dt * (rho_ - rho_1) * df.inner(u, v)
             - df.inner(mom_1, df.nabla_grad(df.dot(u, v)))) * dx
         - p * df.div(v) * dx
         - q * df.div(u) * dx
         - rho_ * df.dot(grav, v) * dx)

    # Neumann (pressure) boundary terms.
    for boundary_name, pressure in neumann_bcs["p"].items():
        F += pressure * df.inner(
            normal, v) * ds(boundary_to_mark[boundary_name])

    # Electrochemical forcing.
    if enable_EC:
        F += sum([ci_1*df.dot(grad_g_ci_, v)*dx
                  for ci_1, grad_g_ci_ in zip(c_1, grad_g_c_)])

    # Lagrange multiplier fixing the pressure level.
    if p_lagrange:
        F += (p*q0 + q*p0)*dx

    # Optional manufactured-solution source term.
    if "u" in q_rhs:
        F += -df.dot(q_rhs["u"], v)*dx

    a, L = df.lhs(F), df.rhs(F)

    if not use_iterative_solvers:
        problem = df.LinearVariationalProblem(a, L, w_NS, dirichlet_bcs_NS)
        solver = df.LinearVariationalSolver(problem)
    else:
        solver = df.LUSolver("mumps")
        # solver.set_operator(A)

    return solver, a, L, dirichlet_bcs_NS
def __init__(self, V, kappa=None):
    """Assemble a (mass + optional diffusion) matrix on `V` and wrap it
    in a symmetric MUMPS LU solver.

    `kappa`, if given, adds a `kappa * grad . grad` smoothing term.
    """
    trial = dolfin.TrialFunction(V)
    test = dolfin.TestFunction(V)

    form = dot(test, trial) * dx
    if kappa is not None:
        form = form + kappa * inner(grad(test), grad(trial)) * dx

    solver = dolfin.LUSolver(assemble(form), "mumps")
    solver.parameters["symmetric"] = True
    self._solver = solver
def solve_problem(self, nev, ncv=None):
    """Solve problem and return the eigenvalues and eigenvectors

    @param nev: Number of eigenpairs to compute
    @keyword ncv: Number of Arnoldi basisvectors to use.
        (default: 2*nev+1).

    @rtype: (C{numpy.array}, C{numpy.array})
    @return: (eigs_w, eigs_v) -- A tupple consisting of the eigenvalues
        and eigenvectors of the eigensystem. The eigenvalues are
        returned as an array of n_eig values corresponding to k^2 for
        the mode's resonant wavenumber. The eigenvectors are returned
        as a 2D array of shape (n_eig, problem_dim), with row i
        corresponding to the modal distributions associated with the
        i-th eigenvalue.
    """
    M = self.eigenproblem.system_matrices['M']
    S = self.eigenproblem.system_matrices['S']

    # Shift-invert operator (S - sigma*M); factorize once and reuse.
    solve_mat = S - self.sigma * M
    lu = dolfin.LUSolver(solve_mat)
    lu.parameters["reuse_factorization"] = True
    lu.parameters["report"] = False
    bb = dolfin.Vector(M.size(0))
    xx = dolfin.Vector(M.size(0))

    def sigma_solve(b):
        # Apply (S - sigma*M)^{-1} to b via the cached factorization.
        bb[:] = b
        lu.solve(xx, bb)
        return xx[:]

    # speigs in ARPACK was removed in scipy 0.9/0.10; eigs now lives in
    # scipy.sparse.linalg.arpack. We therefore wrap M in an object with
    # "shape"/"dtype" attributes (as "eigs" requires) whose matvec also
    # performs the sigma_solve shift-invert step.
    # (Removed: an unused `M_matvec` lambda left over from the old
    # ARPACK_gen_eigs code path.)
    class RM:

        def __init__(self, M):
            self.M = M
            self.shape = (M.size(0), M.size(1))
            self.dtype = np.dtype('d')

        def matvec(self, x):
            return sigma_solve(self.M * x)

    eigs_w, eigs_v = arpack.eigs(
        RM(M), k=nev, sigma=self.sigma, which='LM', ncv=ncv)

    return eigs_w, eigs_v.T
def initialize_solver(self, form, bcs, solver_type="mumps"):
    """Assemble the bilinear part of `form`, apply `bcs`, and cache an
    LU solver so the factorization is computed only once.

    As discussed at
    https://fenicsproject.org/docs/dolfin/dev/python/demos/elastodynamics/demo_elastodynamics.py.html:
    with a constant time step the system matrix is identical at every
    step, so it is factorized once up front; each step then only
    assembles the right-hand side and back-substitutes, with PETSc
    caching the factorization behind the LUSolver object.
    """
    matrix = dolf.assemble(dolf.lhs(form))
    for boundary_condition in bcs:
        boundary_condition.apply(matrix)
    self.bilinear_form = matrix
    self.LUSolver = dolf.LUSolver(matrix, solver_type)
def __init__(self, V):
    '''Build a symmetric MUMPS LU solver for the mass matrix on `V`.

    Parameters
    ----------
    V : dolfin.FunctionSpace
        Function space.
    '''
    if not isinstance(V, dolfin.FunctionSpace):
        raise TypeError(
            'Parameter `V` must be of type `dolfin.FunctionSpace`.')

    test = dolfin.TestFunction(V)
    trial = dolfin.TrialFunction(V)

    # Mass-like matrix
    mass_matrix = assemble(test * trial * dx)

    self.solver = dolfin.LUSolver(mass_matrix, "mumps")
    self.solver.parameters["symmetric"] = True
def _create_linear_solver(self):
    """Instantiate the linear solver selected by the parameters.

    Returns an LUSolver for 'direct' or a configured PETScKrylovSolver
    (with nullspace attached) for 'iterative'; raises ValueError for
    anything else.
    """
    solver_type = self._parameters.linear_solver_type

    if solver_type == "direct":
        return df.LUSolver(self._lhs_matrix)

    if solver_type == "iterative":
        solver = df.PETScKrylovSolver(
            self._parameters.krylov_method,
            self._parameters.krylov_preconditioner)
        solver.set_operator(self._lhs_matrix)
        solver.parameters["nonzero_initial_guess"] = True

        # Attach the nullspace so the singular system remains solvable.
        df.as_backend_type(self._lhs_matrix).set_nullspace(self._nullspace())
        return solver

    msg = "Unknown solver type. Got {}, expected 'iterative' or 'direct'".format(solver_type)
    raise ValueError(msg)
def set_solver(self, solver_method="mumps", **kwargs):
    """
    Choose the type of the solver and its method.

    An up-to-date list of the available solvers and preconditioners
    can be obtained with dolfin.list_linear_solver_methods() and
    dolfin.list_krylov_solver_preconditioners().

    kwargs: type e.g. 'LU', preconditioner e.g. 'default'

    Returns the created dolfin solver. (Fixed: `preconditioner` was
    stored into `self._solver` twice; dict membership no longer goes
    through `.keys()`.)
    """
    s_type = kwargs.pop("type", None)
    s_precond = kwargs.pop("preconditioner", "default")

    if s_type is None:
        # Infer the solver family from the method name.
        if solver_method in DOLFIN_KRYLOV_METHODS:
            s_type = "Krylov"
        elif solver_method in DOLFIN_LU_METHODS:
            s_type = "LU"
        else:
            raise RuntimeError("The indicated solver method is unknown.")
    else:
        if not (solver_method in DOLFIN_KRYLOV_METHODS
                or solver_method in DOLFIN_LU_METHODS):
            raise RuntimeError("The indicated solver method is unknown.")

    self._solver = dict(type=s_type, method=solver_method,
                        preconditioner=s_precond)

    if s_type == "Krylov":
        self.solver = fe.KrylovSolver(self.K, solver_method)
    elif s_type == "LU":
        self.solver = fe.LUSolver(self.K, solver_method)

    if s_precond != "default":
        self.solver.parameters.preconditioner = s_precond

    return self.solver
def create_linear_solver(
    lhs_matrix, parameters: CoupledMonodomainParameters
) -> Union[df.LUSolver, df.KrylovSolver]:
    """helper function for creating linear solver."""
    solver_type = parameters.linear_solver_type  # direct or iterative

    if solver_type not in ("direct", "iterative"):
        raise ValueError(f"Unknown linear_solver_type given: {solver_type}")

    if solver_type == "direct":
        lu_solver = df.LUSolver(lhs_matrix, parameters.lu_type)
        lu_solver.parameters["symmetric"] = True
        return lu_solver

    krylov_solver = df.PETScKrylovSolver(
        parameters.krylov_method, parameters.krylov_preconditioner)
    krylov_solver.set_operator(lhs_matrix)
    krylov_solver.parameters["nonzero_initial_guess"] = True
    krylov_solver.ksp().setFromOptions()  # TODO: What is this?
    return krylov_solver
def make_smoothing_solver(V, kappa=None):
    '''Build a solver for smoothing sensitivity fields on `V`.

    Important
    ---------
    * Smoothing is most effective when the sensitivities are defined
      over the whole domain; in practice this is rarely the case.
    * It works well enough on a subdomain of the same dimension as the
      mesh, though quality degrades on the subdomain boundary.
    * It does not work well when the sensitivities live on a boundary
      (any domain of lower dimension than the mesh) — in that case
      this type of smoothing is quite useless.

    Returns
    -------
    smoothing_solver:
        Solver for the smoothing problem `M x_smoothed = x`, where `x`
        holds the unsmoothed values. Invoke it as
        `smoothing_solver.solve(x, x)`.
    '''
    test = dolfin.TestFunction(V)
    trial = dolfin.TrialFunction(V)

    form = dot(trial, test) * dolfin.dx

    # Only add the diffusion term for a genuinely nonzero kappa.
    if kappa is not None and float(kappa) != 0.0:
        if isinstance(kappa, (float, int)):
            kappa = Constant(kappa)
        form = form + kappa * inner(grad(trial), grad(test)) * dolfin.dx

    smoothing_solver = dolfin.LUSolver(assemble(form), "mumps")
    smoothing_solver.parameters["symmetric"] = True

    return smoothing_solver
def _create_linear_solver(self) -> tp.Union[df.KrylovSolver, df.LUSolver]:
    """Helper function for creating linear solver based on parameters."""
    solver_type = self.parameters["linear_solver_type"]

    # NOTE(review): TypeError for a bad string *value* is unconventional
    # (ValueError would be idiomatic), but kept since callers may catch it.
    if solver_type not in ("direct", "iterative"):
        raise TypeError(
            "Unknown linear_solver_type given: {}".format(solver_type))

    if solver_type == "direct":
        lu_solver = df.LUSolver(self._lhs_matrix, self.parameters["lu_type"])
        lu_solver.parameters["symmetric"] = True
        return lu_solver

    # Iterative branch: KrylovSolver initialized with the matrix; the
    # preconditioner is preassembled and updated if the time step changes.
    krylov = df.PETScKrylovSolver(
        self.parameters["algorithm"], self.parameters["preconditioner"])
    krylov.set_operator(self._lhs_matrix)
    krylov.parameters["nonzero_initial_guess"] = True
    krylov.parameters["monitor_convergence"] = True
    krylov.ksp().setFromOptions()
    return krylov
def __init__(self, fenics_2d_rve, **kwargs):
    """Set up the periodic homogenization problem on a 2D RVE.

    Parameters
    ----------
    fenics_2d_rve :
        The representative volume element (mesh, periodicity vectors
        `gen_vect`, stiffness `C_per`, optional `bottom_left_corner`).
    element : tuple or dict, optional
        Type and degree of element for displacement FunctionSpace.
        Ex: ('CG', 2) or {'family':'Lagrange', 'degree':2}
    solver : dict, optional
        Choose the type of the solver, its method and the
        preconditioner. An up-to-date list of the available solvers and
        preconditioners can be obtained with
        dolfin.list_linear_solver_methods() and
        dolfin.list_krylov_solver_preconditioners().
    """
    self.rve = fenics_2d_rve
    self.topo_dim = topo_dim = fenics_2d_rve.dim
    try:
        bottom_left_corner = fenics_2d_rve.bottom_left_corner
    except AttributeError:
        logger.warning(
            "For the definition of the periodicity boundary conditions,"
            "the bottom left corner of the RVE is assumed to be on (0.,0.)"
        )
        bottom_left_corner = np.zeros(shape=(topo_dim, ))
    self.pbc = periodicity.PeriodicDomain.pbc_dual_base(
        fenics_2d_rve.gen_vect, "XY", bottom_left_corner, topo_dim)

    # Solver selection:
    # {'type': solver_type, 'method': solver_method, 'preconditioner': preconditioner}
    solver = kwargs.pop("solver", {})
    s_type = solver.pop("type", None)
    s_method = solver.pop("method", SOLVER_METHOD)
    s_precond = solver.pop("preconditioner", None)
    if s_type is None:
        # Infer the solver family (Krylov vs LU) from the method name.
        if s_method in DOLFIN_KRYLOV_METHODS.keys():
            s_type = "Krylov"
        elif s_method in DOLFIN_LU_METHODS.keys():
            s_type = "LU"
        else:
            raise RuntimeError("The indicated solver method is unknown.")
    self._solver = dict(type=s_type, method=s_method)
    if s_precond:
        self._solver["preconditioner"] = s_precond

    element = kwargs.pop("element", ("Lagrange", 2))
    if isinstance(element, dict):
        element = (element["family"], element["degree"])
    self._element = element

    # * Function spaces
    cell = self.rve.mesh.ufl_cell()
    self.scalar_FE = fe.FiniteElement(element[0], cell, element[1])
    self.displ_FE = fe.VectorElement(element[0], cell, element[1])
    # Strains are one degree lower than displacements (but at least 0),
    # stored in Voigt notation, hence dim = d*(d+1)/2.
    strain_deg = element[1] - 1 if element[1] >= 1 else 0
    strain_dim = int(topo_dim * (topo_dim + 1) / 2)
    self.strain_FE = fe.VectorElement("DG", cell, strain_deg, dim=strain_dim)
    # Scalar function space
    self.X = fe.FunctionSpace(self.rve.mesh, self.scalar_FE,
                              constrained_domain=self.pbc)
    # 3D function space: strains, Voigt notation
    self.W = fe.FunctionSpace(self.rve.mesh, self.strain_FE)
    # 2D function space for the displacement fields
    # TODO: reuse the Ve defined for the mixed function space,
    # e.g. V = FunctionSpace(mesh, Ve)
    self.V = fe.VectorFunctionSpace(self.rve.mesh, element[0], element[1],
                                    constrained_domain=self.pbc)
    # * Mixed function space for the resolution:
    # * 2D for the fields + scalar for the Lagrange multiplier.
    # "R" : Real element with one global degree of freedom
    self.real_FE = fe.VectorElement("R", cell, 0)
    self.M = fe.FunctionSpace(
        self.rve.mesh,
        fe.MixedElement([self.displ_FE, self.real_FE]),
        constrained_domain=self.pbc,
    )

    # Define variational problem
    self.v, self.lamb_ = fe.TestFunctions(self.M)
    self.u, self.lamb = fe.TrialFunctions(self.M)
    self.w = fe.Function(self.M)

    # bilinear form (elastic energy + Lagrange-multiplier coupling)
    self.a = (
        fe.inner(sigma(self.rve.C_per, epsilon(self.u)), epsilon(self.v))
        * fe.dx
        + fe.dot(self.lamb_, self.u) * fe.dx
        + fe.dot(self.lamb, self.v) * fe.dx)
    self.K = fe.assemble(self.a)
    if self._solver["type"] == "Krylov":
        self.solver = fe.KrylovSolver(self.K, self._solver["method"])
    elif self._solver["type"] == "LU":
        self.solver = fe.LUSolver(self.K, self._solver["method"])
        self.solver.parameters["symmetric"] = True
    try:
        self.solver.parameters.preconditioner = self._solver[
            "preconditioner"]
    except KeyError:
        # No preconditioner requested; keep the solver default.
        pass
    # fe.info(self.solver.parameters, True)

    self.localization = dict()
    # dictionary of localization field objects,
    # will be filled up when calling auxiliary problems (lazy evaluation)
    self.ConstitutiveTensors = dict()
# Define basis and bilinear form u = dol.TrialFunction(V) v = dol.TestFunction(V) m = inner(v, u) * dx # Mass form s = dot(curl(v), curl(u)) * dx # Stiffness form # Assemble smass form M = dol.PETScMatrix() S = dol.PETScMatrix() dol.assemble(m, tensor=M, mesh=mesh) dol.assemble(s, tensor=S, mesh=mesh) sigma = 0.03 smat = S - sigma * M #lu = dol.LUSolver(S - sigma*M) lu = dol.LUSolver(smat) lu.parameters["reuse_factorization"] = True lu.parameters["report"] = False bb = dol.Vector(M.size(0)) xx = dol.Vector(M.size(0)) def sigma_solve(b): bb[:] = b lu.solve(xx, bb) return xx[:] M_matvec = lambda x: M * x arpack_eigs, v = speigs.ARPACK_gen_eigs(M_matvec,
def __init__(self, m, parameters=None, degree=1, element="CG",
             project_method='magpar', unit_length=1, Ms=None, bench=False,
             normalize=True, solver_type=None):
    """Set up the demagnetisation-field computation.

    Parameters
    ----------
    m : Field
        Unit magnetisation field.
    parameters : dict, optional
        Nested solver settings; only ``parameters["laplace_solver"]``
        (``method`` / ``preconditioner``) is read here.
    degree, element :
        Degree and family of the scalar potential function space.
    project_method : {'magpar', 'project'}
        Strategy used to recover the demag field from the potential.
    unit_length : float
        Length in metres represented by one mesh unit.
    Ms : Field
        Saturation magnetisation; required despite the ``None`` default.
    solver_type : {'Krylov', 'LU'}, optional
        Laplace solver family; defaults to Krylov.
    """
    assert isinstance(m, Field)
    # currently this means that Ms must be passed in
    # (we don't have a default value)
    assert isinstance(Ms, Field)

    self.m = m

    # Problem objects and parameters
    self.name = "Demag"
    self.in_jacobian = False
    self.unit_length = unit_length
    self.degree = degree
    self.bench = bench
    self.parameters = parameters

    # This is used in energy density calculations
    self.mu0 = np.pi * 4e-7  # Vs/(Am)

    # Mesh Facet Normal
    self.n = df.FacetNormal(self.m.mesh())

    # Spaces and functions for the Demag Potential
    self.V = df.FunctionSpace(self.m.mesh(), element, degree)
    self.v = df.TestFunction(self.V)
    self.u = df.TrialFunction(self.V)
    self.phi = df.Function(self.V)

    # Space and functions for the Demag Field
    self.W = df.VectorFunctionSpace(self.m.mesh(), element, degree, dim=3)
    self.w = df.TrialFunction(self.W)
    self.vv = df.TestFunction(self.W)
    self.H_demag = df.Function(self.W)

    # Interpolate the Unit Magnetisation field if necessary
    # A try block was not used since it might lead to an unneccessary
    # (and potentially bad) interpolation
    # if isinstance(m, df.Expression) or isinstance(m, df.Constant):
    #     self.m = df.interpolate(m,self.W)
    # elif isinstance(m,tuple):
    #     self.m = df.interpolate(df.Expression(m, degree=1),self.W)
    # elif isinstance(m,list):
    #     self.m = df.interpolate(df.Expression(tuple(m, degree=1)),self.W)
    # else:
    #     self.m = m
    # Normalize m (should be normalized anyway).
    # if normalize:
    #     self.m.vector()[:] = helpers.fnormalise(self.m.vector().array())

    assert isinstance(Ms, Field)
    self.Ms = Ms

    # Initilize the boundary element matrix variable
    self.bem = None

    # Objects that are needed frequently for linear solves.
    self.poisson_matrix = self.build_poisson_matrix()
    self.laplace_zeros = df.Function(self.V).vector()

    # 2nd FEM.
    if parameters:
        method = parameters["laplace_solver"]["method"]
        pc = parameters["laplace_solver"]["preconditioner"]
    else:
        method, pc = "default", "default"

    if solver_type is None:
        solver_type = 'Krylov'
    solver_type = solver_type.lower()
    if solver_type == 'lu':
        self.laplace_solver = df.LUSolver()
        self.laplace_solver.parameters["reuse_factorization"] = True
    elif solver_type == 'krylov':
        self.laplace_solver = df.KrylovSolver(method, pc)
        # We're setting 'same_nonzero_pattern=True' to enforce the
        # same matrix sparsity pattern across different demag solves,
        # which should speed up things.
        #self.laplace_solver.parameters["preconditioner"][
        #    "structure"] = "same_nonzero_pattern"
    else:
        raise ValueError(
            "Wrong solver type specified: '{}' (allowed values: 'Krylov', 'LU')"
            .format(solver_type))

    # Objects needed for energy density computation
    self.nodal_vol = df.assemble(self.v * df.dx).array()
    self.ED = df.Function(self.V)

    # Method to calculate the Demag field from the potential
    self.project_method = project_method
    if self.project_method == 'magpar':
        self.__setup_field_magpar()
        self.__compute_field = self.__compute_field_magpar
    elif self.project_method == 'project':
        self.__compute_field = self.__compute_field_project
    else:
        raise NotImplementedError("""Only methods currently implemented are
                                * 'magpar',
                                * 'project'""")
def __init__(self, V, inner_product="L2", map_operator=None, inverse="default"):
    """Build an inner-product map (Riesz map) on `V` and a solver for it.

    Parameters
    ----------
    V : dolfin.FunctionSpace or dolfin.cpp.function.MultiMeshFunctionSpace
        Space on which the inner product is defined.
    inner_product : {"L2", "H0_1", "H1", "custom"}
        Which built-in form to assemble; "custom" means `map_operator`
        is supplied by the caller.
    map_operator : matrix, optional
        Pre-assembled operator (used when inner_product == "custom").
    inverse : str or dolfin.GenericMatrix or solver
        How to invert the operator: "default"/"lu", "jacobi", "sor",
        "amg", an explicit preconditioner matrix, or a ready solver.

    Fixed: the original used `inner_product is not "custom"`, an
    identity comparison with a string literal that only works by
    CPython interning accident; replaced with `!=`.
    """
    self.V = V

    if inner_product != "custom":
        u = dolfin.TrialFunction(V)
        v = dolfin.TestFunction(V)
        # Multimesh spaces integrate with dX, ordinary spaces with dx.
        if isinstance(V, dolfin.cpp.function.MultiMeshFunctionSpace):
            default_forms = {
                "L2": dolfin.inner(u, v) * dolfin.dX,
                "H0_1": dolfin.inner(dolfin.grad(u),
                                     dolfin.grad(v)) * dolfin.dX,
                "H1": (dolfin.inner(u, v) +
                       dolfin.inner(dolfin.grad(u),
                                    dolfin.grad(v))) * dolfin.dX,
            }
        else:
            default_forms = {
                "L2": dolfin.inner(u, v) * dolfin.dx,
                "H0_1": dolfin.inner(dolfin.grad(u),
                                     dolfin.grad(v)) * dolfin.dx,
                "H1": (dolfin.inner(u, v) +
                       dolfin.inner(dolfin.grad(u),
                                    dolfin.grad(v))) * dolfin.dx,
            }
        form = default_forms[inner_product]
        if hasattr(form.arguments()[0], "_V_multi"):
            map_operator = dolfin.assemble_multimesh(form)
        else:
            map_operator = dolfin.assemble(form)
    self.map_operator = map_operator

    if inverse in ("default", "lu"):
        self.map_solver = dolfin.LUSolver(self.map_operator)
    elif inverse == "jacobi":
        self.map_solver = dolfin.PETScKrylovSolver()
        self.map_solver.set_operator(self.map_operator)
        self.map_solver.ksp().setType("preonly")
        self.map_solver.ksp().getPC().setType("jacobi")
    elif inverse == "sor":
        self.map_solver = dolfin.PETScKrylovSolver()
        self.map_solver.set_operator(self.map_operator)
        self.map_solver.ksp().setType("preonly")
        self.map_solver.ksp().getPC().setType("sor")
    elif inverse == "amg":
        self.map_solver = dolfin.PETScKrylovSolver()
        self.map_solver.set_operator(self.map_operator)
        self.map_solver.ksp().setType("preonly")
        self.map_solver.ksp().getPC().setType("hypre")
    elif isinstance(inverse, dolfin.GenericMatrix):
        # An explicit matrix is treated as the preconditioner.
        self.map_solver = dolfin.PETScKrylovSolver()
        self.map_solver.set_operators(self.map_operator, inverse)
        self.map_solver.ksp().setType("preonly")
        self.map_solver.ksp().getPC().setType("mat")
    else:
        # Assume the caller passed a ready-made solver object.
        self.map_solver = inverse
    self.solver_type = inverse
def setup(self, m, Ms, unit_length=1):
    """
    Setup the FKDemag instance. Usually called automatically by the
    Simulation object.

    *Arguments*

    m: finmag.Field
        The unit magnetisation on a finite element space.

    Ms: float
        The saturation magnetisation in A/m.

    unit_length: float
        The length (in m) represented by one unit on the mesh. Default 1.

    Fixed: ``dtype=np.int`` (alias removed in NumPy >= 1.24) replaced
    by the builtin ``int``.
    """
    assert isinstance(m, Field)
    assert isinstance(Ms, Field)

    self.m = m
    self.Ms = Ms
    self.unit_length = unit_length

    self.S1 = df.FunctionSpace(self.m.mesh(), "Lagrange", 1)

    self._test1 = df.TestFunction(self.S1)
    self._trial1 = df.TrialFunction(self.S1)
    self._test3 = df.TestFunction(self.m.functionspace)
    self._trial3 = df.TrialFunction(self.m.functionspace)

    # for computation of energy
    self._nodal_volumes = nodal_volume(self.S1, unit_length)
    # we will copy field into this when we need the energy
    self._H_func = df.Function(m.functionspace)
    self._E_integrand = -0.5 * mu0 * \
        df.dot(self._H_func, self.m.f * self.Ms.f)
    self._E = self._E_integrand * df.dx
    self._nodal_E = df.dot(self._E_integrand, self._test1) * df.dx
    self._nodal_E_func = df.Function(self.S1)

    # for computation of field and scalar magnetic potential
    # NOTE: this replaces the bound method `_poisson_matrix` with the
    # assembled matrix (attribute shadows the method from here on).
    self._poisson_matrix = self._poisson_matrix()
    self._laplace_zeros = df.Function(self.S1).vector()

    # determine the solver type to be used (Krylov or LU); if the kwarg
    # 'solver_type' is not provided, try to read the setting from the
    # .finmagrc file; use 'Krylov' if this fails.
    solver_type = self.solver_type
    if solver_type is None:
        solver_type = configuration.get_config_option(
            'demag', 'solver_type', 'Krylov')
    if solver_type == 'None':
        # if the user set 'solver_type = None' in the .finmagrc file,
        # solver_type will be a string so we need to catch this here.
        solver_type = 'Krylov'
    logger.debug("Using {} solver for demag.".format(solver_type))

    if solver_type == 'Krylov':
        self._poisson_solver = df.KrylovSolver(
            self._poisson_matrix.copy(),
            self.parameters['phi_1_solver'],
            self.parameters['phi_1_preconditioner'])
        self._poisson_solver.parameters.update(self.parameters['phi_1'])
        self._laplace_solver = df.KrylovSolver(
            self.parameters['phi_2_solver'],
            self.parameters['phi_2_preconditioner'])
        self._laplace_solver.parameters.update(self.parameters['phi_2'])
        # We're setting 'same_nonzero_pattern=True' to enforce the
        # same matrix sparsity pattern across different demag solves,
        # which should speed up things.
        #self._laplace_solver.parameters["preconditioner"][
        #    "structure"] = "same_nonzero_pattern"
    elif solver_type == 'LU':
        self._poisson_solver = df.LUSolver(self._poisson_matrix.copy())
        self._laplace_solver = df.LUSolver()
        self._poisson_solver.parameters["reuse_factorization"] = True
        self._laplace_solver.parameters["reuse_factorization"] = True
    else:
        raise ValueError(
            "Argument 'solver_type' must be either 'Krylov' or 'LU'. "
            "Got: '{}'".format(solver_type))

    with fk_timer('compute BEM'):
        if not hasattr(self, "_bem"):
            if self.macrogeometry is not None:
                Ts = self.macrogeometry.compute_Ts(self.m.mesh())
                pbc = BMatrixPBC(self.m.mesh(), Ts)
                # np.int was removed from NumPy; builtin int is equivalent.
                self._b2g_map = np.array(pbc.b2g_map, dtype=int)
                self._bem = pbc.bm
            else:
                self._bem, self._b2g_map = compute_bem_fk(
                    df.BoundaryMesh(self.m.mesh(), 'exterior', False))
    logger.debug(
        "Boundary element matrix uses {:.2f} MB of memory.".format(
            self._bem.nbytes / 1024.**2))

    # solution of inhomogeneous Neumann problem
    self._phi_1 = df.Function(self.S1)
    # solution of Laplace equation inside domain
    self._phi_2 = df.Function(self.S1)
    self._phi = df.Function(self.S1)  # magnetic potential phi_1 + phi_2

    # To be applied to the vector field m as first step of computation of
    # _phi_1. This gives us div(M), which is equal to Laplace(_phi_1),
    # equation which is then solved using _poisson_solver.
    self._Ms_times_divergence = df.assemble(
        self.Ms.f * df.inner(self._trial3, df.grad(self._test1)) * df.dx)

    # we move the boundary condition here to avoid create a instance each
    # time when compute the magnetic potential
    self.boundary_condition = df.DirichletBC(self.S1, self._phi_2,
                                             df.DomainBoundary())
    self.boundary_condition.apply(self._poisson_matrix)

    self._setup_gradient_computation()
def run_model(function_space, kappa, forcing, init_condition, dt, final_time,
              boundary_conditions=None, second_order_timestepping=False,
              exact_sol=None, velocity=None, point_sources=None,
              intermediate_times=None):
    """
    Use implicit euler to solve transient advection diffusion equation

        du/dt = grad (k* grad u) - vel*grad u + f

    WARNING: when using point sources the solution changes
    significantly when the mesh is varied.

    Fixed: ``boundary_conditions == None`` replaced by ``is None``;
    garbled docstring text repaired; stale commented-out assembly code
    removed.
    """
    mesh = function_space.mesh()

    time_independent_boundaries = False
    if boundary_conditions is None:
        # Default: homogeneous Dirichlet conditions on the whole boundary.
        bndry_obj = dl.CompiledSubDomain("on_boundary")
        boundary_conditions = [['dirichlet', bndry_obj, dl.Constant(0)]]
        time_independent_boundaries = True

    num_bndrys = len(boundary_conditions)
    boundaries = mark_boundaries(mesh, boundary_conditions)
    dirichlet_bcs = collect_dirichlet_boundaries(
        function_space, boundary_conditions, boundaries)

    # To express integrals over the boundary parts using ds(i), we must
    # first redefine the measure ds in terms of our boundary markers:
    ds = dl.Measure('ds', domain=mesh, subdomain_data=boundaries)
    dx = dl.Measure('dx', domain=mesh)

    # Variational problem at each time
    u = dl.TrialFunction(function_space)
    v = dl.TestFunction(function_space)

    # Previous solution
    if hasattr(init_condition, 't'):
        assert init_condition.t == 0
    u_1 = dl.interpolate(init_condition, function_space)

    # theta = 1: backward Euler; theta = 0.5: Crank-Nicolson.
    if not second_order_timestepping:
        theta = 1
    else:
        theta = 0.5

    if hasattr(forcing, 't'):
        forcing_1 = copy_expression(forcing)
    else:
        forcing_1 = forcing

    def steady_state_form(u, v, f):
        # Diffusion - forcing (+ optional advection) for one time level.
        F = kappa * dl.inner(dl.grad(u), dl.grad(v)) * dx
        F -= f * v * dx
        if velocity is not None:
            F += dl.dot(velocity, dl.grad(u)) * v * dx
        return F

    F = u*v*dx-u_1*v*dx + dt*theta*steady_state_form(u, v, forcing) + \
        dt*(1.-theta)*steady_state_form(u_1, v, forcing_1)
    a, L = dl.lhs(F), dl.rhs(F)

    # Robin/Neumann boundary contributions; the *_1 lists collect
    # time-dependent coefficients that must be evaluated at t - dt when
    # using second-order timestepping.
    beta_1_list = []
    alpha_1_list = []
    for ii in range(num_bndrys):
        if (boundary_conditions[ii][0] == 'robin'):
            alpha = boundary_conditions[ii][3]
            a += theta * dt * alpha * u * v * ds(ii)
            if second_order_timestepping:
                if hasattr(alpha, 't'):
                    alpha_1 = copy_expression(alpha)
                    alpha_1_list.append(alpha_1)
                else:
                    alpha_1 = alpha
                L -= (1 - theta) * dt * alpha_1 * u_1 * v * ds(ii)

        if ((boundary_conditions[ii][0] == 'robin') or
                (boundary_conditions[ii][0] == 'neumann')):
            beta = boundary_conditions[ii][2]
            L -= theta * dt * beta * v * ds(ii)
            if second_order_timestepping:
                if hasattr(beta, 't'):
                    beta_1 = copy_expression(beta)
                    beta_1_list.append(beta_1)
                else:
                    # boundary condition is constant in time
                    beta_1 = beta
                L -= (1 - theta) * dt * beta_1 * v * ds(ii)

    if time_independent_boundaries:
        # TODO this can be used if dirichlet and robin conditions are not
        # time dependent.
        A = dl.assemble(a)
        for bc in dirichlet_bcs:
            bc.apply(A)
        solver = dl.LUSolver(A)
        #solver.parameters["reuse_factorization"] = True
    else:
        solver = None

    u_2 = dl.Function(function_space)
    u_2.assign(u_1)
    t = 0.0
    dt_tol = 1e-12
    n_time_steps = 0
    if intermediate_times is not None:
        intermediate_u = []
        intermediate_cnt = 0
        # assert in chronological order
        assert np.allclose(intermediate_times, np.array(intermediate_times))
        assert np.all(intermediate_times < final_time)

    while t < final_time - dt_tol:
        # Update current time
        t += dt
        forcing.t = t
        forcing_1.t = t - dt

        # set current time for time varying boundary conditions
        for ii in range(num_bndrys):
            if hasattr(boundary_conditions[ii][2], 't'):
                boundary_conditions[ii][2].t = t

        # set previous time for time varying boundary conditions when
        # using second order timestepping. lists will be empty if using
        # first order timestepping
        for jj in range(len(beta_1_list)):
            beta_1_list[jj].t = t - dt
        for jj in range(len(alpha_1_list)):
            alpha_1_list[jj].t = t - dt

        # Reassemble: the forms may contain time-dependent coefficients.
        A = dl.assemble(a)
        for bc in dirichlet_bcs:
            bc.apply(A)

        b = dl.assemble(L)
        for bc in dirichlet_bcs:
            bc.apply(b)

        if point_sources is not None:
            ps_list = []
            for ii in range(len(point_sources)):
                point, expr = point_sources[ii]
                ps_list.append((dl.Point(point[0], point[1]), expr(t)))
            ps = dl.PointSource(function_space, ps_list)
            ps.apply(b)

        if solver is None:
            dl.solve(A, u_2.vector(), b)
        else:
            # Time-independent boundaries: reuse the cached factorization.
            solver.solve(u_2.vector(), b)

        # Update previous solution
        u_1.assign(u_2)

        # compute error
        if exact_sol is not None:
            exact_sol.t = t
            error = dl.errornorm(exact_sol, u_2)
            print('t = %.2f: error = %.3g' % (t, error))

        t = min(t, final_time)
        if (intermediate_times is not None and
                intermediate_cnt < intermediate_times.shape[0] and
                t >= intermediate_times[intermediate_cnt]):
            # save solution closest to intermediate time
            u_t = dl.Function(function_space)
            u_t.assign(u_2)
            intermediate_u.append(u_t)
            intermediate_cnt += 1
        n_time_steps += 1

    if intermediate_times is None:
        return u_2
    else:
        return intermediate_u + [u_2]
+ time_step * b[2] * c_operator(v00, T00, del_T) lhs = lhs_momentum + lhs_energy #=============================================================================== # full problem lhs = lhs_momentum + lhs_energy rhs = rhs_momentum + rhs_energy if not use_assembler_method: # linear problem problem = dlfn.LinearVariationalProblem(lhs, rhs, sol, bcs=bcs) solver = dlfn.LinearVariationalSolver(problem) else: # system assembler assembler = dlfn.SystemAssembler(lhs, rhs, bcs=bcs) system_matrix = dlfn.Matrix() system_rhs = dlfn.Vector() solver = dlfn.LUSolver(system_matrix) #=============================================================================== def solve(step, time_step_ratio, update_coefficients, use_assembler_method): assert isinstance(step, int) and step >= 0 assert isinstance(time_step_ratio, float) and time_step_ratio > 0 assert isinstance(update_coefficients, bool) \ and isinstance(use_assembler_method, bool) if step == 0: # assign omega for initial Euler step omega.assign(0.) assert isinstance(omega, dlfn.Constant) # assemble system matrix / rhs if use_assembler_method: print " Assembling system (initial step)..." assembler.assemble(system_matrix, system_rhs) # re-compute LU decomposition
def test_stokes_shear(nu_interp, postprocessor):
    """Run the shear-flow Stokes benchmark for one viscosity interpolation.

    Parameters
    ----------
    nu_interp : str
        Name suffix selecting the viscosity interpolation; resolved to a
        module-level callable ``nu_<nu_interp>`` via ``eval`` below.
    postprocessor : object
        Collects coefficients and results (``get_coefficients``,
        ``add_result``, ``save_results``, ``flush_plots``).
    """
    #set_log_level(WARNING)
    basename = postprocessor.basename
    label = "{}_{}".format(basename, nu_interp)

    c = postprocessor.get_coefficients()
    cc = wrap_coeffs_as_constants(c)
    nu = eval("nu_" + nu_interp)  # choose viscosity interpolation

    # NOTE(review): single refinement level only (range(3, 4)); presumably a
    # deliberately narrowed sweep -- confirm before widening.
    for level in range(3, 4):
        # Interface-thickness parameter eps is halved per level.
        c[r"\eps"] = postprocessor.eps * 0.5**(level - 3)
        cc[r"\eps"].assign(c[r"\eps"])
        mesh, boundary_markers, pinpoint, periodic_bnd = create_domain(
            level, "crossed")
        del periodic_bnd  # periodic boundary not used in this test
        k = 1  # polynomial degree of the mixed space
        W = create_mixed_space(mesh, k)
        bcs = create_bcs(W, boundary_markers, pinpoint)

        df.info("\n... mesh created!")
        df.info("h = {}".format(mesh.hmin()))
        df.info("h_safe = {}".format(0.229 * c[r"\eps"] * k))

        phi = create_fixed_vfract(mesh, c)
        p_h = create_hydrostatic_pressure(mesh, cc)

        # Create forms
        a, L = create_forms(W, rho(phi, cc), nu(phi, cc), c[r"F"],
                            c[r"g_a"], p_h, boundary_markers)

        # Solve problem: assemble with BCs applied symmetrically, then direct solve.
        w = df.Function(W)
        A, b = df.assemble_system(a, L, bcs)
        solver = df.LUSolver()
        solver.set_operator(A)
        solver.solve(w.vector(), b)

        # Pre-process results: split mixed solution into velocity/pressure
        # (deepcopy=True so the subfunctions own their dofs).
        v, p = w.split(True)
        v.rename("v", "velocity")
        p.rename("p", "pressure")

        # Divergence and shear-rate component projected onto a DG space one
        # degree below the velocity space.
        V_dv = df.FunctionSpace(mesh, "DG",
                                W.sub(0).ufl_element().degree() - 1)
        div_v = df.project(df.div(v), V_dv)
        div_v.rename("div_v", "velocity-divergence")
        D_12 = df.project(0.5 * v.sub(0).dx(1), V_dv)

        # Viscosity and shear stress live in a DG space for piecewise ("PW")
        # interpolations, otherwise in the volume-fraction space.
        if nu_interp[:2] == "PW":
            V_nu = df.FunctionSpace(mesh, "DG", phi.ufl_element().degree())
        else:
            V_nu = phi.function_space()
        nu_0 = df.project(nu(phi, cc), V_nu)
        T_12 = df.project(nu(phi, cc) * v.sub(0).dx(1), V_nu)

        #p_ref = df.project(p_h, df.FunctionSpace(mesh, W.sub(1).ufl_element()))

        # Save results: 1D cuts through the computed fields.
        make_cut = postprocessor._make_cut
        rs = dict(level=level,
                  r_dens=c[r"r_dens"],
                  r_visc=c[r"r_visc"],
                  nu_interp=nu_interp)
        rs[r"$v_1$"] = make_cut(v.sub(0))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{12}$"] = make_cut(D_12)
        rs[r"$T_{12}$"] = make_cut(T_12)
        rs[r"$\nu$"] = make_cut(nu_0)
        print(label, level)

        # Send to posprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Plot results obtained in the last round (fields from the final level).
    outdir = os.path.join(postprocessor.outdir, "XDMFoutput")
    with df.XDMFFile(os.path.join(outdir, "v.xdmf")) as file:
        file.write(v, 0.0)
    with df.XDMFFile(os.path.join(outdir, "p.xdmf")) as file:
        file.write(p, 0.0)
    with df.XDMFFile(os.path.join(outdir, "phi.xdmf")) as file:
        file.write(phi, 0.0)
    with df.XDMFFile(os.path.join(outdir, "div_v.xdmf")) as file:
        file.write(div_v, 0.0)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.flush_plots()

    # Cleanup
    df.set_log_level(df.INFO)
    gc.collect()
def __init__(self, CGdeg, regularizationtype, h=1.0, parameters=[], image='image.dat'): class Image(dl.Expression): def __init__(self, Lx, Ly, data): self.data = data self.hx = Lx / float(self.data.shape[1] - 1) self.hy = Ly / float(self.data.shape[0] - 1) def eval(self, values, x): j = math.floor(x[0] / self.hx) i = math.floor(x[1] / self.hy) values[0] = self.data[i, j] data = np.loadtxt(image, delimiter=',') #Lx, Ly = float(data.shape[1])/float(data.shape[0]), 1. Lx, Ly = 2., 1. scaling = 100. * h # =1.0 => h~0.01 Lx, Ly = scaling * Lx, scaling * Ly np.random.seed(seed=1) noise_std_dev = 0.3 noise = noise_std_dev * np.random.randn(data.shape[0], data.shape[1]) print '||noise||={}'.format(np.linalg.norm(noise)) mesh = dl.RectangleMesh(dl.Point(0, 0), dl.Point(Lx, Ly), 200, 100) mcoord = mesh.coordinates() print 'hx={}, hy={}'.format((mcoord[-1][0] - mcoord[0][0]) / 200., (mcoord[-1][1] - mcoord[0][1]) / 100.) V = dl.FunctionSpace(mesh, 'Lagrange', CGdeg) trueImage = Image(Lx, Ly, data) noisyImage = Image(Lx, Ly, data + noise) print 'min(data)={}, max(data)={}'.format(np.amin(data), np.amax(data)) print 'min(data+noise)={}, max(data+noise)={}'.format( np.amin(data + noise), np.amax(data + noise)) self.u_true = dl.interpolate(trueImage, V) self.u_0 = dl.interpolate(noisyImage, V) self.u = dl.Function(V) self.ucopy = dl.Function(V) self.G = dl.Function(V) self.du = dl.Function(V) u_test = dl.TestFunction(V) u_trial = dl.TrialFunction(V) Mweak = dl.inner(u_test, u_trial) * dl.dx self.M = dl.assemble(Mweak) self.solverM = dl.LUSolver('petsc') self.solverM.parameters['symmetric'] = True self.solverM.parameters['reuse_factorization'] = True self.solverM.set_operator(self.M) self.regul = regularizationtype if self.regul == 'tikhonov': self.Regul = LaplacianPrior({'Vm': V, 'gamma': 1.0, 'beta': 0.0}) elif self.regul == 'TV': paramTV = {'Vm': V, 'k': 1.0, 'eps': 1e-4, 'GNhessian': True} paramTV.update(parameters) self.Regul = TV(paramTV) self.inexact = False elif self.regul 
== 'TVPD': paramTV = {'Vm': V, 'k': 1.0, 'eps': 1e-4, 'exact': False} paramTV.update(parameters) self.Regul = TVPD(paramTV) self.inexact = False self.alpha = 1.0 self.Hess = self.M self.parametersLS = {'alpha0':1.0, 'rho':0.5, 'c':5e-5, \ 'max_backtrack':12, 'cgtol':0.5, 'maxiter':50000} filename, ext = os.path.splitext(sys.argv[0]) #if os.path.isdir(filename + '/'): shutil.rmtree(filename + '/') self.myplot = PlotFenics(filename) try: solver = PETScKrylovSolver('cg', 'ml_amg') self.precond = 'ml_amg' except: print '*** WARNING: ML not installed -- using petsc_amg instead' self.precond = 'petsc_amg'
u_D = dolfin.Expression(('x[1]', 'x[0]'), degree=1) def boundary(x, on_boundary): return on_boundary bc1 = dolfin.DirichletBC(V, u_D, boundary) bc = [bc1] A, p = dolfin.assemble_system(bilinear_form, linear_form, bc) usol = dolfin.Function(V) us_vector = usol.vector() solver = dolfin.LUSolver("mumps") solver.set_operator(A) solver.solve(us_vector, p) plt.colorbar(dolfin.plot(usol, mode='displacement', title=r"$\Vert u \Vert$", wireframe=True), orientation="vertical") plt.show() """Compute the Solution to the elasticity problem""" eps_sol = dolfin.sym(dolfin.grad(usol)) P1_tens = dolfin.TensorFunctionSpace(mesh, 'DG', 0) strain_sol = dolfin.project(eps_sol, P1_tens) dolfin.plot(strain_sol[0, 0]) plt.title(r"$\varepsilon_{xx}$ - DG0 projection")
def test_stokes_noflow(gamma, Re, nu_interp, postprocessor):
    """Run the no-flow Stokes benchmark over refinement levels 1..3.

    For each level the system is assembled, solved with MUMPS, and error
    norms against the hydrostatic reference pressure are collected by the
    postprocessor. Fields from the last level are written to XDMF.

    Parameters
    ----------
    gamma : float
        Stabilization/penalty coefficient forwarded to ``create_forms``.
    Re : float
        Reynolds number used to derive the nondimensional viscosities.
    nu_interp : str
        Viscosity-interpolation name; resolved to ``nu_<nu_interp>``.
    postprocessor : object
        Result collector (``get_coefficients``, ``add_result``, ...).

    Fix: the solver failure handler used a bare ``except:``, which also
    swallows ``KeyboardInterrupt``/``SystemExit``; narrowed to
    ``except Exception:`` (the failure path itself is unchanged).
    """
    #set_log_level(WARNING)
    basename = postprocessor.basename
    label = "{}_{}_gamma_{}_Re_{:.0e}".format(basename, nu_interp, gamma, Re)

    c = postprocessor.get_coefficients()
    # Nondimensionalize the two phase viscosities.
    c[r"\nu_1"] = c[r"\rho_1"] / Re
    c[r"\nu_2"] = c[r"r_visc"] * c[r"\nu_1"]
    c[r"\nu_1"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]
    c[r"\nu_2"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]
    cc = wrap_coeffs_as_constants(c)
    nu = eval("nu_" + nu_interp)  # choose viscosity interpolation

    for level in range(1, 4):
        mesh, boundary_markers, pinpoint, periodic_bnd = create_domain(level)
        periodic_bnd = None  # periodicity disabled for this test
        W = create_mixed_space(mesh, periodic_boundary=periodic_bnd)
        bcs = create_bcs(W, boundary_markers,
                         periodic_boundary=periodic_bnd,
                         pinpoint=pinpoint)
        phi = create_fixed_vfract(mesh, c)

        # Create forms
        a, L = create_forms(W, rho(phi, cc), nu(phi, cc), c[r"g_a"],
                            boundary_markers, gamma)

        # Solve problem with MUMPS; bump its workspace (ICNTL(14)) to avoid
        # out-of-memory failures on finer levels.
        w = df.Function(W)
        A, b = df.assemble_system(a, L, bcs)
        solver = df.LUSolver("mumps")
        df.PETScOptions.set("fieldsplit_u_mat_mumps_icntl_14", 500)
        solver.set_operator(A)
        try:
            solver.solve(w.vector(), b)
        except Exception:
            # Best-effort: log and move on to the next level.
            df.warning("Ooops! Something went wrong: {}".format(
                sys.exc_info()[0]))
            continue

        # Pre-process results (deepcopy split so subfunctions own their dofs).
        v, p = w.split(True)
        v.rename("v", "velocity")
        p.rename("p", "pressure")
        V_dv = df.FunctionSpace(mesh, "DG",
                                W.sub(0).ufl_element().degree() - 1)
        div_v = df.project(df.div(v), V_dv)
        div_v.rename("div_v", "velocity-divergence")
        D_22 = df.project(v.sub(1).dx(1), V_dv)

        # Hydrostatic reference pressure projected onto a high-order CG space.
        p_h = create_hydrostatic_pressure(mesh, cc)
        #p_ref = df.project(p_h, W.sub(1).ufl_element())
        p_ref = df.project(
            p_h,
            df.FunctionSpace(mesh,
                             df.FiniteElement("CG", mesh.ufl_cell(), 4)))
        v_errL2, v_errH10, div_errL2, p_errL2 = compute_errornorms(
            v, div_v, p, p_ref)

        # Viscosity/stress space: DG for piecewise ("PW") interpolations.
        if nu_interp[:2] == "PW":
            V_nu = df.FunctionSpace(mesh, "DG", phi.ufl_element().degree())
        else:
            V_nu = phi.function_space()
        nu_0 = df.project(nu(phi, cc), V_nu)
        T_22 = df.project(2.0 * nu(phi, cc) * v.sub(1).dx(1), V_nu)

        # Save results
        make_cut = postprocessor._make_cut
        rs = dict(ndofs=W.dim(),
                  level=level,
                  h=mesh.hmin(),
                  r_dens=c[r"r_dens"],
                  r_visc=c[r"r_visc"],
                  gamma=gamma,
                  Re=Re,
                  nu_interp=nu_interp)
        rs[r"$v_2$"] = make_cut(v.sub(1))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{22}$"] = make_cut(D_22)
        rs[r"$T_{22}$"] = make_cut(T_22)
        rs[r"$\nu$"] = make_cut(nu_0)
        rs[r"$||\mathbf{v} - \mathbf{v}_h||_{L^2}$"] = v_errL2
        rs[r"$||\nabla (\mathbf{v} - \mathbf{v}_h)||_{L^2}$"] = v_errH10
        rs[r"$||\mathrm{div} \mathbf{v}_h||_{L^2}$"] = div_errL2
        rs[r"$||\mathbf{p} - \mathbf{p}_h||_{L^2}$"] = p_errL2
        print(label, level)

        # Send to posprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Plot results obtained in the last round (fields from the final level).
    outdir = os.path.join(postprocessor.outdir, "XDMFoutput")
    with df.XDMFFile(os.path.join(outdir, "v.xdmf")) as file:
        file.write(v, 0.0)
    with df.XDMFFile(os.path.join(outdir, "p.xdmf")) as file:
        file.write(p, 0.0)
    with df.XDMFFile(os.path.join(outdir, "phi.xdmf")) as file:
        file.write(phi, 0.0)
    with df.XDMFFile(os.path.join(outdir, "div_v.xdmf")) as file:
        file.write(div_v, 0.0)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.pop_items(["level", "h"])
    postprocessor.flush_plots()

    # Cleanup
    df.set_log_level(df.INFO)
    gc.collect()
def mass_matrix_LUSolver(me): if me._mass_matrix_LUSolver is None: me._mass_matrix_LUSolver = dl.LUSolver(me.mass_matrix_petsc) return me._mass_matrix_LUSolver
def test_bending(self):
    """Adaptive time-stepped bending test of a plastic beam.

    Builds a rectangular plane-stress mesh, a rate-independent Von Mises
    plasticity law, pointwise/line Dirichlet BCs driven by a sinusoidal
    loading, and runs an adaptive ``TimeStepper`` while recording a
    load-displacement curve and XDMF output.
    """
    # return
    LX = 6.0
    LY = 0.5
    LZ = 0.5
    mesh_resolution = 6.0  # elements per unit length

    def loading(t):
        # Sinusoidal prescribed displacement; amplitude scales with LZ.
        level = 4 * LZ
        N = 1.5
        return level * np.sin(N * t * 2 * np.pi)

    # show_loading(loading)  # if you really insist on it :P

    # Material parameters: linear hardening derived from E and tangent Et.
    prm = c.Parameters(c.Constraint.PLANE_STRESS)
    prm.E = 1000.0
    prm.Et = prm.E / 100.0
    prm.sig0 = 12.0
    prm.H = 15.0 * prm.E * prm.Et / (prm.E - prm.Et)
    prm.nu = 0.3
    prm.deg_d = 3  # displacement interpolation degree
    prm.deg_q = 4  # quadrature degree
    law = None  # for now, plays no role.
    mesh = df.RectangleMesh(
        df.Point(0, 0),
        df.Point(LX, LY),
        int(LX * mesh_resolution),
        int(LY * mesh_resolution),
    )
    # Von Mises yield with hardening + rate-independent history variable.
    yf = Yield_VM(prm.sig0, prm.constraint, H=prm.H)
    ri = c.RateIndependentHistory()
    # mat = PlasticConsitutivePerfect(prm.E, prm.nu, prm.constraint, yf=yf)
    mat = PlasticConsitutiveRateIndependentHistory(prm.E, prm.nu,
                                                   prm.constraint,
                                                   yf=yf, ri=ri)
    plasticity = Plasticity(mat)
    problem = c.MechanicsProblem(mesh, prm, law=law, iploop=plasticity)

    # BCs: driven point at the top-right corner (y-component follows the
    # loading), clamped left edge.
    left = boundary.plane_at(0.0)
    right_top = boundary.point_at((LX, LY))
    bc_expr = df.Expression("u", degree=0, u=0)
    bcs = []
    bcs.append(
        df.DirichletBC(problem.Vd.sub(1), bc_expr, right_top,
                       method="pointwise"))
    bcs.append(df.DirichletBC(problem.Vd, (0, 0), left))
    problem.set_bcs(bcs)

    # Newton solver backed by a MUMPS direct linear solver; keep stepping
    # even on non-convergence (adaptivity handles it).
    linear_solver = df.LUSolver("mumps")
    solver = df.NewtonSolver(df.MPI.comm_world, linear_solver,
                             df.PETScFactory.instance())
    solver.parameters["linear_solver"] = "mumps"
    solver.parameters["maximum_iterations"] = 10
    solver.parameters["error_on_nonconvergence"] = False

    def solve(t, dt):
        # Advance one step: update the driven BC, then Newton-solve.
        print(t, dt)
        bc_expr.u = loading(t)
        return solver.solve(problem, problem.u.vector())

    ld = c.helper.LoadDisplacementCurve(bcs[0])
    ld.show()
    if not ld.is_root:
        # Silence non-root MPI ranks.
        set_log_level(LogLevel.ERROR)

    fff = df.XDMFFile("output.xdmf")
    fff.parameters["functions_share_mesh"] = True
    fff.parameters["flush_output"] = True

    def pp(t):
        # Post-process one accepted step.
        problem.update()
        # this fixes XDMF time stamps
        import locale
        locale.setlocale(locale.LC_NUMERIC, "en_US.UTF-8")
        fff.write(problem.u, t)
        ld(t, df.assemble(problem.R))

    TimeStepper(solve, pp, problem.u).dt_max(0.02).adaptive(1.0, dt=0.01)
def _setup_imex_problem(self): assert hasattr(self, "_parameters") assert hasattr(self, "_mesh") assert hasattr(self, "_Wh") assert hasattr(self, "_coefficients") assert hasattr(self, "_one") assert hasattr(self, "_omega") assert hasattr(self, "_v0") assert hasattr(self, "_v00") assert hasattr(self, "_T0") assert hasattr(self, "_T00") print " setup explicit imex problem..." #======================================================================= # retrieve imex coefficients a, b, c = self._imex_alpha, self._imex_beta, self._imex_gamma #======================================================================= # trial and test function (del_v, del_p, del_T) = dlfn.TestFunctions(self._Wh) (dv, dp, dT) = dlfn.TrialFunctions(self._Wh) # volume element dV = dlfn.Measure("dx", domain=self._mesh) # reference to time step timestep = self._timestep #======================================================================= from dolfin import dot, grad, inner # 1) lhs momentum equation lhs_momentum = a[0] / timestep * dot(dv, del_v) * dV \ + c[0] * self._coefficients[1] * a_op(dv, del_v) * dV\ - b_op(del_v, dp) * dV\ - b_op(dv, del_p) * dV # 2a) rhs momentum equation: time derivative rhs_momentum = -dot( a[1] / timestep * self._v0 + a[2] / timestep * self._v00, del_v) * dV # 2b) rhs momentum equation: nonlinear term nonlinear_term_velocity = b[0] * dot(grad(self._v0), self._v0) \ + b[1] * dot(grad(self._v00), self._v00) rhs_momentum -= dot(nonlinear_term_velocity, del_v) * dV # 2c) rhs momentum equation: linear term rhs_momentum -= self._coefficients[1] * inner( c[1] * grad(self._v0) + c[2] * grad(self._v00), grad(del_v)) * dV # 2d) rhs momentum equation: coriolis term if self._parameters.rotation is True: assert self._coefficients[0] != 0.0 print " adding rotation to the model..." 
# defining extrapolated velocity extrapolated_velocity = (self._one + self._omega) * self._v0 \ - self._omega * self._v00 # set Coriolis term if self._space_dim == 2: rhs_momentum -= self._coefficients[0] * ( -extrapolated_velocity[1] * del_v[0] + extrapolated_velocity[0] * del_v[1]) * dV elif self._space_dim == 3: from dolfin import cross coriolis_term = cross(self._rotation_vector, extrapolated_velocity) rhs_momentum -= self._coefficients[0] * dot( coriolis_term, del_v) * dV print " adding rotation to the model..." # 2e) rhs momentum equation: buoyancy term if self._parameters.buoyancy is True: assert self._coefficients[2] != 0.0 # defining extrapolated temperature extrapolated_temperature = ( self._one + self._omega) * self._T0 - self._omega * self._T00 # buoyancy term print " adding buoyancy to the model..." rhs_momentum -= self._coefficients[ 2] * extrapolated_temperature * dot(self._gravity, del_v) * dV #======================================================================= # 3) lhs energy equation lhs_energy = a[0] / timestep * dot(dT, del_T) * dV \ + self._coefficients[3] * a_op(dT, del_T) * dV # 4a) rhs energy equation: time derivative rhs_energy = -dot( a[1] / timestep * self._T0 + a[2] / timestep * self._T00, del_T) * dV # 4b) rhs energy equation: nonlinear term nonlinear_term_temperature = b[0] * dot(self._v0, grad(self._T0)) \ + b[1] * dot(self._v00, grad(self._T00)) rhs_energy -= nonlinear_term_temperature * del_T * dV # 4c) rhs energy equation: linear term rhs_energy -= self._coefficients[3] \ * dot(c[1] * grad(self._T0) + c[2] * grad(self._T00), grad(del_T)) * dV #======================================================================= # full problem self._lhs = lhs_momentum + lhs_energy self._rhs = rhs_momentum + rhs_energy if not hasattr(self, "_dirichlet_bcs"): self._setup_boundary_conditions() if self._parameters.use_assembler_method: # system assembler self._assembler = dlfn.SystemAssembler(self._lhs, self._rhs, bcs=self._dirichlet_bcs) 
self._system_matrix = dlfn.Matrix() self._system_rhs = dlfn.Vector() self._solver = dlfn.LUSolver(self._system_matrix) else: # linear problem problem = dlfn.LinearVariationalProblem(self._lhs, self._rhs, self._sol, bcs=self._dirichlet_bcs) self._solver = dlfn.LinearVariationalSolver(problem)