# Set up a Krylov solver (CG, per the demo configuration — the concrete
# KSP type is taken from PETSc options) with convergence monitoring on.
solver = PETSc.KSP().create(MPI.comm_world)
solver.setFromOptions()

# Attach the assembled system matrix as the solver operator.
solver.setOperators(A)


def _monitor(ksp, its, rnorm):
    # Report iteration count and relative residual at every KSP iteration.
    print("Iteration: {}, rel. residual: {}".format(its, rnorm))


# Solve the linear system for the displacement and dump solver diagnostics.
solver.setMonitor(_monitor)
solver.solve(b, u.vector)
solver.view()

# Write the solution in XDMF format for post-processing.
xdmf_file = XDMFFile(MPI.comm_world, "elasticity.xdmf")
xdmf_file.write(u)

# Print the solution vector norm from rank 0 only (norm is collective,
# so it is computed on all ranks before the rank guard).
unorm = u.vector.norm()
if MPI.rank(mesh.mpi_comm()) == 0:
    print("Solution vector norm:", unorm)

# Save colored mesh partitions in VTK format if running in parallel
# if MPI.size(mesh.mpi_comm()) > 1:
#     File("partitions.pvd") << MeshFunction("size_t", mesh, mesh.topology.dim,
#                                            MPI.rank(mesh.mpi_comm()))

# Project and write stress field to post-processing file
# W = TensorFunctionSpace(mesh, "Discontinuous Lagrange", 0)
# stress = project(sigma(u), V=W)
# File("stress.pvd") << stress
# Output file for the time-dependent solution
file = XDMFFile(MPI.comm_world, "output.xdmf")

# Current simulation time
t = 0.0

# Check if we are running on a CI server and reduce the run time.
# NOTE: membership tests go directly against the mapping — `.keys()` is
# a redundant intermediate (Python dict/os.environ idiom).
if "CI" in os.environ:
    T = 3 * dt
else:
    T = 50 * dt

# Seed u0 with the initial solution and make ghost values consistent
# across MPI ranks before the first time step.
u.vector.copy(result=u0.vector)
u0.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                      mode=PETSc.ScatterMode.FORWARD)

# Time-stepping: solve the nonlinear problem at each step, roll the new
# solution over into u0, and write the first sub-function to file.
while t < T:
    t += dt
    r = solver.solve(problem, u.vector)
    # r[0] is printed as the iteration count of the nonlinear solve
    print("Step, num iterations:", int(t / dt), r[0])
    u.vector.copy(result=u0.vector)
    file.write(u.sub(0), t)

# Within the time stepping loop, the nonlinear problem is solved by
# calling :py:func:`solver.solve(problem,u.vector)<dolfinx.cpp.NewtonSolver.solve>`,
# with the new solution vector returned in :py:func:`u.vector<dolfinx.cpp.Function.vector>`.
# The solution vector associated with ``u`` is copied to ``u0`` at the
# end of each time step, and the ``c`` component of the solution
# (the first component of ``u``) is then written to file.