def gmres(atol=0.0, rtol=0.0, max_num_iter=5, kdim=50, print_level=-1,
          preconditioner=None, **kwargs):
    prc = kwargs.pop('prc')
    blockname = kwargs.pop('blockname')

    if use_parallel:
        gmres = mfem.GMRESSolver(MPI.COMM_WORLD)
    else:
        gmres = mfem.GMRESSolver()

    gmres.iterative_mode = False
    gmres.SetRelTol(rtol)
    gmres.SetAbsTol(atol)
    gmres.SetMaxIter(max_num_iter)
    gmres.SetPrintLevel(print_level)
    gmres.SetKDim(kdim)

    r0 = prc.get_row_by_name(blockname)
    c0 = prc.get_col_by_name(blockname)
    A0 = prc.get_operator_block(r0, c0)
    gmres.SetOperator(A0)

    if preconditioner is not None:
        gmres.SetPreconditioner(preconditioner)

    # keep this object from being freed...
    gmres._prc = preconditioner
    return gmres
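# Hypothetical usage sketch of the factory above: 'prc' is assumed to be the
# surrounding preconditioner-block object providing get_row_by_name /
# get_col_by_name / get_operator_block, and 'amg' any mfem.Solver to use as
# the inner preconditioner; names and values here are illustrative only.
#
#     inner = gmres(rtol=1e-8, max_num_iter=200, kdim=30, print_level=1,
#                   preconditioner=amg, prc=prc, blockname='pressure')
#     inner.Mult(rhs_block, sol_block)   # mfem.Vector objects sized to the block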
def __init__(self, spaces, ess_bdr, block_offsets,
             rel_tol, abs_tol, iter, mu):

    # Array<Vector *> -> tuple
    super(RubberOperator, self).__init__(spaces[0].TrueVSize() +
                                         spaces[1].TrueVSize())
    rhs = (None, None)

    self.spaces = spaces
    self.mu = mfem.ConstantCoefficient(mu)
    self.block_offsets = block_offsets

    Hform = mfem.ParBlockNonlinearForm(spaces)
    Hform.AddDomainIntegrator(
        mfem.IncompressibleNeoHookeanIntegrator(self.mu))
    Hform.SetEssentialBC(ess_bdr, rhs)
    self.Hform = Hform

    a = mfem.ParBilinearForm(self.spaces[1])
    one = mfem.ConstantCoefficient(1.0)
    mass = mfem.OperatorHandle(mfem.Operator.Hypre_ParCSR)
    a.AddDomainIntegrator(mfem.MassIntegrator(one))
    a.Assemble()
    a.Finalize()
    a.ParallelAssemble(mass)
    mass.SetOperatorOwner(False)
    pressure_mass = mass.Ptr()

    self.j_prec = JacobianPreconditioner(spaces, pressure_mass,
                                         block_offsets)

    j_gmres = mfem.GMRESSolver(MPI.COMM_WORLD)
    j_gmres.iterative_mode = False
    j_gmres.SetRelTol(1e-12)
    j_gmres.SetAbsTol(1e-12)
    j_gmres.SetMaxIter(300)
    j_gmres.SetPrintLevel(0)
    j_gmres.SetPreconditioner(self.j_prec)
    self.j_solver = j_gmres

    newton_solver = mfem.NewtonSolver(MPI.COMM_WORLD)
    # Set the Newton solver parameters
    newton_solver.iterative_mode = True
    newton_solver.SetSolver(self.j_solver)
    newton_solver.SetOperator(self)
    newton_solver.SetPrintLevel(1)
    newton_solver.SetRelTol(rel_tol)
    newton_solver.SetAbsTol(abs_tol)
    newton_solver.SetMaxIter(iter)
    self.newton_solver = newton_solver
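# A minimal sketch (assumption: 'spaces' is the same displacement/pressure pair
# of ParFiniteElementSpace objects passed to the constructor above) of how the
# block_offsets argument consumed here is typically laid out:
#
#     block_offsets = mfem.intArray([0,
#                                    spaces[0].TrueVSize(),
#                                    spaces[1].TrueVSize()])
#     block_offsets.PartialSum()   # turn block sizes into cumulative offsets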
def SetOperator(self, op):
    self.jacobian = mfem.Opr2BlockOpr(op)
    if self.stiff_prec is None:
        # Initialize the stiffness preconditioner and solver
        stiff_prec_amg = mfem.HypreBoomerAMG()
        stiff_prec_amg.SetPrintLevel(0)
        stiff_prec_amg.SetElasticityOptions(self.spaces[0])
        self.stiff_prec = stiff_prec_amg

        stiff_pcg_iter = mfem.GMRESSolver(MPI.COMM_WORLD)
        stiff_pcg_iter.SetRelTol(1e-8)
        stiff_pcg_iter.SetAbsTol(1e-8)
        stiff_pcg_iter.SetMaxIter(200)
        stiff_pcg_iter.SetPrintLevel(0)
        stiff_pcg_iter.SetPreconditioner(self.stiff_prec)
        stiff_pcg_iter.iterative_mode = False
        self.stiff_pcg = stiff_pcg_iter

    # At each Newton cycle, compute the new stiffness preconditioner by
    # updating the iterative solver which, in turn, updates its preconditioner
    self.stiff_pcg.SetOperator(self.jacobian.GetBlock(0, 0))
del a
del b

# 11. Depending on the symmetry of A, define and apply a parallel PCG or
#     GMRES solver for AX=B using the BoomerAMG preconditioner from hypre.
amg = mfem.HypreBoomerAMG(A)
if sigma == -1.0:
    pcg = mfem.HyprePCG(A)
    pcg.SetTol(1e-12)
    pcg.SetMaxIter(200)
    pcg.SetPrintLevel(2)
    pcg.SetPreconditioner(amg)
    pcg.Mult(B, X)
else:
    gmres = mfem.GMRESSolver(MPI.COMM_WORLD)
    gmres.SetAbsTol(0.0)
    gmres.SetRelTol(1e-12)
    gmres.SetMaxIter(200)
    gmres.SetKDim(10)
    gmres.SetPrintLevel(1)
    gmres.SetOperator(A)
    gmres.SetPreconditioner(amg)
    gmres.Mult(B, X)
del amg

# 12. Extract the parallel grid function corresponding to the finite element
#     approximation X. This is the local solution on each processor.
x.Assign(X)

# 13. Save the refined mesh and the solution in parallel. This output can
# 11. Define the BoomerAMG preconditioner (with elasticity or systems options)
#     and use it to solve the system AX=B with PCG for the symmetric
#     formulation, or GMRES for the non-symmetric one.
rtol = 1e-6
amg = mfem.HypreBoomerAMG(A)
if amg_elast:
    amg.SetElasticityOptions(fespace)
else:
    amg.SetSystemsOptions(dim)

if alpha == -1.0:
    solver = mfem.CGSolver(A.GetComm())
else:
    solver = mfem.GMRESSolver(A.GetComm())
    solver.SetKDim(50)

solver.SetRelTol(rtol)
solver.SetMaxIter(500)
solver.SetPrintLevel(1)
solver.SetOperator(A)
solver.SetPreconditioner(amg)
solver.Mult(B, X)

# 12. Recover the solution as a finite element grid function 'x'.
a.RecoverFEMSolution(X, b, x)

# 13. Use the DG solution space as the mesh nodal space. This allows us to
#     save the displaced mesh as a curved DG mesh.
pmesh.SetNodalFESpace(fespace)
def solve_serial(self, A, b, x=None):

    def get_block(Op, i, j):
        try:
            return Op._linked_op[(i, j)]
        except KeyError:
            return None

    offset = A.RowOffsets()
    rows = A.NumRowBlocks()
    cols = A.NumColBlocks()

    if self.gui.write_mat:
        for i in range(cols):
            for j in range(rows):
                m = get_block(A, i, j)
                if m is None:
                    continue
                m.Print('matrix_' + str(i) + '_' + str(j))
        for i, bb in enumerate(b):
            for j in range(rows):
                v = bb.GetBlock(j)
                v.Print('rhs_' + str(i) + '_' + str(j))

    M = mfem.BlockDiagonalPreconditioner(offset)

    prcs = dict(self.gui.preconditioners)
    name = self.Aname
    assert not self.gui.parent.is_complex(), "can not solve complex"
    if self.gui.parent.is_converted_from_complex():
        name = sum([[n, n] for n in name], [])

    '''
    this if block does a generalized version of this...
        M1 = mfem.GSSmoother(get_block(A, 0, 0))
        M1.iterative_mode = False
        M.SetDiagonalBlock(0, M1)
    '''
    for k, n in enumerate(name):
        prc = prcs[n][0]
        if prc == "None":
            continue
        name = "".join([tmp for tmp in prc if not tmp.isdigit()])
        A0 = get_block(A, k, k)
        cls = SparseSmootherCls[name][0]
        arg = SparseSmootherCls[name][1]
        if name == 'MUMPS':
            invA0 = cls(A0, gui=self.gui[prc], engine=self.engine)
        else:
            invA0 = cls(A0, arg)
        invA0.iterative_mode = False
        M.SetDiagonalBlock(k, invA0)

    '''
    We should support Schur complement type preconditioner
    if offset.Size() > 2:
        B = get_block(A, 1, 0)
        MinvBt = get_block(A, 0, 1)
        Md = mfem.Vector(get_block(A, 0, 0).Height())
        get_block(A, 0, 0).GetDiag(Md)
        for i in range(Md.Size()):
            if Md[i] != 0.:
                MinvBt.ScaleRow(i, 1/Md[i])
            else:
                assert False, "diagonal element of matrix is zero"
        S = mfem.Mult(B, MinvBt)
        S.iterative_mode = False
        SS = mfem.DSmoother(S)
        SS.iterative_mode = False
        M.SetDiagonalBlock(1, SS)
    '''
    '''
    int GMRES(const Operator &A, Vector &x, const Vector &b, Solver &M,
              int &max_iter, int m, double &tol, double atol, int printit)
    '''
    maxiter = int(self.maxiter)
    atol = self.abstol
    rtol = self.reltol
    kdim = int(self.kdim)
    printit = 1

    sol = []

    solver = mfem.GMRESSolver()
    solver.SetKDim(kdim)
    #solver = mfem.MINRESSolver()
    solver.SetAbsTol(atol)
    solver.SetRelTol(rtol)
    solver.SetMaxIter(maxiter)
    solver.SetOperator(A)
    solver.SetPreconditioner(M)
    solver.SetPrintLevel(1)

    for bb in b:
        if x is None:
            xx = mfem.Vector(bb.Size())
            xx.Assign(0.0)
        else:
            xx = x
            #for j in range(cols):
            #    print x.GetBlock(j).Size()
            #    print x.GetBlock(j).GetDataArray()
            #assert False, "must implement this"
        solver.Mult(bb, xx)
        sol.append(xx.GetDataArray().copy())

    sol = np.transpose(np.vstack(sol))
    return sol
def solve_parallel(self, A, b, x=None):
    from mpi4py import MPI
    myid = MPI.COMM_WORLD.rank
    nproc = MPI.COMM_WORLD.size
    # zero-padded rank, used as a suffix for per-process output files
    smyid = '{:0>6d}'.format(myid)
    from petram.helper.mpi_recipes import gather_vector

    def get_block(Op, i, j):
        try:
            return Op._linked_op[(i, j)]
        except KeyError:
            return None

    offset = A.RowOffsets()
    rows = A.NumRowBlocks()
    cols = A.NumColBlocks()

    if self.gui.write_mat:
        for i in range(cols):
            for j in range(rows):
                m = get_block(A, i, j)
                if m is None:
                    continue
                m.Print('matrix_' + str(i) + '_' + str(j))
        for i, bb in enumerate(b):
            for j in range(rows):
                v = bb.GetBlock(j)
                v.Print('rhs_' + str(i) + '_' + str(j) + '.' + smyid)
        if x is not None:
            for j in range(rows):
                xx = x.GetBlock(j)
                xx.Print('x_' + str(i) + '_' + str(j) + '.' + smyid)

    M = mfem.BlockDiagonalPreconditioner(offset)

    prcs = dict(self.gui.preconditioners)
    name = self.Aname
    assert not self.gui.parent.is_complex(), "can not solve complex"
    if self.gui.parent.is_converted_from_complex():
        name = sum([[n, n] for n in name], [])

    for k, n in enumerate(name):
        prc = prcs[n][1]
        if prc == "None":
            continue
        name = "".join([tmp for tmp in prc if not tmp.isdigit()])
        A0 = get_block(A, k, k)
        if A0 is None and not name.startswith('schur'):
            continue

        if hasattr(mfem.HypreSmoother, prc):
            invA0 = mfem.HypreSmoother(A0)
            invA0.SetType(getattr(mfem.HypreSmoother, prc))
        elif prc == 'ams':
            depvar = self.engine.r_dep_vars[k]
            dprint1("setting up AMS for ", depvar)
            prec_fespace = self.engine.fespaces[depvar]
            invA0 = mfem.HypreAMS(A0, prec_fespace)
            invA0.SetSingularProblem()
        elif name == 'MUMPS':
            cls = SparseSmootherCls[name][0]
            invA0 = cls(A0, gui=self.gui[prc], engine=self.engine)
        elif name.startswith('schur'):
            args = name.split("(")[-1].split(")")[0].split(",")
            dprint1("setting up schur for ", args)
            if len(args) > 1:
                assert False, "not yet supported"
            for arg in args:
                r1 = self.engine.dep_var_offset(arg.strip())
                c1 = self.engine.r_dep_var_offset(arg.strip())
                B = get_block(A, k, c1)
                Bt = get_block(A, r1, k).Transpose()
                Bt = Bt.Transpose()
                B0 = get_block(A, r1, c1)
                Md = mfem.HypreParVector(MPI.COMM_WORLD,
                                         B0.GetGlobalNumRows(),
                                         B0.GetColStarts())
                B0.GetDiag(Md)
                Bt.InvScaleRows(Md)
                S = mfem.ParMult(B, Bt)
                invA0 = mfem.HypreBoomerAMG(S)
                invA0.iterative_mode = False
        else:
            cls = SparseSmootherCls[name][0]
            invA0 = cls(A0, gui=self.gui[prc])

        invA0.iterative_mode = False
        M.SetDiagonalBlock(k, invA0)

    '''
    We should support Schur complement type preconditioner
    if offset.Size() > 2:
        B = get_block(A, 1, 0)
        MinvBt = get_block(A, 0, 1)
        #Md = mfem.HypreParVector(MPI.COMM_WORLD,
        #                         A0.GetGlobalNumRows(),
        #                         A0.GetRowStarts())
        Md = mfem.Vector()
        A0.GetDiag(Md)
        MinvBt.InvScaleRows(Md)
        S = mfem.ParMult(B, MinvBt)
        invS = mfem.HypreBoomerAMG(S)
        invS.iterative_mode = False
        M.SetDiagonalBlock(1, invS)
    '''
    maxiter = int(self.maxiter)
    atol = self.abstol
    rtol = self.reltol
    kdim = int(self.kdim)
    printit = 1

    sol = []

    solver = mfem.GMRESSolver(MPI.COMM_WORLD)
    solver.SetKDim(kdim)
    #solver = mfem.MINRESSolver(MPI.COMM_WORLD)
    #solver.SetOperator(A)
    #solver = mfem.CGSolver(MPI.COMM_WORLD)
    solver.SetOperator(A)
    solver.SetAbsTol(atol)
    solver.SetRelTol(rtol)
    solver.SetMaxIter(maxiter)
    solver.SetPreconditioner(M)
    solver.SetPrintLevel(1)

    # solve the problem and gather solution to head node...
    # may not be the best approach
    for bb in b:
        rows = MPI.COMM_WORLD.allgather(np.int32(bb.Size()))
        rowstarts = np.hstack((0, np.cumsum(rows)))
        dprint1("rowstarts/offset", rowstarts, offset.ToList())
        if x is None:
            xx = mfem.BlockVector(offset)
            xx.Assign(0.0)
        else:
            xx = x
            #for j in range(cols):
            #    dprint1(x.GetBlock(j).Size())
            #    dprint1(x.GetBlock(j).GetDataArray())
            #assert False, "must implement this"
        solver.Mult(bb, xx)

        s = []
        for i in range(offset.Size() - 1):
            v = xx.GetBlock(i).GetDataArray()
            vv = gather_vector(v)
            if myid == 0:
                s.append(vv)
            else:
                pass
        if myid == 0:
            sol.append(np.hstack(s))

    if myid == 0:
        sol = np.transpose(np.vstack(sol))
        return sol
    else:
        return None
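# Self-contained sketch of the solver pattern used by solve_serial/solve_parallel
# above: a BlockDiagonalPreconditioner whose diagonal blocks are simple smoothers,
# driving GMRES on a 2x2 block operator. All sizes, values, and the serial
# 'mfem.ser' import are illustrative assumptions, not part of the original code.
import mfem.ser as mfem

n = 4
A0 = mfem.SparseMatrix(n)          # small SPD diagonal block for demonstration
for i in range(n):
    A0.Add(i, i, 2.0)
A0.Finalize()

offsets = mfem.intArray([0, n, n])
offsets.PartialSum()               # [0, n, 2n]

A = mfem.BlockOperator(offsets)    # 2x2 block operator with A0 on the diagonal
A.SetBlock(0, 0, A0)
A.SetBlock(1, 1, A0)

M = mfem.BlockDiagonalPreconditioner(offsets)
smoothers = []
for k in range(2):
    sm = mfem.GSSmoother(A0)       # one smoother per diagonal block
    sm.iterative_mode = False
    smoothers.append(sm)           # keep references so SWIG objects are not freed
    M.SetDiagonalBlock(k, sm)

solver = mfem.GMRESSolver()
solver.SetKDim(30)
solver.SetRelTol(1e-10)
solver.SetAbsTol(0.0)
solver.SetMaxIter(100)
solver.SetOperator(A)
solver.SetPreconditioner(M)
solver.SetPrintLevel(1)

bb = mfem.BlockVector(offsets)
bb.Assign(1.0)
xx = mfem.BlockVector(offsets)
xx.Assign(0.0)
solver.Mult(bb, xx)                # xx now holds the block solution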