Code example #1
def ams(singular=False, **kwargs):
    prc = kwargs.pop('prc')
    blockname = kwargs.pop('blockname')
    print_level = kwargs.pop('print_level', -1)

    row = prc.get_row_by_name(blockname)
    col = prc.get_col_by_name(blockname)
    mat = prc.get_operator_block(row, col)
    fes = prc.get_test_fespace(blockname)
    inv_ams = mfem.HypreAMS(mat, fes)
    if singular:
        inv_ams.SetSingularProblem()
    inv_ams.SetPrintLevel(print_level)
    inv_ams.iterative_mode = False
    return inv_ams
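
A minimal usage sketch (hedged: `prc` stands in for a Petram preconditioner-block object providing get_row_by_name/get_col_by_name/get_operator_block/get_test_fespace, and the block name 'E' is an assumed H(curl) variable):

inv_E = ams(singular=True, prc=prc, blockname='E', print_level=0)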
Code example #2
# 11. Set up a block-diagonal preconditioner for the 2x2 normal equation
#
#        [ S0^{-1}     0     ]
#        [   0     Shat^{-1} ]      Shat = (Bhat^T Sinv Bhat)
#
#     corresponding to the primal (x0) and interfacial (xhat) unknowns.
#     Since the Shat operator is equivalent to an H(div) matrix reduced to
#     the interfacial skeleton, we approximate its inverse with one V-cycle
#     of the ADS preconditioner from the hypre library (in 2D we use AMS for
#     the rotated H(curl) problem).
S0inv = mfem.HypreBoomerAMG(matS0)
S0inv.SetPrintLevel(0)

Shat = mfem.RAP(matSinv, matBhat)
if dim == 2:
    Shatinv = mfem.HypreAMS(Shat, xhat_space)
else:
    Shatinv = mfem.HypreADS(Shat, xhat_space)

P = mfem.BlockDiagonalPreconditioner(true_offsets)
P.SetDiagonalBlock(0, S0inv)
P.SetDiagonalBlock(1, Shatinv)

# 12. Solve the normal equation system using the PCG iterative solver.
#     Check the weighted norm of the residual for the DPG least-squares problem.
#     Wrap the primal variable in a GridFunction for visualization purposes.
pcg = mfem.CGSolver(MPI.COMM_WORLD)
pcg.SetOperator(A)
pcg.SetPreconditioner(P)
pcg.SetRelTol(1e-6)
pcg.SetMaxIter(200)
pcg.SetPrintLevel(1)
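
The listing stops after configuring the solver. A minimal continuation, assuming `b` and `x` are the block right-hand side and block solution vectors assembled earlier in this example, would be:

pcg.Mult(b, x)   # solve the preconditioned normal equation A x = b
# x.GetBlock(0) then holds the primal unknown x0, which can be copied into a
# ParGridFunction on the x0 space for the residual check and visualization.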
Code example #3
File: ex13p.py  Project: matthiastaus/PyMFEM
a.Assemble()
a.EliminateEssentialBCDiag(ess_bdr, 1.0)
a.Finalize()

m = mfem.ParBilinearForm(fespace)
m.AddDomainIntegrator(mfem.VectorFEMassIntegrator(one))
m.Assemble()
# Shift the eigenvalues corresponding to the eliminated dofs to a large value:
# placing a tiny value (~DBL_MIN) on the diagonal of M for those dofs makes the
# generalized eigenvalues a_ii/m_ii huge, so they stay away from the low modes.
m.EliminateEssentialBCDiag(ess_bdr, 2.3e-308)
m.Finalize()

A = a.ParallelAssemble()
M = m.ParallelAssemble()

ams = mfem.HypreAMS(A, fespace)
ams.SetPrintLevel(0)
ams.SetSingularProblem()

ame = mfem.HypreAME(MPI.COMM_WORLD)
ame.SetNumModes(nev)
ame.SetPreconditioner(ams)
ame.SetMaxIter(100)
ame.SetTol(1e-8)
ame.SetPrintLevel(1)
ame.SetMassMatrix(M)
ame.SetOperator(A)

eigenvalues = mfem.doubleArray()   # array that will receive the computed eigenvalues
ame.Solve()
ame.GetEigenvalues(eigenvalues)
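
A short reporting sketch (assumption: `myid` holds the MPI rank, as in the other parallel examples):

if myid == 0:
    for i in range(eigenvalues.Size()):
        print("eigenvalue %d: %.8e" % (i, eigenvalues[i]))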
Code example #4
    def solve_parallel(self, A, b, x=None):
        from mpi4py import MPI
        import numpy as np    # used below to gather the distributed solution
        myid = MPI.COMM_WORLD.rank
        nproc = MPI.COMM_WORLD.size
        smyid = '{:0>6d}'.format(myid)   # zero-padded rank id, used as a file suffix
        from petram.helper.mpi_recipes import gather_vector

        def get_block(Op, i, j):
            try:
                return Op._linked_op[(i, j)]
            except KeyError:
                return None

        offset = A.RowOffsets()
        rows = A.NumRowBlocks()
        cols = A.NumColBlocks()

        if self.gui.write_mat:
            for i in range(cols):
                for j in range(rows):
                    m = get_block(A, i, j)
                    if m is None: continue
                    m.Print('matrix_' + str(i) + '_' + str(j))
            for i, bb in enumerate(b):
                for j in range(rows):
                    v = bb.GetBlock(j)
                    v.Print('rhs_' + str(i) + '_' + str(j) + '.' + smyid)
            if x is not None:
                for j in range(rows):
                    xx = x.GetBlock(j)
                    xx.Print('x_' + str(i) + '_' + str(j) + '.' + smyid)

        M = mfem.BlockDiagonalPreconditioner(offset)

        prcs = dict(self.gui.preconditioners)
        name = self.Aname
        assert not self.gui.parent.is_complex(), "cannot solve complex problems"
        if self.gui.parent.is_converted_from_complex():
            name = sum([[n, n] for n in name], [])

        for k, n in enumerate(name):
            prc = prcs[n][1]
            if prc == "None": continue
            name = "".join([tmp for tmp in prc if not tmp.isdigit()])

            A0 = get_block(A, k, k)
            if A0 is None and not prc_name.startswith('schur'): continue

            if hasattr(mfem.HypreSmoother, prc):
                invA0 = mfem.HypreSmoother(A0)
                invA0.SetType(getattr(mfem.HypreSmoother, prc))
            elif prc == 'ams':
                depvar = self.engine.r_dep_vars[k]
                dprint1("setting up AMS for ", depvar)
                prec_fespace = self.engine.fespaces[depvar]
                invA0 = mfem.HypreAMS(A0, prec_fespace)
                invA0.SetSingularProblem()
            elif prc_name == 'MUMPS':
                cls = SparseSmootherCls[prc_name][0]
                invA0 = cls(A0, gui=self.gui[prc], engine=self.engine)
            elif prc_name.startswith('schur'):
                args = prc_name.split("(")[-1].split(")")[0].split(",")
                dprint1("setting up schur for ", args)
                if len(args) > 1:
                    assert False, "not yet supported"
                for arg in args:
                    r1 = self.engine.dep_var_offset(arg.strip())
                    c1 = self.engine.r_dep_var_offset(arg.strip())
                    B = get_block(A, k, c1)
                    Bt = get_block(A, r1, k).Transpose()
                    Bt = Bt.Transpose()
                    B0 = get_block(A, r1, c1)
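                    # Approximate Schur complement for block k: the double
                    # Transpose() above yields a writable copy of the coupling
                    # block, its rows are scaled by diag(B0)^{-1} below, and
                    # S ~ B * diag(B0)^{-1} * Bt is handed to BoomerAMG as an
                    # approximate inverse.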
                    Md = mfem.HypreParVector(MPI.COMM_WORLD,
                                             B0.GetGlobalNumRows(),
                                             B0.GetColStarts())
                    B0.GetDiag(Md)
                    Bt.InvScaleRows(Md)
                    S = mfem.ParMult(B, Bt)
                    invA0 = mfem.HypreBoomerAMG(S)
                    invA0.iterative_mode = False
            else:
                cls = SparseSmootherCls[prc_name][0]
                invA0 = cls(A0, gui=self.gui[prc])

            invA0.iterative_mode = False
            M.SetDiagonalBlock(k, invA0)
        '''
        We should support Schur complement type preconditioners
        if offset.Size() > 2:
            B =  get_block(A, 1, 0)
            MinvBt = get_block(A, 0, 1)
            #Md = mfem.HypreParVector(MPI.COMM_WORLD,
            #                        A0.GetGlobalNumRows(),
            #                        A0.GetRowStarts())
            Md = mfem.Vector()
            A0.GetDiag(Md)
            MinvBt.InvScaleRows(Md)
            S = mfem.ParMult(B, MinvBt)
            invS = mfem.HypreBoomerAMG(S)
            invS.iterative_mode = False
            M.SetDiagonalBlock(1, invS)
        '''
        maxiter = int(self.maxiter)
        atol = self.abstol
        rtol = self.reltol
        kdim = int(self.kdim)
        printit = 1

        sol = []

        solver = mfem.GMRESSolver(MPI.COMM_WORLD)
        solver.SetKDim(kdim)

        #solver = mfem.MINRESSolver(MPI.COMM_WORLD)
        #solver.SetOperator(A)

        #solver = mfem.CGSolver(MPI.COMM_WORLD)
        solver.SetOperator(A)
        solver.SetAbsTol(atol)
        solver.SetRelTol(rtol)
        solver.SetMaxIter(maxiter)
        solver.SetPreconditioner(M)
        solver.SetPrintLevel(1)

        # solve the problem and gather solution to head node...
        # may not be the best approach

        for bb in b:
            local_sizes = MPI.COMM_WORLD.allgather(np.int32(bb.Size()))
            rowstarts = np.hstack((0, np.cumsum(local_sizes)))
            dprint1("rowstarts/offset", rowstarts, offset.ToList())
            if x is None:
                xx = mfem.BlockVector(offset)
                xx.Assign(0.0)
            else:
                xx = x
                #for j in range(cols):
                #   dprint1(x.GetBlock(j).Size())
                #   dprint1(x.GetBlock(j).GetDataArray())
                #assert False, "must implement this"
            solver.Mult(bb, xx)
            s = []
            for i in range(offset.Size() - 1):
                v = xx.GetBlock(i).GetDataArray()
                vv = gather_vector(v)
                if myid == 0:
                    s.append(vv)
                else:
                    pass
            if myid == 0:
                sol.append(np.hstack(s))
        if myid == 0:
            sol = np.transpose(np.vstack(sol))
            return sol
        else:
            return None
Code example #5
# 12. Define and apply a parallel PCG solver for A X = B with the 2D AMS or
#     the 3D ADS preconditioners from hypre. If using hybridization, the
#     system is preconditioned with hypre's BoomerAMG.

pcg = mfem.CGSolver(A.GetComm())
pcg.SetOperator(A)
pcg.SetRelTol(1e-12)
pcg.SetMaxIter(500)
pcg.SetPrintLevel(1)
if hybridization:
    prec = mfem.HypreBoomerAMG(A)
else:
    if a.StaticCondensationIsEnabled():
        prec_fespace = a.SCParFESpace()
    else:
        prec_fespace = fespace
    if dim == 2:
        prec = mfem.HypreAMS(A, prec_fespace)
    else:
        prec = mfem.HypreADS(A, prec_fespace)

pcg.SetPreconditioner(prec)
pcg.Mult(B, X)

# 13. Recover the parallel grid function corresponding to X. This is the
#     local finite element solution on each processor.
a.RecoverFEMSolution(X, b, x)

# 14. Compute and print the L^2 norm of the error.
err = x.ComputeL2Error(F)
if myid == 0:
    print("|| F_h - F ||_{L^2} = " + str(err))

# 15. Save the refined mesh and the solution in parallel. This output can
#     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
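
A minimal sketch of that save step (assumptions: the parallel mesh and the recovered grid function are named pmesh and x, as elsewhere in the example, and the per-rank file suffix is illustrative):

smesh = "mesh." + str(myid).zfill(6)
ssol = "sol." + str(myid).zfill(6)
pmesh.Print(smesh, 8)
x.Save(ssol, 8)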
Code example #6
if static_cond:
    a.EnableStaticCondensation()
a.Assemble()

A = mfem.HypreParMatrix()
B = mfem.Vector()
X = mfem.Vector()
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B)

if verbose:
    print("Size of linear system: " + str(A.GetGlobalNumRows()))

# 12. Define and apply a parallel PCG solver for AX=B with the AMS
#     preconditioner from hypre.
prec_fespace = (a.SCParFESpace()
                if a.StaticCondensationIsEnabled() else fespace)
ams = mfem.HypreAMS(A, prec_fespace)
pcg = mfem.HyprePCG(A)
pcg.SetTol(1e-12)
pcg.SetMaxIter(500)
pcg.SetPrintLevel(2)
pcg.SetPreconditioner(ams)
pcg.Mult(B, X)

# 13. Recover the parallel grid function corresponding to X. This is the
#     local finite element solution on each processor.

a.RecoverFEMSolution(X, b, x)

# 14. Compute and print the L^2 norm of the error.
err = x.ComputeL2Error(E)
if verbose:  # note that err should be evaluated on all nodes
    print("|| E_h - E ||_{L^2} = " + str(err))