def gather_blkvec_merged(self, size_hint=None, symmetric=False):
    '''
    Construct MFEM::BlockVector

    This routine orders unknowns as
       (Re_FES1, Im_FES1, Re_FES2, Im_FES2, ...),
    where the real and imaginary parts of each FES are merged into a
    single block (all real entries followed by all imaginary entries).
    '''
    assert self.complex, "this format is complex only"

    roffsets = self.get_local_partitioning_v(size_hint=size_hint)
    roffsets = np.sum(np.diff(roffsets).reshape(-1, 2), 1)
    roffsets = np.hstack([0, np.cumsum(roffsets)])

    dprint1("roffsets(vector)", roffsets)

    offset = mfem.intArray(list(roffsets))
    vec = mfem.BlockVector(offset)
    vec._offsets = offset  # keep the intArray alive (prevent it from being freed)

    # Copying the data between two vectors here is not ideal, but avoiding
    # it would require a large rearrangement of the program flow...
    for i in range(self.shape[0]):
        if self[i, 0] is not None:
            if isinstance(self[i, 0], chypre.CHypreVec):
                rr = self[i, 0][0].GetDataArray()
                if self[i, 0][1] is not None:
                    if symmetric:
                        ii = -self[i, 0][1].GetDataArray()
                    else:
                        ii = self[i, 0][1].GetDataArray()
                else:
                    ii = rr * 0
                vv = np.hstack([rr, ii])
                vec.GetBlock(i).Assign(vv)
            elif isinstance(self[i, 0], ScipyCoo):
                arr = np.atleast_1d(self[i, 0].toarray().squeeze())
                if symmetric:
                    arr = np.hstack([np.real(arr), -np.imag(arr)])
                else:
                    arr = np.hstack([np.real(arr), np.imag(arr)])
                vec.GetBlock(i).Assign(arr)
            else:
                assert False, "not implemented, " + str(type(self[i, 0]))
        else:
            vec.GetBlock(i).Assign(0.0)
    return vec
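# --- A minimal serial sketch (not from the source) of the merged layout above,
# assuming PyMFEM is importable as mfem.ser. Each merged block stores its real
# parts followed by its imaginary parts, mirroring the np.hstack([rr, ii])
# assignment in gather_blkvec_merged.
import numpy as np
import mfem.ser as mfem

re = np.array([1.0, 2.0])                   # real parts of one 2-DoF unknown
im = np.array([0.5, -0.5])                  # imaginary parts
offsets = mfem.intArray([0, 2 * re.size])   # a single block of doubled size
vec = mfem.BlockVector(offsets)
vec._offsets = offsets                      # same keep-alive trick as above
vec.GetBlock(0).Assign(np.hstack([re, im]))
print(vec.GetDataArray())                   # [ 1.   2.   0.5 -0.5]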
def gather_blkvec_interleave(self, size_hint=None):
    '''
    Construct MFEM::BlockVector

    This routine orders unknowns as
       (Re_FES1, Im_FES1, Re_FES2, Im_FES2, ...),
    using separate blocks for the real and imaginary parts.
    If self.complex is False, it assembles a normal block vector.

    This routine is used together with get_global_blkmat_interleave(self).
    '''
    roffsets = self.get_local_partitioning_v(convert_real=True,
                                             interleave=True,
                                             size_hint=size_hint)
    dprint1("roffsets(vector)", roffsets)

    offset = mfem.intArray(list(roffsets))
    vec = mfem.BlockVector(offset)
    vec._offsets = offset  # keep the intArray alive (prevent it from being freed)

    ii = 0
    # Copying the data between two vectors here is not ideal, but avoiding
    # it would require a large rearrangement of the program flow...
    for i in range(self.shape[0]):
        if self[i, 0] is not None:
            if isinstance(self[i, 0], chypre.CHypreVec):
                vec.GetBlock(ii).Assign(self[i, 0][0].GetDataArray())
                if self.complex:
                    if self[i, 0][1] is not None:
                        vec.GetBlock(ii + 1).Assign(
                            self[i, 0][1].GetDataArray())
                    else:
                        vec.GetBlock(ii + 1).Assign(0.0)
            elif isinstance(self[i, 0], ScipyCoo):
                arr = np.atleast_1d(self[i, 0].toarray().squeeze())
                vec.GetBlock(ii).Assign(np.real(arr))
                if self.complex:
                    vec.GetBlock(ii + 1).Assign(np.imag(arr))
            else:
                assert False, "not implemented, " + str(type(self[i, 0]))
        else:
            vec.GetBlock(ii).Assign(0.0)
            if self.complex:
                vec.GetBlock(ii + 1).Assign(0.0)
        ii = ii + 2 if self.complex else ii + 1
    return vec
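# --- A minimal serial sketch (not from the source) of the interleaved layout,
# again assuming PyMFEM as mfem.ser: one block for the real part and one for
# the imaginary part of each FES, alternating. With a single FES the flat data
# happens to coincide with the merged layout; the difference appears once
# several FES blocks are stacked.
import numpy as np
import mfem.ser as mfem

re = np.array([1.0, 2.0])
im = np.array([0.5, -0.5])
offsets = mfem.intArray([0, re.size, re.size + im.size])   # blocks: Re, Im
vec = mfem.BlockVector(offsets)
vec._offsets = offsets                  # keep the intArray alive, as above
vec.GetBlock(0).Assign(re)              # Re_FES1
vec.GetBlock(1).Assign(im)              # Im_FES1
print(vec.GetDataArray())               # [ 1.   2.   0.5 -0.5]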
def make_sub_vec(self, src_vec, idx, inv=False, nocopy=False):
    if inv:
        idx = [k for k in range(self.nb) if k not in idx]

    size = [src_vec.BlockSize(i) for i in idx]
    offset = np.hstack([0, np.cumsum(size, dtype=int)])
    offset = mfem.intArray(list(offset))

    sub_vec = mfem.BlockVector(offset)
    sub_vec._offsets = offset  # keep the intArray alive (prevent it from being freed)

    for k, i in enumerate(idx):
        if nocopy:
            sub_vec.GetBlock(k).Assign(0.0)
        else:
            sub_vec.GetBlock(k).Assign(src_vec.GetBlock(i).GetDataArray())
    return sub_vec
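# --- Standalone sketch (not from the source) of what make_sub_vec does:
# pick a subset of blocks out of a BlockVector and pack them, in order,
# into a new, smaller BlockVector.
import numpy as np
import mfem.ser as mfem

src_offsets = mfem.intArray([0, 2, 5, 6])      # three blocks: sizes 2, 3, 1
src = mfem.BlockVector(src_offsets)
src._offsets = src_offsets
src.Assign(1.0)

idx = [0, 2]                                   # keep blocks 0 and 2
size = [src.BlockSize(i) for i in idx]
offset = mfem.intArray(list(np.hstack([0, np.cumsum(size, dtype=int)])))
sub = mfem.BlockVector(offset)
sub._offsets = offset
for k, i in enumerate(idx):
    sub.GetBlock(k).Assign(src.GetBlock(i).GetDataArray())
print(sub.Size())                              # 3 (= 2 + 1)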
def solve_parallel(self, A, b, x=None):
    if self.gui.write_mat:
        self.write_mat(A, b, x, "." + smyid)

    M = self.make_preconditioner(A, parallel=True)
    solver = self.make_solver(A, M, use_mpi=True)

    sol = []

    # Solve the problem and gather the solution to the head node...
    # This may not be the best approach.
    from petram.helper.mpi_recipes import gather_vector
    offset = A.RowOffsets()

    for bb in b:
        rows = MPI.COMM_WORLD.allgather(np.int32(bb.Size()))
        rowstarts = np.hstack((0, np.cumsum(rows)))
        dprint1("rowstarts/offset", rowstarts, offset.ToList())
        if x is None:
            xx = mfem.BlockVector(offset)
            xx.Assign(0.0)
        else:
            xx = x
            # for j in range(cols):
            #     dprint1(x.GetBlock(j).Size())
            #     dprint1(x.GetBlock(j).GetDataArray())
            # assert False, "must implement this"
        self.call_mult(solver, bb, xx)

        s = []
        for i in range(offset.Size() - 1):
            v = xx.GetBlock(i).GetDataArray()
            vv = gather_vector(v)
            if myid == 0:
                s.append(vv)
        if myid == 0:
            sol.append(np.hstack(s))

    if myid == 0:
        sol = np.transpose(np.vstack(sol))
        return sol
    else:
        return None
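# --- Sketch (not from the source) of the gather pattern used above, assuming
# gather_vector concatenates each rank's numpy array on rank 0. The helper
# below is a hypothetical stand-in written with plain mpi4py.
import numpy as np
from mpi4py import MPI

def gather_vector_sketch(v, comm=MPI.COMM_WORLD):
    """Gather per-rank 1D arrays and concatenate them on rank 0."""
    chunks = comm.gather(v, root=0)
    return np.hstack(chunks) if comm.rank == 0 else None

comm = MPI.COMM_WORLD
local = np.full(3, comm.rank, dtype=np.float64)   # each rank holds 3 entries
full = gather_vector_sketch(local)
if comm.rank == 0:
    print(full.size)   # 3 * comm.size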
# 9. Define the block structure of the problem, by creating the offset
#    variables. Also allocate two BlockVector objects to store the solution
#    and rhs.
x0_var = 0
xhat_var = 1
NVAR = 2    # enum in C

true_s0 = x0_space.TrueVSize()
true_s1 = xhat_space.TrueVSize()
true_s_test = test_space.TrueVSize()

true_offsets = mfem.intArray([0, true_s0, true_s0 + true_s1])
true_offsets_test = mfem.intArray([0, true_s_test])

x = mfem.BlockVector(true_offsets)
b = mfem.BlockVector(true_offsets)
x.Assign(0.0)
b.Assign(0.0)

# 10. Set up the 1x2 block Least Squares DPG operator, B = [B0 Bhat],
#     the normal equation operator, A = B^t Sinv B, and
#     the normal equation right-hand side, b = B^t Sinv F.
B = mfem.BlockOperator(true_offsets_test, true_offsets)
B.SetBlock(0, 0, matB0)
B.SetBlock(0, 1, matBhat)

A = mfem.RAPOperator(B, matSinv, B)

trueF = F.ParallelAssemble()
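# --- Dense numpy illustration (not from the source) of the normal equations
# that RAPOperator(B, matSinv, B) represents: A = B^T Sinv B, and for the
# right-hand side, b = B^T Sinv F. All matrices below are random stand-ins.
import numpy as np

rng = np.random.default_rng(0)
Bd = rng.standard_normal((5, 3))    # stand-in for the block operator B
Sinv = np.eye(5)                    # stand-in for the test inner-product inverse
Fd = rng.standard_normal(5)

Ad = Bd.T @ Sinv @ Bd               # normal-equation operator
bd = Bd.T @ Sinv @ Fd               # normal-equation right-hand side
xd = np.linalg.solve(Ad, bd)        # least-squares solution of B x ~= F
print(np.allclose(Bd.T @ (Bd @ xd), Bd.T @ Fd))   # True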
print("***********************************************************") block_offsets = intArray([0, R_space.GetVSize(), W_space.GetVSize()]) block_offsets.PartialSum() block_trueOffsets = intArray([0, R_space.TrueVSize(), W_space.TrueVSize()]) block_trueOffsets.PartialSum() k = mfem.ConstantCoefficient(1.0) fcoeff = fFunc(dim) fnatcoeff = f_natural() gcoeff = gFunc() ucoeff = uFunc_ex(dim) pcoeff = pFunc_ex() x = mfem.BlockVector(block_offsets) rhs = mfem.BlockVector(block_offsets) trueX = mfem.BlockVector(block_trueOffsets) trueRhs = mfem.BlockVector(block_trueOffsets) fform = mfem.ParLinearForm() fform.Update(R_space, rhs.GetBlock(0), 0) fform.AddDomainIntegrator(mfem.VectorFEDomainLFIntegrator(fcoeff)) fform.AddBoundaryIntegrator(mfem.VectorFEBoundaryFluxLFIntegrator(fnatcoeff)) fform.Assemble() fform.ParallelAssemble(trueRhs.GetBlock(0)) gform = mfem.ParLinearForm() gform.Update(W_space, rhs.GetBlock(1), 0)
def ex19_main(args):
    ser_ref_levels = args.refine_serial
    par_ref_levels = args.refine_parallel
    order = args.order
    visualization = args.visualization
    mu = args.shear_modulus
    newton_rel_tol = args.relative_tolerance
    newton_abs_tol = args.absolute_tolerance
    newton_iter = args.newton_iterations

    if myid == 0:
        parser.print_options(args)

    meshfile = expanduser(join(path, 'data', args.mesh))
    mesh = mfem.Mesh(meshfile, 1, 1)
    dim = mesh.Dimension()

    for lev in range(ser_ref_levels):
        mesh.UniformRefinement()

    pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)
    del mesh
    for lev in range(par_ref_levels):
        pmesh.UniformRefinement()

    # 4. Define the shear modulus for the incompressible Neo-Hookean material
    c_mu = mfem.ConstantCoefficient(mu)

    # 5. Define the finite element spaces for displacement and pressure
    #    (Taylor-Hood elements). By default, the displacement (u/x) is a
    #    second order vector field, while the pressure (p) is a linear
    #    scalar function.
    quad_coll = mfem.H1_FECollection(order, dim)
    lin_coll = mfem.H1_FECollection(order - 1, dim)

    R_space = mfem.ParFiniteElementSpace(pmesh, quad_coll, dim,
                                         mfem.Ordering.byVDIM)
    W_space = mfem.ParFiniteElementSpace(pmesh, lin_coll)

    spaces = [R_space, W_space]
    glob_R_size = R_space.GlobalTrueVSize()
    glob_W_size = W_space.GlobalTrueVSize()

    # 6. Define the Dirichlet conditions (set to boundary attribute 1 and 2)
    ess_bdr_u = mfem.intArray(R_space.GetMesh().bdr_attributes.Max())
    ess_bdr_p = mfem.intArray(W_space.GetMesh().bdr_attributes.Max())
    ess_bdr_u.Assign(0)
    ess_bdr_u[0] = 1
    ess_bdr_u[1] = 1
    ess_bdr_p.Assign(0)
    ess_bdr = [ess_bdr_u, ess_bdr_p]

    if myid == 0:
        print("***********************************************************")
        print("dim(u) = " + str(glob_R_size))
        print("dim(p) = " + str(glob_W_size))
        print("dim(u+p) = " + str(glob_R_size + glob_W_size))
        print("***********************************************************")

    block_offsets = intArray([0, R_space.TrueVSize(), W_space.TrueVSize()])
    block_offsets.PartialSum()
    xp = mfem.BlockVector(block_offsets)

    # 9. Define grid functions for the current configuration, reference
    #    configuration, final deformation, and pressure
    x_gf = mfem.ParGridFunction(R_space)
    x_ref = mfem.ParGridFunction(R_space)
    x_def = mfem.ParGridFunction(R_space)
    p_gf = mfem.ParGridFunction(W_space)

    # x_gf.MakeRef(R_space, xp.GetBlock(0), 0)
    # p_gf.MakeRef(W_space, xp.GetBlock(1), 0)

    deform = InitialDeformation(dim)
    refconfig = ReferenceConfiguration(dim)

    x_gf.ProjectCoefficient(deform)
    x_ref.ProjectCoefficient(refconfig)
    p_gf.Assign(0.0)

    # 12. Set up the block solution vectors
    x_gf.GetTrueDofs(xp.GetBlock(0))
    p_gf.GetTrueDofs(xp.GetBlock(1))

    # 13. Initialize the incompressible neo-Hookean operator
    oper = RubberOperator(spaces, ess_bdr, block_offsets,
                          newton_rel_tol, newton_abs_tol, newton_iter, mu)

    # 14. Solve the Newton system
    oper.Solve(xp)

    # 15. Distribute the shared degrees of freedom
    x_gf.Distribute(xp.GetBlock(0))
    p_gf.Distribute(xp.GetBlock(1))

    # 16. Compute the final deformation
    mfem.subtract_vector(x_gf, x_ref, x_def)

    # 17. Visualize the results if requested
    if visualization:
        vis_u = mfem.socketstream("localhost", 19916)
        visualize(vis_u, pmesh, x_gf, x_def, "Deformation", True)
        MPI.COMM_WORLD.Barrier()
        vis_p = mfem.socketstream("localhost", 19916)
        visualize(vis_p, pmesh, x_gf, p_gf, "Pressure", True)

    # 18. Save the displaced mesh, the final deformation, and the pressure
    nodes = x_gf
    owns_nodes = 0
    nodes, owns_nodes = pmesh.SwapNodes(nodes, owns_nodes)
    smyid = '.' + '{:0>6d}'.format(myid)
    pmesh.PrintToFile('deformed.mesh' + smyid, 8)
    p_gf.SaveToFile('pressure.sol' + smyid, 8)
    x_def.SaveToFile('deformation.sol' + smyid, 8)
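# --- A self-contained serial sketch (not from the source) of the SwapNodes
# idiom used in step 18: swap a displacement GridFunction in as the mesh
# nodes, save the displaced mesh, then swap the original nodes back. The
# small Cartesian mesh and zero "displacement" are purely illustrative.
import mfem.ser as mfem

mesh = mfem.Mesh(4, 4, "TRIANGLE")        # small unit-square mesh
fec = mfem.H1_FECollection(1, mesh.Dimension())
fes = mfem.FiniteElementSpace(mesh, fec, mesh.Dimension())
disp = mfem.GridFunction(fes)
disp.Assign(0.0)                          # pretend displacement field

mesh.SetNodalFESpace(fes)                 # give the mesh a nodal GridFunction
nodes, owns = mesh.SwapNodes(disp, 0)     # mesh now uses "disp" as its nodes
mesh.PrintToFile("displaced.mesh", 8)
disp, owns = mesh.SwapNodes(nodes, owns)  # restore the original nodes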
def solve_parallel(self, A, b, x=None):
    from mpi4py import MPI
    myid = MPI.COMM_WORLD.rank
    nproc = MPI.COMM_WORLD.size
    from petram.helper.mpi_recipes import gather_vector

    def get_block(Op, i, j):
        try:
            return Op._linked_op[(i, j)]
        except KeyError:
            return None

    offset = A.RowOffsets()
    rows = A.NumRowBlocks()
    cols = A.NumColBlocks()

    if self.gui.write_mat:
        for i in range(cols):
            for j in range(rows):
                m = get_block(A, i, j)
                if m is None:
                    continue
                m.Print('matrix_' + str(i) + '_' + str(j))
        for i, bb in enumerate(b):
            for j in range(rows):
                v = bb.GetBlock(j)
                v.Print('rhs_' + str(i) + '_' + str(j) + '.' + smyid)
        if x is not None:
            for j in range(rows):
                xx = x.GetBlock(j)
                xx.Print('x_' + str(i) + '_' + str(j) + '.' + smyid)

    M = mfem.BlockDiagonalPreconditioner(offset)

    prcs = dict(self.gui.preconditioners)
    name = self.Aname
    assert not self.gui.parent.is_complex(), "can not solve complex"
    if self.gui.parent.is_converted_from_complex():
        name = sum([[n, n] for n in name], [])

    for k, n in enumerate(name):
        prc = prcs[n][1]
        if prc == "None":
            continue
        name = "".join([tmp for tmp in prc if not tmp.isdigit()])

        A0 = get_block(A, k, k)
        if A0 is None and not name.startswith('schur'):
            continue

        if hasattr(mfem.HypreSmoother, prc):
            invA0 = mfem.HypreSmoother(A0)
            invA0.SetType(getattr(mfem.HypreSmoother, prc))
        elif prc == 'ams':
            depvar = self.engine.r_dep_vars[k]
            dprint1("setting up AMS for ", depvar)
            prec_fespace = self.engine.fespaces[depvar]
            invA0 = mfem.HypreAMS(A0, prec_fespace)
            invA0.SetSingularProblem()
        elif name == 'MUMPS':
            cls = SparseSmootherCls[name][0]
            invA0 = cls(A0, gui=self.gui[prc], engine=self.engine)
        elif name.startswith('schur'):
            args = name.split("(")[-1].split(")")[0].split(",")
            dprint1("setting up schur for ", args)
            if len(args) > 1:
                assert False, "not yet supported"
            for arg in args:
                r1 = self.engine.dep_var_offset(arg.strip())
                c1 = self.engine.r_dep_var_offset(arg.strip())
                B = get_block(A, k, c1)
                Bt = get_block(A, r1, k).Transpose()
                Bt = Bt.Transpose()  # transposing twice leaves a copy of the block
                B0 = get_block(A, r1, c1)
                Md = mfem.HypreParVector(MPI.COMM_WORLD,
                                         B0.GetGlobalNumRows(),
                                         B0.GetColStarts())
                B0.GetDiag(Md)
                Bt.InvScaleRows(Md)
                S = mfem.ParMult(B, Bt)
                invA0 = mfem.HypreBoomerAMG(S)
                invA0.iterative_mode = False
        else:
            cls = SparseSmootherCls[name][0]
            invA0 = cls(A0, gui=self.gui[prc])

        invA0.iterative_mode = False
        M.SetDiagonalBlock(k, invA0)

    '''
    We should support Schur-complement-type preconditioners:
    if offset.Size() > 2:
        B = get_block(A, 1, 0)
        MinvBt = get_block(A, 0, 1)
        # Md = mfem.HypreParVector(MPI.COMM_WORLD,
        #                          A0.GetGlobalNumRows(),
        #                          A0.GetRowStarts())
        Md = mfem.Vector()
        A0.GetDiag(Md)
        MinvBt.InvScaleRows(Md)
        S = mfem.ParMult(B, MinvBt)
        invS = mfem.HypreBoomerAMG(S)
        invS.iterative_mode = False
        M.SetDiagonalBlock(1, invS)
    '''

    maxiter = int(self.maxiter)
    atol = self.abstol
    rtol = self.reltol
    kdim = int(self.kdim)
    printit = 1

    sol = []

    solver = mfem.GMRESSolver(MPI.COMM_WORLD)
    solver.SetKDim(kdim)
    # solver = mfem.MINRESSolver(MPI.COMM_WORLD)
    # solver.SetOperator(A)
    # solver = mfem.CGSolver(MPI.COMM_WORLD)
    solver.SetOperator(A)
    solver.SetAbsTol(atol)
    solver.SetRelTol(rtol)
    solver.SetMaxIter(maxiter)
    solver.SetPreconditioner(M)
    solver.SetPrintLevel(1)

    # Solve the problem and gather the solution to the head node...
    # This may not be the best approach.
    for bb in b:
        rows = MPI.COMM_WORLD.allgather(np.int32(bb.Size()))
        rowstarts = np.hstack((0, np.cumsum(rows)))
        dprint1("rowstarts/offset", rowstarts, offset.ToList())
        if x is None:
            xx = mfem.BlockVector(offset)
            xx.Assign(0.0)
        else:
            xx = x
            # for j in range(cols):
            #     dprint1(x.GetBlock(j).Size())
            #     dprint1(x.GetBlock(j).GetDataArray())
            # assert False, "must implement this"
        solver.Mult(bb, xx)

        s = []
        for i in range(offset.Size() - 1):
            v = xx.GetBlock(i).GetDataArray()
            vv = gather_vector(v)
            if myid == 0:
                s.append(vv)
        if myid == 0:
            sol.append(np.hstack(s))

    if myid == 0:
        sol = np.transpose(np.vstack(sol))
        return sol
    else:
        return None
def Mult(self, b, x=None, case_base=0):
    try:
        from mpi4py import MPI
    except ImportError:
        from petram.helper.dummy_mpi import MPI
    myid = MPI.COMM_WORLD.rank
    nproc = MPI.COMM_WORLD.size

    sol = []
    row_offsets = self.row_offsets.ToList()

    for kk, bb in enumerate(b):
        rows = MPI.COMM_WORLD.allgather(np.int32(bb.Size()))
        rowstarts = np.hstack((0, np.cumsum(rows)))
        # nicePrint("rowstarts/offset", rowstarts, row_offsets)
        if x is None:
            xx = mfem.BlockVector(self.row_offsets)
            xx.Assign(0.0)
        else:
            xx = x

        if self.is_complex:
            # Each block stores the real parts followed by the imaginary
            # parts; fold them into complex numpy arrays.
            tmp1 = []
            tmp2 = []
            for i in range(len(row_offsets) - 1):
                bbv = bb.GetBlock(i).GetDataArray()
                xxv = xx.GetBlock(i).GetDataArray()
                ll = bbv.size
                bbv = bbv[:ll // 2] + 1j * bbv[ll // 2:]
                xxv = xxv[:ll // 2] + 1j * xxv[ll // 2:]
                tmp1.append(bbv)
                tmp2.append(xxv)
            bbv = np.hstack(tmp1)
            xxv = np.hstack(tmp2)
        else:
            bbv = bb.GetDataArray()
            xxv = xx.GetDataArray()

        if self.gui.write_mat:
            write_vector('rhs_' + str(kk), bbv)
            write_vector('x_' + str(kk), xxv)
            sys.stdout.flush()
            sys.stderr.flush()

        if self.gui.mc64job != 0:
            ret = self.spss.set_matching(self.gui.mc64job)
            if ret != STRUMPACK_SUCCESS:
                assert False, "error during mc64 (Strumpack)"

        self.spss.set_reordering_method(ST.STRUMPACK_METIS)
        ret = self.spss.reorder()
        if ret != STRUMPACK_SUCCESS:
            assert False, "error during reordering (Strumpack)"
        ret = self.spss.factor()
        if ret != STRUMPACK_SUCCESS:
            assert False, "error during factor (Strumpack)"
        ret = self.spss.solve(bbv, xxv, 0)
        if ret != STRUMPACK_SUCCESS:
            assert False, "error during solve phase (Strumpack)"

        s = []
        for i in range(len(row_offsets) - 1):
            r1 = row_offsets[i]
            r2 = row_offsets[i + 1]
            if self.is_complex:
                r1 = r1 // 2
                r2 = r2 // 2
            xxvv = xxv[r1:r2]
            if use_parallel:
                vv = gather_vector(xxvv)
            else:
                vv = xxvv.copy()
            if myid == 0:
                s.append(vv)
        if myid == 0:
            sol.append(np.hstack(s))

    if myid == 0:
        sol = np.transpose(np.vstack(sol))
        return sol
    else:
        return None
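# --- Numpy-only sketch (not from the source) of the real<->complex folding
# used above: a block that stores [Re..., Im...] maps to one complex array
# and back.
import numpy as np

block = np.array([1.0, 2.0, 0.5, -0.5])       # [Re_1, Re_2, Im_1, Im_2]
ll = block.size
z = block[:ll // 2] + 1j * block[ll // 2:]    # -> [1+0.5j, 2-0.5j]
back = np.hstack([z.real, z.imag])            # -> original layout
print(np.allclose(back, block))               # True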
dfes = mfem.ParFiniteElementSpace(pmesh, fec, dim,
                                  mfem.Ordering.byNODES)
# Finite element space for all variables together (total thermodynamic state)
vfes = mfem.ParFiniteElementSpace(pmesh, fec, num_equation,
                                  mfem.Ordering.byNODES)

assert fes.GetOrdering() == mfem.Ordering.byNODES, "Ordering must be byNODES"

glob_size = vfes.GlobalTrueVSize()
if myid == 0:
    print("Number of unknowns: " + str(glob_size))

# 8. Define the initial conditions, save the corresponding mesh and grid
#    functions to a file. This can be opened with GLVis with the -gc option.
#    The solution u has components {density, x-momentum, y-momentum, energy}.
#    These are stored contiguously in the BlockVector u_block.
offsets = [k * vfes.GetNDofs() for k in range(num_equation + 1)]
offsets = mfem.intArray(offsets)
u_block = mfem.BlockVector(offsets)

# Momentum grid function on dfes for visualization.
mom = mfem.ParGridFunction(dfes, u_block, offsets[1])

# Initialize the state.
u0 = InitialCondition(num_equation)
sol = mfem.ParGridFunction(vfes, u_block.GetData())
sol.ProjectCoefficient(u0)

smyid = '{:0>6d}'.format(myid)
pmesh.PrintToFile("vortex-mesh." + smyid, 8)
for k in range(num_equation):
    uk = mfem.ParGridFunction(fes, u_block.GetBlock(k).GetData())
    sol_name = "vortex-" + str(k) + "-init." + smyid
    uk.SaveToFile(sol_name, 8)
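# --- Numpy sketch (not from the source) of the byNODES layout behind the
# offsets above: with ndofs nodal values per field, component k of the state
# occupies the contiguous slice [k * ndofs, (k + 1) * ndofs).
import numpy as np

ndofs, num_equation = 5, 4                        # small illustrative sizes
offsets = [k * ndofs for k in range(num_equation + 1)]    # [0, 5, 10, 15, 20]
u = np.arange(ndofs * num_equation, dtype=float)  # stand-in for u_block's data
density = u[offsets[0]:offsets[1]]                # component 0
energy = u[offsets[3]:offsets[4]]                 # component 3
print(offsets, density.size, energy.size)         # [0, 5, 10, 15, 20] 5 5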
def initialize(self, inMeshObj=None, inMeshFile=None):
    # 2. Problem initialization
    self.parser = ArgParser(description='Based on MFEM Ex16p')
    self.parser.add_argument('-m', '--mesh',
                             default='beam-tet.mesh',
                             action='store', type=str,
                             help='Mesh file to use.')
    self.parser.add_argument('-rs', '--refine-serial',
                             action='store', default=1, type=int,
                             help="Number of times to refine the mesh uniformly in serial")
    self.parser.add_argument('-rp', '--refine-parallel',
                             action='store', default=0, type=int,
                             help="Number of times to refine the mesh uniformly in parallel")
    self.parser.add_argument('-o', '--order',
                             action='store', default=1, type=int,
                             help="Finite element order (polynomial degree)")
    self.parser.add_argument('-s', '--ode-solver',
                             action='store', default=3, type=int,
                             help='\n'.join([
                                 "ODE solver: 1 - Backward Euler, 2 - SDIRK2, 3 - SDIRK3",
                                 "\t\t 11 - Forward Euler, 12 - RK2, 13 - RK3 SSP, 14 - RK4."]))
    self.parser.add_argument('-t', '--t-final',
                             action='store', default=20., type=float,
                             help="Final time; start time is 0.")
    self.parser.add_argument("-dt", "--time-step",
                             action='store', default=5e-3, type=float,
                             help="Time step.")
    self.parser.add_argument("-v", "--viscosity",
                             action='store', default=0.00, type=float,
                             help="Viscosity coefficient.")
    self.parser.add_argument('-L', '--lmbda',
                             action='store', default=1.e0, type=float,
                             help="Lambda of Hooke's law")
    self.parser.add_argument('-mu', '--shear-modulus',
                             action='store', default=1.e0, type=float,
                             help="Shear modulus for Hooke's law")
    self.parser.add_argument('-rho', '--density',
                             action='store', default=1.0, type=float,
                             help='mass density')
    self.parser.add_argument('-vis', '--visualization',
                             action='store_true',
                             help='Enable GLVis visualization')
    self.parser.add_argument('-vs', '--visualization-steps',
                             action='store', default=25, type=int,
                             help="Visualize every n-th timestep.")
    args = self.parser.parse_args()

    self.ser_ref_levels = args.refine_serial
    self.par_ref_levels = args.refine_parallel
    self.order = args.order
    self.dt = args.time_step
    self.visc = args.viscosity
    self.t_final = args.t_final
    self.lmbda = args.lmbda
    self.mu = args.shear_modulus
    self.rho = args.density
    self.visualization = args.visualization
    self.ti = 1
    self.vis_steps = args.visualization_steps
    self.ode_solver_type = args.ode_solver
    self.t = 0.0
    self.last_step = False

    if self.myId == 0:
        self.parser.print_options(args)

    # 3. Reading mesh
    if inMeshObj is None:
        self.meshFile = inMeshFile
        if self.meshFile is None:
            self.meshFile = args.mesh
        self.mesh = mfem.Mesh(self.meshFile, 1, 1)
    else:
        self.mesh = inMeshObj
    self.dim = self.mesh.Dimension()
    print("Mesh dimension: %d" % self.dim)
    print("Number of vertices in the mesh: %d" % self.mesh.GetNV())
    print("Number of elements in the mesh: %d" % self.mesh.GetNE())

    # 4. Define the ODE solver used for time integration. Several implicit
    #    singly diagonally implicit Runge-Kutta (SDIRK) methods, as well as
    #    explicit Runge-Kutta methods, are available.
    if self.ode_solver_type == 1:
        self.ode_solver = BackwardEulerSolver()
    elif self.ode_solver_type == 2:
        self.ode_solver = mfem.SDIRK23Solver(2)
    elif self.ode_solver_type == 3:
        self.ode_solver = mfem.SDIRK33Solver()
    elif self.ode_solver_type == 11:
        self.ode_solver = ForwardEulerSolver()
    elif self.ode_solver_type == 12:
        self.ode_solver = mfem.RK2Solver(0.5)
    elif self.ode_solver_type == 13:
        self.ode_solver = mfem.RK3SSPSolver()
    elif self.ode_solver_type == 14:
        self.ode_solver = mfem.RK4Solver()
    elif self.ode_solver_type == 22:
        self.ode_solver = mfem.ImplicitMidpointSolver()
    elif self.ode_solver_type == 23:
        self.ode_solver = mfem.SDIRK23Solver()
    elif self.ode_solver_type == 24:
        self.ode_solver = mfem.SDIRK34Solver()
    else:
        print("Unknown ODE solver type: " + str(self.ode_solver_type))
        exit()

    # 5. Refine the mesh in serial to increase the resolution. In this
    #    example we do 'ser_ref_levels' of uniform refinement, where
    #    'ser_ref_levels' is a command-line parameter.
    for lev in range(self.ser_ref_levels):
        self.mesh.UniformRefinement()

    # 6. Define a parallel mesh by a partitioning of the serial mesh.
    #    Refine this mesh further in parallel to increase the resolution.
    #    Once the parallel mesh is defined, the serial mesh can be deleted.
    self.pmesh = mfem.ParMesh(MPI.COMM_WORLD, self.mesh)
    for lev in range(self.par_ref_levels):
        self.pmesh.UniformRefinement()

    # 7. Define the vector finite element space representing the current
    #    and the initial temperature, u_ref.
    self.fe_coll = mfem.H1_FECollection(self.order, self.dim)
    self.fespace = mfem.ParFiniteElementSpace(self.pmesh, self.fe_coll,
                                              self.dim)
    self.fe_size = self.fespace.GlobalTrueVSize()
    if self.myId == 0:
        print("FE Number of unknowns: " + str(self.fe_size))

    true_size = self.fespace.TrueVSize()
    self.true_offset = mfem.intArray(3)
    self.true_offset[0] = 0
    self.true_offset[1] = true_size
    self.true_offset[2] = 2 * true_size

    self.vx = mfem.BlockVector(self.true_offset)
    self.v_gf = mfem.ParGridFunction(self.fespace)
    self.v_gfbnd = mfem.ParGridFunction(self.fespace)
    self.x_gf = mfem.ParGridFunction(self.fespace)
    self.x_gfbnd = mfem.ParGridFunction(self.fespace)
    self.x_ref = mfem.ParGridFunction(self.fespace)
    self.pmesh.GetNodes(self.x_ref)

    # 8. Set the initial conditions for u.
    # self.velo = InitialVelocity(self.dim)
    self.velo = velBCs(self.dim)
    # self.deform = InitialDeformation(self.dim)
    self.deform = defBCs(self.dim)
    self.v_gf.ProjectCoefficient(self.velo)
    self.v_gfbnd.ProjectCoefficient(self.velo)
    self.x_gf.ProjectCoefficient(self.deform)
    self.x_gfbnd.ProjectCoefficient(self.deform)
    # self.v_gf.GetTrueDofs(self.vx.GetBlock(0))
    # self.x_gf.GetTrueDofs(self.vx.GetBlock(1))

    # Set up boundary conditions
    self.xess_bdr = mfem.intArray(self.fespace.GetMesh().bdr_attributes.Max())
    self.xess_bdr.Assign(0)
    self.xess_bdr[0] = 1
    self.xess_bdr[1] = 1
    self.xess_tdof_list = intArray()
    self.fespace.GetEssentialTrueDofs(self.xess_bdr, self.xess_tdof_list)
    # print('True x essential BCs are')
    # self.xess_tdof_list.Print()

    self.vess_bdr = mfem.intArray(self.fespace.GetMesh().bdr_attributes.Max())
    self.vess_bdr.Assign(0)
    self.vess_bdr[0] = 1
    self.vess_bdr[1] = 1
    self.vess_tdof_list = intArray()
    self.fespace.GetEssentialTrueDofs(self.vess_bdr, self.vess_tdof_list)
    # print('True v essential BCs are')
    # self.vess_tdof_list.Print()
    # 9. Initialize the stiffness operator
    self.oper = StiffnessOperator(self.fespace, self.lmbda, self.mu,
                                  self.rho, self.visc,
                                  self.vess_tdof_list, self.vess_bdr,
                                  self.xess_tdof_list, self.xess_bdr,
                                  self.v_gfbnd, self.x_gfbnd,
                                  self.deform, self.velo, self.vx)

    # 10. Setting up file output
    self.smyid = '{:0>2d}'.format(self.myId)

    # Initializing the ODE solver
    self.ode_solver.Init(self.oper)
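# --- Sketch (not from the source) of how a driver method would typically
# advance the state initialized above, following the usual MFEM time-stepping
# idiom: vx holds (v, x) as one BlockVector and ode_solver.Step evolves it in
# place. The run() method name and its use of self.t / self.last_step /
# self.vis_steps are assumptions based on the fields set in initialize().
def run(self):
    while not self.last_step:
        if self.t + self.dt >= self.t_final - self.dt / 2:
            self.last_step = True
        self.t, self.dt = self.ode_solver.Step(self.vx, self.t, self.dt)
        if self.last_step or self.ti % self.vis_steps == 0:
            if self.myId == 0:
                print("step " + str(self.ti) + ", t = " + str(self.t))
        self.ti = self.ti + 1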
def run_test():
    print("Test complex_operator module")
    Nvert = 6
    Nelem = 8
    Nbelem = 2

    mesh = mfem.Mesh(2, Nvert, Nelem, 2, 3)
    tri_v = [[1., 0., 0.], [0., 1., 0.], [-1., 0., 0.],
             [0., -1., 0.], [0., 0., 1.], [0., 0., -1.]]
    tri_e = [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4],
             [1, 0, 5], [2, 1, 5], [3, 2, 5], [0, 3, 5]]
    tri_l = [[1, 4], [1, 2]]

    for j in range(Nvert):
        mesh.AddVertex(tri_v[j])
    for j in range(Nelem):
        mesh.AddTriangle(tri_e[j], 1)
    for j in range(Nbelem):
        mesh.AddBdrSegment(tri_l[j], 1)
    mesh.FinalizeTriMesh(1, 1, True)

    dim = mesh.Dimension()
    order = 1
    fec = mfem.H1_FECollection(order, dim)

    if use_parallel:
        mesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)
        fes = mfem.ParFiniteElementSpace(mesh, fec)
        a1 = mfem.ParBilinearForm(fes)
        a2 = mfem.ParBilinearForm(fes)
    else:
        fes = mfem.FiniteElementSpace(mesh, fec)
        a1 = mfem.BilinearForm(fes)
        a2 = mfem.BilinearForm(fes)

    one = mfem.ConstantCoefficient(1.0)
    a1.AddDomainIntegrator(mfem.DiffusionIntegrator(one))
    a1.Assemble()
    a1.Finalize()
    a2.AddDomainIntegrator(mfem.DiffusionIntegrator(one))
    a2.Assemble()
    a2.Finalize()

    if use_parallel:
        M1 = a1.ParallelAssemble()
        M2 = a2.ParallelAssemble()
        M1.Print('M1')
        width = fes.GetTrueVSize()
        # X = mfem.HypreParVector(fes)
        # Y = mfem.HypreParVector(fes)
        # X.SetSize(fes.TrueVSize())
        # Y.SetSize(fes.TrueVSize())
        # from mfem.common.parcsr_extra import ToScipyCoo
        # MM1 = ToScipyCoo(M1)
        # print(MM1.toarray())
        # print(MM1.dot(np.ones(6)))
    else:
        M1 = a1.SpMat()
        M2 = a2.SpMat()
        M1.Print('M1')
        width = fes.GetVSize()
        # X = mfem.Vector()
        # Y = mfem.Vector()
        # X.SetSize(M1.Width())
        # Y.SetSize(M1.Height())
        # from mfem.common.sparse_utils import sparsemat_to_scipycsr
        # MM1 = sparsemat_to_scipycsr(M1, np.float)
        # print(MM1.toarray())
        # print(MM1.dot(np.ones(6)))

    # X.Assign(0.0)
    # X[0] = 1.0
    # M1.Mult(X, Y)
    # print(Y.GetDataArray())

    Mc = mfem.ComplexOperator(M1, M2, hermitan=True)

    offsets = mfem.intArray([0, width, width])
    offsets.PartialSum()

    x = mfem.BlockVector(offsets)
    y = mfem.BlockVector(offsets)

    x.GetBlock(0).Assign(0)
    if myid == 0:
        x.GetBlock(0)[0] = 1.0
    x.GetBlock(1).Assign(0)
    if myid == 0:
        x.GetBlock(1)[0] = 1.0
    Mc.Mult(x, y)
    print("x", x.GetDataArray())
    print("y", y.GetDataArray())

    if myid == 0:
        x.GetBlock(1)[0] = -1.0
    x.Print()
    Mc.Mult(x, y)
    print("x", x.GetDataArray())
    print("y", y.GetDataArray())
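# --- Numpy sketch (not from the source) of the action ComplexOperator
# implements for the hermitian convention (hermitan=True above): with
# A = M1 + i*M2 acting on x = x_re + i*x_im, the equivalent 2x2 real block
# system is
#     [M1 -M2] [x_re]   [y_re]
#     [M2  M1] [x_im] = [y_im]
# The small matrices below are stand-ins, not the assembled Diffusion forms.
import numpy as np

M1 = np.array([[2.0, -1.0], [-1.0, 2.0]])    # stand-in real part
M2 = np.array([[0.0, 1.0], [-1.0, 0.0]])     # stand-in imaginary part
x_re, x_im = np.array([1.0, 0.0]), np.array([0.0, 1.0])

y_re = M1 @ x_re - M2 @ x_im
y_im = M2 @ x_re + M1 @ x_im
z = (M1 + 1j * M2) @ (x_re + 1j * x_im)      # same thing in complex arithmetic
print(np.allclose(y_re + 1j * y_im, z))      # True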