Example #1
 def testInit(self):
     comm_null1 = PETSc.Comm()
     comm_null2 = PETSc.Comm(PETSc.COMM_NULL)
     comm_world = PETSc.Comm(PETSc.COMM_WORLD)
     comm_self = PETSc.Comm(PETSc.COMM_SELF)
     self.assertEqual(comm_null1, PETSc.COMM_NULL)
     self.assertEqual(comm_null2, PETSc.COMM_NULL)
     self.assertEqual(comm_world, PETSc.COMM_WORLD)
     self.assertEqual(comm_self, PETSc.COMM_SELF)
Example #2
def test_mpi_dependent_jiting():
    # FIXME: Not a proper unit test...
    from dolfin import (Expression, UnitSquareMesh, Function, TestFunction,
                        Form, FunctionSpace, dx, CompiledSubDomain,
                        SubSystemsManager)

    # Init PETSc (needed to initialize PETSc and SLEPc collectively on
    # all processes)
    SubSystemsManager.init_petsc()

    try:
        import mpi4py.MPI as mpi
    except ImportError:
        return

    try:
        import petsc4py.PETSc as petsc
    except ImportError:
        return

    # Set communicator and get process information
    comm = mpi.COMM_WORLD
    group = comm.Get_group()
    size = comm.Get_size()

    # Only consider parallel runs
    if size == 1:
        return

    rank = comm.Get_rank()
    group_comm_0 = petsc.Comm(comm.Create(group.Incl(range(1))))
    group_comm_1 = petsc.Comm(comm.Create(group.Incl(range(1, 2))))

    if size > 2:
        group_comm_2 = petsc.Comm(comm.Create(group.Incl(range(2, size))))

    if rank == 0:
        e = Expression("4", mpi_comm=group_comm_0, degree=0)

    elif rank == 1:
        e = Expression("5", mpi_comm=group_comm_1, degree=0)
        assert (e)
        domain = CompiledSubDomain("on_boundary",
                                   mpi_comm=group_comm_1,
                                   degree=0)
        assert (domain)

    else:
        mesh = UnitSquareMesh(group_comm_2, 2, 2)
        V = FunctionSpace(mesh, "P", 1)
        u = Function(V)
        v = TestFunction(V)
        Form(u * v * dx)
Example #3
import dolfin
from petsc4py import PETSc


def make_comm(comm):
    # Newer (pybind11-based) DOLFIN accepts the communicator as-is;
    # older SWIG-based versions need it wrapped in a petsc4py Comm.
    if hasattr(dolfin, "has_pybind11") and dolfin.has_pybind11():
        return comm
    elif dolfin.__version__ >= "2018.1.0":
        return comm
    else:
        return PETSc.Comm(comm)
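A minimal usage sketch, not part of the original snippet: assuming a DOLFIN mesh is at hand, mesh.mpi_comm() returns a communicator whose Python type differs between DOLFIN versions, and make_comm normalizes it before it is handed to petsc4py.

from dolfin import UnitSquareMesh
from petsc4py import PETSc

mesh = UnitSquareMesh(8, 8)
comm = make_comm(mesh.mpi_comm())    # raw comm on newer DOLFIN, PETSc.Comm on older
vec = PETSc.Vec().create(comm=comm)  # petsc4py accepts either form
vec.setSizes(10)
vec.setUp()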
Example #4
 def testCompatMPI4PY(self):
     try:
         from mpi4py import MPI
     except ImportError:
         return
     # mpi4py -> petsc4py
     cn = PETSc.Comm(MPI.COMM_NULL)
     cs = PETSc.Comm(MPI.COMM_SELF)
     cw = PETSc.Comm(MPI.COMM_WORLD)
     self.assertEqual(cn, PETSc.COMM_NULL)
     self.assertEqual(cs, PETSc.COMM_SELF)
     self.assertEqual(cw, PETSc.COMM_WORLD)
     # petsc4py -> mpi4py
     cn = PETSc.COMM_NULL.tompi4py()
     self.assertTrue(isinstance(cn, MPI.Comm))
     self.assertFalse(cn)
     cs = PETSc.COMM_SELF.tompi4py()
     self.assertTrue(isinstance(cs, MPI.Intracomm))
     self.assertEqual(cs.Get_size(), 1)
     self.assertEqual(cs.Get_rank(), 0)
     cw = PETSc.COMM_WORLD.tompi4py()
     self.assertTrue(isinstance(cw, MPI.Intracomm))
     self.assertEqual(cw.Get_size(), PETSc.COMM_WORLD.getSize())
     self.assertEqual(cw.Get_rank(), PETSc.COMM_WORLD.getRank())
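A short companion sketch, not from the test above: after tompi4py() the result is a regular mpi4py Intracomm, so mpi4py collectives can be used directly on PETSc's world communicator.

from petsc4py import PETSc

comm = PETSc.COMM_WORLD.tompi4py()   # mpi4py.MPI.Intracomm
local_value = comm.Get_rank() + 1
total = comm.allreduce(local_value)  # mpi4py collective; op defaults to SUM
if comm.Get_rank() == 0:
    print("sum over all ranks:", total)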
Example #5
 def testViewLoadCycle(self):
     grank = PETSc.COMM_WORLD.rank
     for i in range(self.NTIMES):
         if i == 0:
             infname = self.infile()
             informt = self.informat()
         else:
             infname = self.outfile()
             informt = self.outformat()
         if self.HETEROGENEOUS:
             mycolor = (grank > self.NTIMES - i)
         else:
             mycolor = 0
         try:
             import mpi4py
         except ImportError:
             self.skipTest(
                 'mpi4py')  # throws special exception to signal test skip
         mpicomm = PETSc.COMM_WORLD.tompi4py()
         comm = PETSc.Comm(comm=mpicomm.Split(color=mycolor, key=grank))
         if mycolor == 0:
             self.outputText("Begin cycle %d\n" % i, comm)
             plex = PETSc.DMPlex()
             vwr = PETSc.ViewerHDF5()
             # Create plex
             plex.create(comm=comm)
             plex.setName("DMPlex Object")
             # Load data from XDMF into dm in parallel
             vwr.create(infname, mode='r', comm=comm)
             vwr.pushFormat(format=informt)
             plex.load(viewer=vwr)
             plex.setOptionsPrefix("loaded_")
             plex.setFromOptions()
             vwr.popFormat()
             vwr.destroy()
             self.outputPlex(plex)
             # Test DM is indeed distributed
             flg = plex.isDistributed()
             self.outputText(
                 "Loaded mesh distributed? %s\n" % str(flg).upper(), comm)
             # Interpolate
             plex.interpolate()
             plex.setOptionsPrefix("interpolated_")
             plex.setFromOptions()
             self.outputPlex(plex)
             # Redistribute
             part = plex.getPartitioner()
             part.setType(self.partitionerType())
             _ = plex.distribute(overlap=0)
             plex.setOptionsPrefix("redistributed_")
             plex.setFromOptions()
             self.outputPlex(plex)
             # Save redistributed dm to XDMF in parallel
             vwr.create(self.outfile(), mode='w', comm=comm)
             vwr.pushFormat(format=self.outformat())
             plex.setName("DMPlex Object")
             plex.view(viewer=vwr)
             vwr.popFormat()
             vwr.destroy()
             # Destroy plex
             plex.destroy()
             self.outputText("End   cycle %d\n--------\n" % i, comm)
         PETSc.COMM_WORLD.Barrier()
     # Check that the output is identical to that of plex/tutorial/ex5.c.
     self.assertTrue(
         filecmp.cmp(self.tmp_output_file(),
                     self.ref_output_file(),
                     shallow=False), 'Contents of the files not the same.')
     PETSc.COMM_WORLD.Barrier()
Example #6
import sys

import petsc4py

# "PetscOptions" is assumed to be a list of PETSc option strings assembled
# earlier in the original script; appending "-help" makes PETSc print option help.
#PetscOptions.append("-help")
petsc4py.init(PetscOptions)

from petsc4py import PETSc
from mpi4py import MPI

# break processors into separate communicators
petscRank = PETSc.COMM_WORLD.getRank()
petscSize = PETSc.COMM_WORLD.Get_size()
sys.stdout.write("petsc rank %d petsc nproc %d\n" % (petscRank, petscSize))

# break up processors into communicators
NumProcsPerSubComm = 4
color = petscRank // NumProcsPerSubComm  # Split() requires an integer color
NumSubCommunicators = petscSize // NumProcsPerSubComm + 1
subcomm = PETSc.Comm(MPI.COMM_WORLD.Split(color))
subRank = subcomm.Get_rank()
subSize = subcomm.Get_size()
sys.stdout.write("number of sub communictors %d \n" % (NumSubCommunicators))
sys.stdout.write("subcomm rank %d subcomm nproc %d\n" % (subRank, subSize))

# set shell context
# TODO import vtk should be called after femLibrary ????
# FIXME WHY IS THIS????
import femLibrary
fem = femLibrary.PyFEMInterface(4.0)
fem.SetuplibMesh(subcomm)  # initialize libMesh data structures

# inifile = None ==> setup from command line
inifile = None
fem.SetupIni(inifile)
Example #7
    def __init__(self,
                 F,
                 Y,
                 y,
                 bcs=None,
                 power=1,
                 shift=1,
                 bounds=None,
                 P=None):
        """
    The constructor: takes in the form, function space, solution,
    boundary conditions, and deflation parameters."""

        assert isinstance(Y, FunctionSpace)
        self.function_space = Y
        self.mesh = Y.mesh()
        self.comm = PETSc.Comm(self.mesh.mpi_comm())

        NonlinearProblem.__init__(self)
        self.y = y
        self.bcs = bcs

        self._form = F
        self._dF = derivative(F, y)
        self.assembler = SystemAssembler(self._dF, self._form, self.bcs)
        self._J = PETScMatrix()

        # All the known solutions to be deflated
        self.solutions = []

        self.power = power
        self.shift = shift
        self.norms = []
        self.dnorms = []

        self._tmpvec1 = empty_vector(y.vector())
        self._tmpvec2 = empty_vector(y.vector())
        self.residual = empty_vector(y.vector())

        # Sometimes for various problems you want to solve submatrices of the base
        # matrix -- e.g. for active set methods for variational inequalities,
        # and in nonlinear fieldsplits.
        self.eqn_subindices = None
        self.var_subindices = None
        self.inact_subindices = None
        self.pc_prefix = "inner_"

        # in case a fieldsplit preconditioner is requested, and the blocksize
        # isn't set by dolfin
        self.fieldsplit_is = None

        # the near nullspace of the operator
        self.nullsp = None

        if bounds is not None:
            (lb, ub) = bounds
            self.lb = as_backend_type(lb).vec()
            self.ub = as_backend_type(ub).vec()

        # in case you want to use a different matrix to build
        # a preconditioner.
        self.Pmat = None
        if P is not None:
            self.Pmat = P