Example #1
from caffe2.python import core

def SetupMPI():
    # Probe for mpi4py and for an MPI-engine build of Caffe2, caching
    # the results in module-level globals.
    try:
        from mpi4py import MPI
        global _has_mpi, COMM, RANK, SIZE
        # True only if this Caffe2 build registered the MPI engine.
        _has_mpi = core.IsOperatorWithEngine("CreateCommonWorld", "MPI")
        COMM = MPI.COMM_WORLD
        RANK = COMM.Get_rank()
        SIZE = COMM.Get_size()
    except ImportError:
        _has_mpi = False
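
A minimal usage sketch (assumed, not from the example's source): since SetupMPI() only assigns the globals when mpi4py imports successfully, sensible module-level defaults should exist before it runs.

# Assumed module-level defaults so the globals always exist.
_has_mpi = False
COMM = None
RANK = 0
SIZE = 1

SetupMPI()
if _has_mpi:
    print("MPI engine available on rank %d of %d" % (RANK, SIZE))
else:
    print("MPI engine not available")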
Example #2
def test_is_operator_with_engine(self):
    # 'Relu' is registered under the DEFAULT engine in this build, while
    # 'NOEXIST' is not a registered engine, so the second query is False.
    self.assertTrue(core.IsOperatorWithEngine('Relu', 'DEFAULT'))
    self.assertFalse(core.IsOperatorWithEngine('Relu', 'NOEXIST'))
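
For context, a sketch of how such a query might drive engine selection when building an operator; the fallback logic and operator arguments here are illustrative assumptions, not taken from the example above.

from caffe2.python import core

# Prefer the MPI engine when this build registered it; an empty engine
# string falls back to the default implementation.
engine = "MPI" if core.IsOperatorWithEngine("CreateCommonWorld", "MPI") else ""
op = core.CreateOperator("CreateCommonWorld", [], ["comm_world"], engine=engine)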
Example #3
import hypothesis.strategies as st
from hypothesis import given

import numpy as np
import unittest

from caffe2.python import core, workspace, dyndep
import caffe2.python.hypothesis_test_util as hu

if workspace.has_gpu_support:
    dyndep.InitOpsLibrary("@/caffe2/caffe2/mpi:mpi_ops_gpu")
else:
    dyndep.InitOpsLibrary("@/caffe2/caffe2/mpi:mpi_ops")

try:
    from mpi4py import MPI
    _has_mpi = core.IsOperatorWithEngine("CreateCommonWorld", "MPI")
    COMM = MPI.COMM_WORLD
    RANK = COMM.Get_rank()
    SIZE = COMM.Get_size()
except ImportError:
    _has_mpi = False
    # Defaults so the class body below can still be evaluated when mpi4py
    # is absent (SIZE is read at class-definition time); the tests
    # themselves are then skipped by the skipIf decorator.
    COMM = None
    RANK = 0
    SIZE = 1


@unittest.skipIf(not _has_mpi, "MPI is not available. Skipping.")
class TestMPI(hu.HypothesisTestCase):
    @given(X=hu.tensor(),
           root=st.integers(min_value=0, max_value=SIZE - 1),
           device_option=st.sampled_from(hu.device_options),
           **hu.gcs)
    def test_broadcast(self, X, root, device_option, gc, dc):
        # Use mpi4py's broadcast to make sure that all nodes inherit the
        # same hypothesis-generated inputs.
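
The example is cut off here. A sketch of the pattern the comment describes (an assumed continuation, not the verbatim source): hypothesis generates inputs independently on each rank, so they are broadcast from rank 0 first so that every process tests identical data.

        X = COMM.bcast(X)
        root = COMM.bcast(root)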