def test_default_allocation(self):
    """A freshly constructed proc_allocation uses 1 thread and no GPU."""
    default_alloc = arb.proc_allocation()
    self.assertEqual(default_alloc.threads, 1)
    self.assertIsNone(default_alloc.gpu_id)
    self.assertFalse(default_alloc.has_gpu)
def test_exceptions_context_arbmpi(self):
    """Passing anything but None or a communicator as `mpi` raises."""
    alloc = arb.proc_allocation()
    expected = "mpi must be None, or an MPI communicator"
    # A plain string is not a communicator.
    with self.assertRaisesRegex(RuntimeError, expected):
        arb.context(mpi='MPI_COMM_WORLD')
    # Neither is an integer.
    with self.assertRaisesRegex(RuntimeError, expected):
        arb.context(alloc, mpi=0)
def test_context_allocation_mpi4py(self):
    """A context built from an allocation plus an mpi4py communicator."""
    world = arb.mpi_comm(mpi.COMM_WORLD)
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, world)
    # The context inherits the allocation's thread count and is MPI-aware.
    self.assertEqual(alloc.threads, ctx.threads)
    self.assertTrue(ctx.has_mpi)
def test_context_allocation(self):
    """A context built from a proc_allocation mirrors its settings."""
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc)
    self.assertEqual(ctx.threads, alloc.threads)
    self.assertEqual(ctx.has_gpu, alloc.has_gpu)
    # Without MPI there is a single rank, and this process is rank 0.
    self.assertEqual(ctx.ranks, 1)
    self.assertEqual(ctx.rank, 0)
def test_set_allocation(self):
    """proc_allocation fields are mutable after construction."""
    alloc = arb.proc_allocation()
    alloc.threads = 20
    self.assertEqual(alloc.threads, 20)
    # Assigning a gpu_id turns GPU use on ...
    alloc.gpu_id = 0
    self.assertEqual(alloc.gpu_id, 0)
    self.assertTrue(alloc.has_gpu)
    # ... and resetting it to None turns it off again.
    alloc.gpu_id = None
    self.assertFalse(alloc.has_gpu)
def run():
    """Run the test suite under MPI; only rank 0 reports results."""
    verbosity = options.parse_arguments().verbosity
    world = arb.mpi_comm(mpi.COMM_WORLD)
    ctx = arb.context(arb.proc_allocation(), world)
    if ctx.rank == 0:
        runner = unittest.TextTestRunner(verbosity=verbosity)
    else:
        # Silence every rank except 0 by routing stdout to the null device.
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)
    runner.run(suite())
def context():
    """Fixture producing an `arbor.context`, MPI-aware when MPI is enabled."""
    alloc = arbor.proc_allocation()
    if not _mpi_enabled:
        return arbor.context(alloc)
    # Lazily bring up MPI on first use, and only register the teardown
    # hook when this fixture is the one that initialized it.
    if not arbor.mpi_is_initialized():
        print("Context fixture initializing mpi", flush=True)
        arbor.mpi_init()
        atexit.register(_finalize_mpi)
    if _mpi4py_enabled:
        from mpi4py.MPI import COMM_WORLD as comm
    else:
        comm = arbor.mpi_comm()
    return arbor.context(alloc, comm)
def run():
    """Run the suite with arbor-managed MPI; only rank 0 prints results."""
    verbosity = options.parse_arguments().verbosity
    # Initialize MPI through arbor's own bindings if nobody has yet.
    if not arb.mpi_is_initialized():
        arb.mpi_init()
    ctx = arb.context(arb.proc_allocation(), arb.mpi_comm())
    if ctx.rank == 0:
        runner = unittest.TextTestRunner(verbosity=verbosity)
    else:
        # Discard output on non-zero ranks.
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)
    runner.run(suite())
    # Tear MPI down again unless something else already finalized it.
    if not arb.mpi_is_finalized():
        arb.mpi_finalize()
def test_exceptions_allocation(self):
    """Invalid gpu_id / threads arguments to proc_allocation raise."""
    gpu_msg = "gpu_id must be None, or a non-negative integer"
    # Floats, negative ints and strings are all rejected as gpu_id.
    for bad_gpu in (1., -1, 'gpu_id'):
        with self.assertRaisesRegex(RuntimeError, gpu_msg):
            arb.proc_allocation(gpu_id=bad_gpu)
    # threads must be a positive integer: floats and None are TypeErrors,
    # zero is rejected with an explicit message.
    with self.assertRaises(TypeError):
        arb.proc_allocation(threads=1.)
    with self.assertRaisesRegex(RuntimeError,
                                "threads must be a positive integer"):
        arb.proc_allocation(threads=0)
    with self.assertRaises(TypeError):
        arb.proc_allocation(threads=None)
return suite if __name__ == "__main__": v = options.parse_arguments().verbosity if not arb.mpi_is_initialized(): print(" Runner initializing mpi") arb.mpi_init() if mpi4py_enabled: comm = arb.mpi_comm(mpi.COMM_WORLD) elif mpi_enabled: comm = arb.mpi_comm() alloc = arb.proc_allocation() ctx = arb.context(alloc, comm) rank = ctx.rank if rank == 0: runner = unittest.TextTestRunner(verbosity=v) else: sys.stdout = open(os.devnull, 'w') runner = unittest.TextTestRunner(stream=sys.stdout) result = runner.run(suite()) if not arb.mpi_is_finalized(): arb.mpi_finalize() sys.exit(not (result.wasSuccessful()))