def init(logfile=__logfile):
    """Enable MPE-based tracing of MPI calls and user-defined MPE logging.

    No-op unless the module-level ``mode`` equals 'tracing'.
    NOTE(review): depends on module-level names ``mode``, ``mpi4py``,
    ``MPE``, and ``__logfile`` defined elsewhere in this file — confirm
    they are in scope at call time.
    """
    if mode != 'tracing':
        return
    # Hook the MPE profiling library into mpi4py before MPI is initialized.
    mpi4py.profile(name='mpe', logfile=logfile)
    # Turn the MPI profiling layer on (PMPI Pcontrol level 1).
    mpi4py.MPI.Pcontrol(1)
    # Initialize MPE user-level logging and set the output file basename.
    MPE.initLog(logfile=logfile)
    MPE.setLogFileName(logfile)
def main(args=None):
    "Entry-point for ``python -m mpi4py``"
    # Option parsing happens BEFORE importing mpi4py.MPI so that the
    # rc/profile settings below can still take effect (they must be set
    # prior to MPI initialization).
    from optparse import OptionParser
    from mpi4py import __name__ as prog
    from mpi4py import __version__ as version
    parser = OptionParser(prog=prog,
                          version='%prog ' + version,
                          usage="%prog [options] <command> [args]")
    parser.add_option("--no-threads",
                      action="store_false", dest="threads", default=True,
                      help="initialize MPI without thread support")
    parser.add_option("--thread-level",
                      type="choice", metavar="LEVEL",
                      choices=["single", "funneled",
                               "serialized", "multiple"],
                      action="store", dest="thread_level",
                      default="multiple",
                      help="initialize MPI with required thread support")
    parser.add_option("--mpe",
                      action="store_true", dest="mpe", default=False,
                      help="use MPE for MPI profiling")
    parser.add_option("--vt",
                      action="store_true", dest="vt", default=False,
                      help="use VampirTrace for MPI profiling")
    # Everything after <command> belongs to the sub-command, not to us.
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args(args)
    # Apply runtime configuration before the MPI module is imported.
    import mpi4py
    mpi4py.rc.threads = options.threads
    mpi4py.rc.thread_level = options.thread_level
    if options.mpe:
        mpi4py.profile('mpe', logfile='mpi4py')
    if options.vt:
        mpi4py.profile('vt', logfile='mpi4py')
    # Importing MPI initializes the MPI runtime with the settings above.
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    if not args:
        # Only rank 0 prints; every rank exits.
        if comm.rank == 0:
            parser.print_usage()
        parser.exit()
    command = args.pop(0)
    # ``main.commands`` is a registry attached to this function elsewhere
    # in the file — NOTE(review): confirm it is populated before dispatch.
    if command not in main.commands:
        if comm.rank == 0:
            parser.error("unknown command '%s'" % command)
        parser.exit(2)
    command = main.commands[command]
    command(comm, args=args)
    parser.exit()
def main(args=None):
    """Entry point for ``python -m mpi4py``: parse options, configure the
    runtime, then dispatch to a registered sub-command.

    NOTE(review): this variant uses ``mpi4py.rc.threaded`` and a
    module-level ``_commands`` registry (cf. the ``main.commands``
    variant elsewhere in this file) — confirm which API generation the
    surrounding file targets.
    """
    from optparse import OptionParser
    from mpi4py import __name__ as prog
    from mpi4py import __version__ as version
    parser = OptionParser(prog=prog,
                          version='%prog ' + version,
                          usage="%prog [options] <command> [args]")
    parser.add_option("--no-threads",
                      action="store_false", dest="threaded", default=True,
                      help="initialize MPI without thread support")
    parser.add_option("--thread-level",
                      type="choice", metavar="LEVEL",
                      choices=["single", "funneled",
                               "serialized", "multiple"],
                      action="store", dest="thread_level",
                      default="multiple",
                      help="initialize MPI with required thread support")
    parser.add_option("--mpe",
                      action="store_true", dest="mpe", default=False,
                      help="use MPE for MPI profiling")
    parser.add_option("--vt",
                      action="store_true", dest="vt", default=False,
                      help="use VampirTrace for MPI profiling")
    # Everything after <command> belongs to the sub-command.
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args(args)
    #
    # Configure the runtime before mpi4py.MPI is imported.
    import mpi4py
    mpi4py.rc.threaded = options.threaded
    mpi4py.rc.thread_level = options.thread_level
    if options.mpe:
        mpi4py.profile('mpe', logfile='mpi4py')
    if options.vt:
        mpi4py.profile('vt', logfile='mpi4py')
    #
    # Importing MPI initializes the MPI runtime with the settings above.
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    if not args:
        if comm.rank == 0:
            parser.print_usage()
        parser.exit()
    command = args.pop(0)
    if command not in _commands:
        if comm.rank == 0:
            parser.error("unknown command '%s'" % command)
        parser.exit(2)
    command = _commands[command]
    command(comm, args=args)
    parser.exit()
#!/usr/bin/env python # If you want MPE to log MPI calls, you have to add the two lines # below at the very beginning of your main bootstrap script. import mpi4py mpi4py.profile('mpe', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI if 0: # <- use '1' to disable logging of MPI calls MPI.Pcontrol(0) # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n s = 0.0
#!/usr/bin/env python import os os.environ['MPE_LOGFILE_PREFIX'] = 'ring' import mpi4py mpi4py.profile('mpe') from mpi4py import MPI from array import array comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank-1 dest = rank+1 if rank == 0: src = size-1 if rank == size-1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0]*1000); a1 *= 1000 a2 = array('d', [0]*1000); a2 *= 1000 comm.Sendrecv(sendbuf=a1, recvbuf=a2, source=src, dest=dest)
def mpi4py_profile(*profile_args, **profile_opts):
    """Best-effort wrapper around ``mpi4py.profile``.

    ``mpi4py.profile`` raises ``ValueError`` when the requested profiler
    library cannot be used; this wrapper swallows exactly that error so
    callers proceed unprofiled instead of failing.
    """
    try:
        mpi4py.profile(*profile_args, **profile_opts)
    except ValueError:
        pass
# Child script for a spawn-based test run: sys.argv[1] is the mpi4py
# build/source directory to import from; optional sys.argv[2] selects a
# profiler ('mpe' or 'vt').
import sys; sys.path.insert(0, sys.argv[1])
import mpi4py
if len(sys.argv) > 2:
    lfn = "runtests-mpi4py-child"
    mpe = sys.argv[2] == 'mpe'
    vt = sys.argv[2] == 'vt'
    if mpe:
        mpi4py.profile('mpe', logfile=lfn)
    if vt:
        mpi4py.profile('vt', logfile=lfn)
from mpi4py import MPI
# Synchronize with the parent, then detach from the intercommunicator.
parent = MPI.Comm.Get_parent()
parent.Barrier()
parent.Disconnect()
assert parent == MPI.COMM_NULL
# After disconnecting, the parent communicator must be COMM_NULL.
parent = MPI.Comm.Get_parent()
assert parent == MPI.COMM_NULL
# Exercise the mpi4py.rc() callable and mpi4py.profile() with valid and
# invalid arguments. NOTE(review): ``mpi4py`` is used before the
# ``import mpi4py`` below — this chunk presumably follows an earlier
# import in the original file; confirm.
mpi4py.rc(
    initialize=True,
    threads=True,
    thread_level='multiple',
    finalize=None,
    fast_reduce=True,
    recv_mprobe=True,
    errors='exception',
)
# Unknown rc keys must raise TypeError ('querty' is intentionally bogus).
try:
    mpi4py.rc(querty=False)
except TypeError:
    pass
import mpi4py
# Valid profiler selections, with and without path/logfile options.
mpi4py.profile()
mpi4py.profile('mpe')
mpi4py.profile('mpe', path="/usr/lib")
mpi4py.profile('mpe', path=["/usr/lib"])
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('vt')
mpi4py.profile('vt', path="/usr/lib")
mpi4py.profile('vt', path=["/usr/lib"])
mpi4py.profile('vt', logfile="mpi4py")
mpi4py.profile('vt', logfile="mpi4py")
# An unloadable profiler name must raise ValueError.
try:
    mpi4py.profile('@querty')
except ValueError:
    pass
#!/usr/bin/env python import mpi4py mpi4py.rc.threads = True mpi4py.rc.thread_level = "funneled" mpi4py.profile('vt-hyb', logfile='threads') from mpi4py import MPI from threading import Thread MPI.COMM_WORLD.Barrier() # Understanding the Python GIL # David Beazley, http://www.dabeaz.com # PyCon 2010, Atlanta, Georgia # http://www.dabeaz.com/python/UnderstandingGIL.pdf # Consider this trivial CPU-bound function def countdown(n): while n > 0: n -= 1 # Run it once with a lot of work COUNT = 10000000 # 10 millon tic = MPI.Wtime() countdown(COUNT) toc = MPI.Wtime() print ("sequential: %f seconds" % (toc-tic)) # Now, subdivide the work across two threads t1 = Thread(target=countdown, args=(COUNT//2,))
#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n s = 0.0 for i in range(myrank + 1, n + 1, nprocs):
# ring_mpe.py """ Demonstrates mpi4py profiling with MPE. Run this with 8 processes like: $ mpiexec -n 8 python ring_mpe.py """ import os #os.environ['MPE_LOGFILE_PREFIX'] = 'ring' import mpi4py #mpi4py.profile('mpe') # or mpi4py.profile('mpe', logfile='ring') from mpi4py import MPI from array import array comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank - 1 dest = rank + 1 if rank == 0: src = size - 1 if rank == size - 1: dest = 0 try: from numpy import zeros
# ring_mpe.py """ Demonstrates mpi4py profiling with MPE. Run this with 8 processes like: $ mpiexec -n 8 python ring_mpe.py """ import os os.environ['MPE_LOGFILE_PREFIX'] = 'ring' import mpi4py mpi4py.profile('mpe') # or # mpi4py.profile('mpe', logfile='ring') from mpi4py import MPI from array import array comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank - 1 dest = rank + 1 if rank == 0: src = size - 1 if rank == size - 1: dest = 0 try: from numpy import zeros
# Child script for a spawn-based test run (duplicate of a sibling
# variant): sys.argv[1] is the mpi4py directory to import from;
# optional sys.argv[2] selects a profiler ('mpe' or 'vt').
import sys
sys.path.insert(0, sys.argv[1])
import mpi4py
if len(sys.argv) > 2:
    lfn = "runtests-mpi4py-child"
    mpe = sys.argv[2] == 'mpe'
    vt = sys.argv[2] == 'vt'
    if mpe:
        mpi4py.profile('mpe', logfile=lfn)
    if vt:
        mpi4py.profile('vt', logfile=lfn)
from mpi4py import MPI
# Synchronize with the parent, then detach from the intercommunicator.
parent = MPI.Comm.Get_parent()
parent.Barrier()
parent.Disconnect()
assert parent == MPI.COMM_NULL
# After disconnecting, the parent communicator must be COMM_NULL.
parent = MPI.Comm.Get_parent()
assert parent == MPI.COMM_NULL
# Self send/recv from two threads, traced with MPE. The profile() call
# must precede ``from mpi4py import MPI``.
import sys
import mpi4py
mpi4py.profile('mpe', logfile='threads')
from mpi4py import MPI
from array import array
try:
    import threading
except ImportError:
    sys.stderr.write("threading module not available\n")
    sys.exit(0)
# 1000*1000-element int buffers (sequence repetition, not scaling).
send_msg = array('i', [7]*1000); send_msg *= 1000
recv_msg = array('i', [0]*1000); recv_msg *= 1000
def self_send(comm, rank):
    # Blocking send to our own rank; pairs with self_recv below.
    comm.Send([send_msg, MPI.INT], dest=rank, tag=0)
def self_recv(comm, rank):
    comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
send_thread = threading.Thread(target=self_send, args=(comm, rank))
recv_thread = threading.Thread(target=self_recv, args=(comm, rank))
send_thread.start()
recv_thread.start()
# NOTE(review): only the receiver is joined here — a matching
# send_thread.join() may follow in the original file; confirm.
recv_thread.join()
#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5);
# Self send/recv from two threads, traced with MPE.
# NOTE(review): the profiler name here is 'MPE' (upper case) while
# sibling scripts use 'mpe' — confirm both spellings resolve to the
# same library.
import sys
import mpi4py
mpi4py.profile('MPE', logfile='threads')
from mpi4py import MPI
from array import array
try:
    import threading
except ImportError:
    sys.stderr.write("threading module not available\n")
    sys.exit(0)
# 1000*1000-element int buffers (sequence repetition, not scaling).
send_msg = array('i', [7] * 1000)
send_msg *= 1000
recv_msg = array('i', [0] * 1000)
recv_msg *= 1000
def self_send(comm, rank):
    # Blocking send to our own rank; pairs with self_recv below.
    comm.Send([send_msg, MPI.INT], dest=rank, tag=0)
def self_recv(comm, rank):
    comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
send_thread = threading.Thread(target=self_send, args=(comm, rank))
recv_thread = threading.Thread(target=self_recv, args=(comm, rank))
#!/usr/bin/env python if False: import mpi4py name = "name" # lib{name}.so path = [] mpi4py.profile(name, path=path) from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank - 1 dest = rank + 1 if rank == 0: src = size - 1 if rank == size - 1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0] * 1000) a1 *= 1000 a2 = array('d', [0] * 1000) a2 *= 1000
#!/usr/bin/env python from __future__ import with_statement # Python 2.5 and later # If you want MPE to log MPI calls, you have to add the two lines # below at the very beginning of your main bootstrap script. import mpi4py mpi4py.profile('MPE', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI if 0: # <- use '1' to disable logging of MPI calls MPI.Pcontrol(0) # Import the MPE extension module from mpi4py import MPE if 1: # <- use '0' to disable user-defined logging # This has to be explicitly called ! MPE.initLog(logfile='cpilog') # Set the log file name (note: no extension) MPE.setLogFileName('cpilog') # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep # User-defined MPE events cpi_begin = MPE.newLogEvent("ComputePi-Begin", "yellow") cpi_end = MPE.newLogEvent("ComputePi-End", "pink")
#!/usr/bin/env python # If you want MPE to log MPI calls, you have to add the two lines # below at the very beginning of your main bootstrap script. import mpi4py mpi4py.profile('mpe', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI if 0: # <- use '1' to disable logging of MPI calls MPI.Pcontrol(0) # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs):
#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt-mpi', logfile='ring') from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank - 1 dest = rank + 1 if rank == 0: src = size - 1 if rank == size - 1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0] * 1000) a1 *= 1000 a2 = array('d', [0] * 1000) a2 *= 1000
def mpi4py_profile(*call_args, **call_kwargs):
    """Forward to ``mpi4py.profile``, ignoring unsupported profilers.

    A ``ValueError`` from ``mpi4py.profile`` (profiler library not
    usable) is deliberately discarded; any other exception propagates.
    """
    try:
        mpi4py.profile(*call_args, **call_kwargs)
    except ValueError:
        pass
#!/usr/bin/env python from __future__ import with_statement # Python 2.5 and later # If you want MPE to log MPI calls, you have to add the two lines # below at the very beginning of your main bootstrap script. import mpi4py mpi4py.profile('MPE', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI if 0: # <- use '1' to disable logging of MPI calls MPI.Pcontrol(0) # Import the MPE extension module from mpi4py import MPE if 1: # <- use '0' to disable user-defined logging # This has to be explicitly called ! MPE.initLog(logfile='cpilog') # Set the log file name (note: no extension) MPE.setLogFileName('cpilog') # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep # User-defined MPE events cpi_begin = MPE.newLogEvent("ComputePi-Begin", "yellow")
# Self send/recv from two threads, traced with MPE ('MPE' spelling —
# NOTE(review): sibling scripts use lowercase 'mpe'; confirm both work).
import sys
import mpi4py
mpi4py.profile('MPE', logfile='threads')
from mpi4py import MPI
from array import array
try:
    import threading
except ImportError:
    sys.stderr.write("threading module not available\n")
    sys.exit(0)
# 1000*1000-element int buffers (sequence repetition, not scaling).
send_msg = array('i', [7]*1000); send_msg *= 1000
recv_msg = array('i', [0]*1000); recv_msg *= 1000
def self_send(comm, rank):
    # Blocking send to our own rank; pairs with self_recv below.
    comm.Send([send_msg, MPI.INT], dest=rank, tag=0)
def self_recv(comm, rank):
    comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
send_thread = threading.Thread(target=self_send, args=(comm, rank))
recv_thread = threading.Thread(target=self_recv, args=(comm, rank))
send_thread.start()
recv_thread.start()
# NOTE(review): only the receiver is joined here — a matching
# send_thread.join() may follow in the original file; confirm.
recv_thread.join()
# Exercise the mpi4py.rc() callable and mpi4py.profile() with valid and
# invalid arguments, then import the package __main__ module.
import mpi4py.rc
mpi4py.rc(
    initialize = True,
    threads = True,
    thread_level = 'multiple',
    finalize = None,
    fast_reduce = True,
    recv_mprobe = True,
    errors = 'exception',
)
# Unknown rc keys must raise TypeError ('querty' is intentionally bogus).
try:
    mpi4py.rc(querty=False)
except TypeError:
    pass
import mpi4py
# Valid profiler selections, with and without path/logfile options.
mpi4py.profile()
mpi4py.profile('mpe')
mpi4py.profile('mpe', path="/usr/lib")
mpi4py.profile('mpe', path=["/usr/lib"])
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('vt')
mpi4py.profile('vt', path="/usr/lib")
mpi4py.profile('vt', path=["/usr/lib"])
mpi4py.profile('vt', logfile="mpi4py")
mpi4py.profile('vt', logfile="mpi4py")
# An unloadable profiler name must raise ValueError.
try:
    mpi4py.profile('@querty')
except ValueError:
    pass
import mpi4py.__main__
def testProfile(self):
    """Exercise mpi4py.profile() against real system libraries and
    verify its warning/error behavior for bad inputs.

    NOTE(review): relies on module-level ``os``, ``pathlib``,
    ``warnings``, ``mpi4py``, and ``MPI`` imported elsewhere in the
    original test file — confirm.
    """
    import struct
    import sysconfig
    # Pointer width in bits (32/64) and the Debian-style multiarch
    # triplet, used to build candidate library directories.
    bits = struct.calcsize('P') * 8
    triplet = sysconfig.get_config_var('MULTIARCH') or ''
    libpath = [
        f"{prefix}{suffix}"
        for prefix in ("/lib", "/usr/lib")
        for suffix in (bits, f"/{triplet}", "")
    ]
    # Path-like converters to exercise bytes/str/Path acceptance.
    fspath = (os.fsencode, os.fsdecode, pathlib.Path)
    libraries = (
        'c', 'libc.so.6',
        'm', 'libm.so.6',
        'dl', 'libdl.so.2',
    )
    def mpi4py_profile(*args, **kwargs):
        # Best-effort: ignore profilers that cannot be loaded here.
        try:
            mpi4py.profile(*args, **kwargs)
        except ValueError:
            pass
    if os.name != 'posix':
        # Non-POSIX: loading an arbitrary file must warn.
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            with self.assertRaises(UserWarning):
                mpi4py.profile(MPI.__file__)
        return
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # Every library name, with list paths, mapped path types, and
        # single-path values of each path-like flavor.
        for libname in libraries:
            mpi4py_profile(libname, path=libpath)
            for fs in fspath:
                mpi4py_profile(libname, path=map(fs, libpath))
            for path in libpath:
                mpi4py_profile(libname, path=path)
                for fsp in fspath:
                    mpi4py_profile(libname, path=fsp(path))
        warnings.simplefilter('error')
        # A non-library file must raise a UserWarning (escalated here).
        with self.assertRaises(UserWarning):
            mpi4py.profile('hosts', path=["/etc"])
        # Bogus profiler names must raise ValueError regardless of path.
        with self.assertRaises(ValueError):
            mpi4py.profile('@querty')
        with self.assertRaises(ValueError):
            mpi4py.profile('@querty', path="/usr/lib")
        with self.assertRaises(ValueError):
            mpi4py.profile('@querty', path=["/usr/lib"])
        with self.assertRaises(ValueError):
            mpi4py.profile('@querty')
#!/usr/bin/env python import mpi4py mpi4py.rc.threads = True mpi4py.rc.thread_level = "funneled" mpi4py.profile('vt-hyb', logfile='threads') from mpi4py import MPI from threading import Thread MPI.COMM_WORLD.Barrier() # Understanding the Python GIL # David Beazley, http://www.dabeaz.com # PyCon 2010, Atlanta, Georgia # http://www.dabeaz.com/python/UnderstandingGIL.pdf # Consider this trivial CPU-bound function def countdown(n): while n > 0: n -= 1 # Run it once with a lot of work COUNT = 10000000 # 10 millon tic = MPI.Wtime() countdown(COUNT) toc = MPI.Wtime() print("sequential: %f seconds" % (toc - tic))
import mpi4py #mpi4py.profile('vt', logfile='ring') mpi4py.profile('mpe', logfile='mpi') import numpy as np import os import time # import struct # import matplotlib.pyplot as plt import math from mpi4py import MPI from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument("--mode", type=int, default=0, help="descrete:0, continuous:1") args = parser.parse_args() def readmnist(mnist_dir, mode='training'): if mode == 'training': image_dir = os.path.join(mnist_dir, 'train-images-idx3-ubyte') label_dir = os.path.join(mnist_dir, 'train-labels-idx1-ubyte') elif mode == 'testing': image_dir = os.path.join(mnist_dir, 't10k-images-idx3-ubyte') label_dir = os.path.join(mnist_dir, 't10k-labels-idx1-ubyte') with open(image_dir, 'rb') as fimage: #training: magic=2049, num=60000, row=28, col=28 magic, num, row, col = np.fromfile(fimage,
#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt-mpi', logfile='ring') from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank-1 dest = rank+1 if rank == 0: src = size-1 if rank == size-1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0]*1000); a1 *= 1000 a2 = array('d', [0]*1000); a2 *= 1000 comm.Sendrecv(sendbuf=a1, recvbuf=a2,