def f(x=None):
    ## Demo job executed by every MPI process: process 0 does a "long job"
    ## (500 calls into mpitools), all others do a "short job" (50 calls),
    ## then everyone meets at a barrier before finishing.
    ##
    ## NOTE(review): this reads a global `rank` (and `sys`, `mpitools`)
    ## from module scope; `rank` is not defined in the visible part of this
    ## file (module level sets `myid`) -- confirm it is bound before f() runs.
    ## The parameter `x` is unused.
    if rank == 0:
        print "long job on process=", rank
        sys.stdout.flush()
        for i in range(500):
            mpitools.size()  ## do something a little long and tedious
            ## pass
    else:
        print "short job on process=", rank
        sys.stdout.flush()
        for i in range(50):
            mpitools.rank()  ## do something longer and much more tedious
            ## pass
    print "finished, process", rank
    sys.stdout.flush()
    mpitools.mpi_barrier()  ## all processes wait for each other here
    print "really finished, process", rank
    sys.stdout.flush()
## For example, in my .cshr file I wrote: ## setenv PYTHONPATH $PYTHONPATH':/Users/edwin/NIST/OOF2/MPIBUILD' ## import oofcppc ## import this ALWAYS before any swig-generated modules from ooflib.SWIG.common import mpitools start_time = time.time() mpitools.mpi_initialize(sys.argv) ## no need to use mpi_finalize. The modele at_exit takes care of that. numproc = mpitools.size() myid = mpitools.rank() node = mpitools.get_processor_name() lag = 0 print "I am proc %d of %d on node %s" %(myid+1, numproc, node) mpitools.mpi_barrier() ## synchronizes all the processes if numproc < 2: print "Demo must run on at least 2 processors to continue" mpitools.mpi_abort() ## sys.exit() if myid == 0: proc_0_time = time.time()