def destroy(self, *args): mpitools.broadcast_string("quit", 0) ## args: command and sender self.window.hide() gtk.mainquit() print "quitting main" sys.stdout.flush() mpitools.mpi_barrier() sys.exit()
def h(x=None):
    """Raise an MPIException on every process except rank 0.

    The barrier in the ``finally`` clause guarantees that all
    processes synchronize whether or not the exception was raised.
    """
    try:
        if rank != 0:
            raise mpitools.MPIException()
    finally:
        mpitools.mpi_barrier()  ## every process reaches this, exception or not
def h(x=None): try: if rank == 0: for i in range(10): msg = mpitools.receive_string(1) print msg else: for i in range(10): mpitools.send_string("this is a freakin test", 0); finally: mpitools.mpi_barrier() ## all process wait each other here
def h(x=None): try: if rank == 0: for i in range(10): msg = mpitools.receive_string(1) print msg else: for i in range(10): mpitools.send_string("this is a freakin test", 0) finally: mpitools.mpi_barrier() ## all process wait each other here
def f(x=None): ## function to be executed by all threads if rank == 0: print "long job on process=", rank sys.stdout.flush() for i in range(5000000): pass ## do something a little long and tedious else: print "short job on process=", rank sys.stdout.flush() for i in range(50): pass ## do something longer and much more tedious mpitools.mpi_barrier() ## all processes just wait each othere here
def f(x=None): ## function to be executed by all threads if rank == 0: print "long job on process=", rank sys.stdout.flush() for i in range(5000000): pass ## do something a little long and tedious elif rank == 1: print "short job on process=", rank sys.stdout.flush() for i in range(50): pass ## do something longer and much more tedious ##print "waiting for completion", rank ## sys.stdout.flush() mpitools.mpi_barrier() ## any other process just waits
import oofcppc ## import this ALWAYS before any swig-generated modules from ooflib.SWIG.common import mpitools start_time = time.time() mpitools.mpi_initialize(sys.argv) ## no need to use mpi_finalize. The modele at_exit takes care of that. numproc = mpitools.size() myid = mpitools.rank() node = mpitools.get_processor_name() lag = 0 print "I am proc %d of %d on node %s" %(myid+1, numproc, node) mpitools.mpi_barrier() ## synchronizes all the processes if numproc < 2: print "Demo must run on at least 2 processors to continue" mpitools.mpi_abort() ## sys.exit() if myid == 0: proc_0_time = time.time() msg = "%f"%proc_0_time print 'Processor 1 sending message "%s" to processor %d' %(msg, 2) print start_time, proc_0_time mpitools.send_string(msg, 1) mpitools.mpi_barrier()
## ## ## Initialization overhead. sys.path.append("") os.chdir("/Users/edwin/NIST/OOF2/MPIBUILD") import oofcppc ## import this ALWAYS before any swig-generated modules from ooflib.SWIG.common import mpitools mpitools.mpi_initialize(sys.argv) ## no need to use mpi_finalize. The modele at_exit takes care of that. numproc = mpitools.size() rank = mpitools.rank() name = mpitools.get_processor_name() msg = "" ## All processes wait each other here. mpitools.mpi_barrier() ### GTK classes start HERE ### class mpiGUI: def __init__(self): ## create gtk window self.window = gtk.GtkWindow(gtk.WINDOW_TOPLEVEL) self.window.connect("destroy", self.destroy) self.window.set_border_width(10) self.window.set_usize(140, 100) self.control_box = gtk.GtkVBox(gtk.FALSE,0) self.window.add(self.control_box) self.control_box.show()
def _computation_cost(menuitem):
    ## Menu callback: estimate this process's performance, then sync.
    debug.fmsg()
    parallel_performance.set_performance_cost()  ## makes a crude estimate of the processing and memory speed
    mpitools.mpi_barrier()  ## all processes synchronize here
## ## For generalities, check latency.py ## ## sys.path.append("") os.chdir("/u/home3/reida/work/OOF2/mpi_build") ## MPI Initialization overhead. import oofcppc ## import this ALWAYS before any swig-generated modules from ooflib.SWIG.common import mpitools from ooflib.SWIG.engine.PETSc import petsc_solver mpitools.mpi_initialize(sys.argv) numproc = mpitools.size() rank = mpitools.rank() name = mpitools.get_processor_name() mpitools.mpi_barrier() ## All processes wait each other here. def f(x=None): ## function to be executed by all threads if rank == 0: print "long job on process=", rank sys.stdout.flush() for i in range(5000000): pass ## do something a little long and tedious elif rank == 1: print "short job on process=", rank sys.stdout.flush() for i in range(50): pass ## do something longer and much more tedious
def parallel_quit():
    ## Flush pending output, synchronize every process, then exit.
    sys.stdout.flush()
    mpitools.mpi_barrier()  ## --"all together, now!"-- Lennon and McCartney
    sys.exit()