def f(x=None): ## function to be executed by all threads
  
  if rank == 0:
    print "long job on process=", rank
    sys.stdout.flush()
    for i in range(500):
      mpitools.size()
      ## pass ## do something a little long and tedious
    
  else:
    print "short job on process=", rank
    sys.stdout.flush()
    for i in range(50):
      mpitools.rank()
      ## pass ## do something longer and much more tedious
    
  print "finished, process", rank
  sys.stdout.flush()
  mpitools.mpi_barrier() ## all processes just wait each othere here
  print "really finished, process", rank
  sys.stdout.flush()
Example #2
0
def f(x=None):  ## function to be executed by all threads

    if rank == 0:
        print "long job on process=", rank
        sys.stdout.flush()
        for i in range(500):
            mpitools.size()
            ## pass ## do something a little long and tedious

    else:
        print "short job on process=", rank
        sys.stdout.flush()
        for i in range(50):
            mpitools.rank()
            ## pass ## do something longer and much more tedious

    print "finished, process", rank
    sys.stdout.flush()
    mpitools.mpi_barrier()  ## all processes just wait each othere here
    print "really finished, process", rank
    sys.stdout.flush()
Example #3
0
## setenv PYTHONPATH $PYTHONPATH':/Users/edwin/NIST/OOF2/MPIBUILD'
##

## Module-level MPI setup: initialize MPI, identify this process, and
## abort the demo if fewer than two processes are available.
## NOTE(review): relies on `time` and `sys` being imported earlier in
## the file (not visible here) — confirm against the full source.

import oofcppc ## import this ALWAYS before any swig-generated modules
from ooflib.SWIG.common import mpitools

start_time = time.time()  ## wall-clock start, for timing the demo


mpitools.mpi_initialize(sys.argv)
## no need to use mpi_finalize. The module atexit takes care of that.


numproc = mpitools.size()                 ## total number of MPI processes
myid =    mpitools.rank()                 ## this process's rank (0-based)
node =    mpitools.get_processor_name()   ## host name of this process
lag = 0
print "I am proc %d of %d on node %s" %(myid+1, numproc, node)
mpitools.mpi_barrier() ## synchronizes all the processes


  
if numproc < 2:
  ## The demo needs at least a sender and a receiver; abort everything.
  print "Demo must run on at least 2 processors to continue"      
  mpitools.mpi_abort()
  ## sys.exit()
  
if myid == 0:
  ## Rank 0 records its local time and encodes it as a string message.
  proc_0_time = time.time()
  msg = "%f"%proc_0_time  
Example #4
0
## and process 1 in jeeves.
##
## Note that latency.py should be set-up in both adamantium.local AND
## jeeves.nist.gov, in the specified PATHS in order for the example to work.
##
##

## Initialization overhead: make the MPI build directory importable,
## bring up MPI, and record this process's identity.
## NOTE(review): relies on `sys` and `os` being imported earlier in the
## file (not visible here) — confirm against the full source.
sys.path.append("")
os.chdir("/Users/edwin/NIST/OOF2/MPIBUILD")
import oofcppc ## import this ALWAYS before any swig-generated modules
from ooflib.SWIG.common import mpitools
mpitools.mpi_initialize(sys.argv)
## no need to use mpi_finalize. The module atexit takes care of that.
numproc = mpitools.size()                 ## total number of MPI processes
rank =    mpitools.rank()                 ## this process's rank (0-based)
name =    mpitools.get_processor_name()   ## host name of this process
msg = ""                                  ## message buffer, filled in later
## All processes wait for each other here.
mpitools.mpi_barrier()


### GTK classes start HERE ###
class mpiGUI:
  def __init__(self):
    ## create gtk window
    self.window = gtk.GtkWindow(gtk.WINDOW_TOPLEVEL)
    self.window.connect("destroy", self.destroy)
    self.window.set_border_width(10)
    self.window.set_usize(140, 100)