コード例 #1
0
 def destroy(self, *args):
   """Shut down the GUI process and tell the worker process to quit.

   Connected as a GTK quit/destroy handler; *args* absorbs whatever
   widget arguments GTK passes to the callback.
   """
   ## Tell the rank-1 process to leave its message loop first, so it
   ## is not left blocked in a receive after this process exits.
   mpitools.send_string("quit", 1)
   self.window.hide()
   gtk.mainquit()
   ## print "quitting main"
   sys.stdout.flush()   # make pending output visible before exiting
   sys.exit()
コード例 #2
0
ファイル: mpiGUI.py プロジェクト: anilkunwar/OOF2
 def apply_function(self, control_box, data=None):
   """GTK button callback: trigger the registered function remotely
   and start a local worker thread.

   Sends the "apply function" command to the rank-1 process, then
   launches a Clerk thread (which presumably runs self.callback --
   confirm against clerk.Clerk) so the GTK main loop stays live.
   """
   ##startTime = time.time()
   msg = "apply function"
   ## tells other processors to execute the function callback
   mpitools.send_string(msg, 1)
   ## create and launch thread
   self.thread = clerk.Clerk(self.callback)
   self.thread.start()
コード例 #3
0
ファイル: mpiGUI.py プロジェクト: santiama/OOF3D
 def apply_function(self, control_box, data=None):
     """GTK button callback: ask the worker to run the registered
     function, then start a background Clerk thread for the local
     copy of the callback."""
     command = "apply function"
     ## The rank-1 process interprets this string as "execute the
     ## registered function callback".
     mpitools.send_string(command, 1)
     ## Keep the GUI responsive: run our callback off the main loop.
     self.thread = clerk.Clerk(self.callback)
     self.thread.start()
コード例 #4
0
 def latencyTime(self, control_box, data=None):
   """GTK callback: measure round-trip message time to rank 1.

   Each invocation grows the module-global message by 50 bytes, so
   repeated clicks chart latency as a function of message size.
   """
   global msg
   msg += 50*" "   # enlarge the payload by 50 bytes per call
   startTime = time.time()
   mpitools.send_string(msg, 1)
   # The worker is expected to echo the message straight back.
   msg = mpitools.receive_string(1)
   endTime = time.time()
   # Report payload size and round-trip seconds (Python 2 print).
   print len(msg), endTime -startTime
   sys.stdout.flush()
コード例 #5
0
ファイル: mpiGUI.py プロジェクト: anilkunwar/OOF2
 def apply_function(self, control_box, data=None):
   ## message should be sent here.
   startTime = time.time()
   msg = "apply function"
   mpitools.send_string(msg, 1) ## tells other processors to execute the function callback
   self.callback()
   msg = mpitools.recieve_string(1) ## waits for processors to send
   endTime = time.time()
   print "processing time=", endTime -startTime
   sys.stdout.flush()
コード例 #6
0
ファイル: mpiGUI.py プロジェクト: santiama/OOF3D
 def apply_function(self, control_box, data=None):
     ## message should be sent here.
     startTime = time.time()
     msg = "apply function"
     mpitools.send_string(
         msg, 1)  ## tells other processors to execute the function callback
     self.callback()
     msg = mpitools.recieve_string(1)  ## waits for processors to send
     endTime = time.time()
     print "processing time=", endTime - startTime
     sys.stdout.flush()
コード例 #7
0
 def get_communication_cost(self, target):
     """Collect processor *target*'s communication-cost figure into
     self.latency[target] on the rank-0 process.

     Runs on every process; only rank *target* and rank 0 do work.
     """
     global _rank
     global performance
     if _rank == target:
         if target == 0:
             ## Rank 0 measuring itself: no message passing needed.
             self.latency[target] = performance.get_communication()
             return
         ## Non-zero target ships its figure to rank 0 as a string.
         msg = repr(performance.get_communication())
         mpitools.send_string(msg, 0)
     if _rank == 0:
         ## NOTE(review): eval() of a peer-supplied string. Peers are
         ## trusted MPI ranks here, but float() (as used in
         ## get_computation_cost) would be safer -- confirm.
         self.latency[target] = eval(mpitools.receive_string(target))
コード例 #8
0
 def get_communication_cost(self, target):
     """Gather processor *target*'s communication cost into
     self.latency[target] on rank 0.

     Every process runs this; only rank *target* and rank 0 act.
     """
     global _rank
     global performance
     if _rank == target:
         if target != 0:
             ## Remote rank: serialize the figure and ship it to rank 0.
             mpitools.send_string(repr(performance.get_communication()), 0)
         else:
             ## Rank 0 measuring itself: record locally, nothing to send.
             self.latency[target] = performance.get_communication()
             return
     if _rank == 0:
         ## Rank 0 blocks here until the target's figure arrives.
         self.latency[target] = eval(mpitools.receive_string(target))
コード例 #9
0
ファイル: advanced_broadcast.py プロジェクト: anilkunwar/OOF2
def h(x=None):
  """Smoke-test point-to-point messaging: non-zero ranks each send
  ten strings to rank 0, which receives and prints ten messages.

  NOTE(review): rank 0 only receives from rank 1, so with more than
  two processes the sends from ranks >= 2 are never consumed --
  confirm this is only run on two processors.
  """
  try:
    if rank == 0:
      for i in range(10):
        msg = mpitools.receive_string(1)
        print msg
    else:
      for i in range(10):
        mpitools.send_string("this is a freakin test", 0);
  finally:
    ## every process waits here, even if an exception escaped above
    mpitools.mpi_barrier() ## all  process wait each other here
コード例 #10
0
def h(x=None):
    try:
        if rank == 0:
            for i in range(10):
                msg = mpitools.receive_string(1)
                print msg
        else:
            for i in range(10):
                mpitools.send_string("this is a freakin test", 0)
    finally:
        mpitools.mpi_barrier()  ## all  process wait each other here
コード例 #11
0
 def get_computation_cost(self, target):
     """Collect processor *target*'s computation-cost figure into
     self.computation[target] on the rank-0 process.

     Runs on every process; only rank *target* and rank 0 do work.
     """
     global _rank
     global performance
     ## performance is a MachinePerformance object which holds the
     ## performance parameters of the target-th processor.
     if _rank == target:
         if target == 0:  ## if local data just grab it
             self.computation[target] = performance.get_computational()
             return
         ## if data not local, send it to process zero
         msg = str(performance.get_computational())
         mpitools.send_string(msg, 0)
     if _rank == 0:
         ## process zero waits for data
         self.computation[target] = float(mpitools.receive_string(target))
コード例 #12
0
 def get_computation_cost(self, target):
     """Gather the computational-cost figure of processor *target*
     into self.computation[target] on rank 0.

     `performance` (a module-global MachinePerformance object) holds
     the measurements for the local processor.
     """
     global _rank
     global performance
     if _rank == target:
         if target != 0:
             ## Remote measurement: forward it to rank 0 as text.
             mpitools.send_string(str(performance.get_computational()), 0)
         else:
             ## Local measurement on rank 0: store it directly.
             self.computation[target] = performance.get_computational()
             return
     if _rank == 0:
         ## Rank 0 blocks until the target's figure arrives.
         self.computation[target] = float(mpitools.receive_string(target))
コード例 #13
0
ファイル: petsc_algebra_test.py プロジェクト: anilkunwar/OOF2
def run():    
  if numproc < 2:
    mpitools.mpi_abort() ## the whole point is to run in parallel!
    
  if rank == 0:
    ## front end
    ## import all modules and functions that it will need
    from ooflib.common.EXTRA import mpiGUI
    localGUI = mpiGUI.MpiGUI()
    localGUI.add_function(g)
    localGUI.mainloop() ## mainloop may execute other tasks too.
  else:
    while 1:
      msg = mpitools.recieve_string(0) ## waiting messages from GUI
      if msg == "quit":
        print "quitting process", rank
        sys.stdout.flush()
        sys.exit()
      elif msg == "apply function":
        g()
      mpitools.send_string(msg,0)
コード例 #14
0
def run():
    if numproc < 2:
        mpitools.mpi_abort()  ## the whole point is to run in parallel!

    if rank == 0:
        ## front end
        ## import all modules and functions that it will need
        from ooflib.common.EXTRA import mpiGUI
        localGUI = mpiGUI.MpiGUI()
        localGUI.add_function(g)
        localGUI.mainloop()  ## mainloop may execute other tasks too.
    else:
        while 1:
            msg = mpitools.recieve_string(0)  ## waiting messages from GUI
            if msg == "quit":
                print "quitting process", rank
                sys.stdout.flush()
                sys.exit()
            elif msg == "apply function":
                g()
            mpitools.send_string(msg, 0)
コード例 #15
0
    def estimate_communication(self, org):
        """Measure round-trip message time from rank *org* to every
        other rank.

        All ranks enter this method together.  Rank *org* sends a
        short string to each peer and times the echo, storing the
        elapsed seconds (stringified) in self.communication[dest];
        every other rank just echoes what it receives.  Afterwards
        *org* reports completion to the front end over the socket.
        """

        global _rank
        global _size
        msg = "oofing the world"
        startTime = 0
        if org == _rank:
            for dest in range(_size):
                if dest != _rank:  ## do not try to talk to yourself
                    startTime = time.time()
                    mpitools.send_string(msg, dest)
                    mpitools.receive_string(dest)  # wait for the echo
                    endTime = time.time()
                    self.communication[dest] = str(endTime - startTime)
            socket2me.socketWrite("live chickens")
        else:
            ## Passive side: bounce org's message straight back.
            msg2 = mpitools.receive_string(org)
            mpitools.send_string(msg2, org)
コード例 #16
0
    def estimate_communication(self, org):
        """Time a round trip from rank *org* to each other rank.

        On rank *org*: ping every peer with a short string, wait for
        the echo, and record the elapsed seconds (stringified) in
        self.communication[peer], then notify the front end via the
        socket.  On every other rank: echo org's message back.
        """
        global _rank
        global _size
        probe = "oofing the world"
        if org != _rank:
            ## Passive participant: bounce the probe straight back.
            echo = mpitools.receive_string(org)
            mpitools.send_string(echo, org)
        else:
            for peer in range(_size):
                if peer == _rank:
                    continue  ## do not try to talk to yourself
                tick = time.time()
                mpitools.send_string(probe, peer)
                mpitools.receive_string(peer)
                self.communication[peer] = str(time.time() - tick)
            socket2me.socketWrite("live chickens")
コード例 #17
0
print "I am proc %d of %d on node %s" %(myid+1, numproc, node)
mpitools.mpi_barrier() ## synchronizes all the processes


  
if numproc < 2:
  print "Demo must run on at least 2 processors to continue"      
  mpitools.mpi_abort()
  ## sys.exit()
  
if myid == 0:
  proc_0_time = time.time()
  msg = "%f"%proc_0_time  
  print 'Processor 1 sending message "%s" to processor %d' %(msg, 2)
  print  start_time, proc_0_time
  mpitools.send_string(msg, 1)
  mpitools.mpi_barrier()
  msg = mpitools.recieve_string(numproc-1)
  print 'Processor 1 received message "%s" from processor %d' %(msg, numproc)
  ## print 'Size of msg was %d bytes' %(SWIG.common.mpitools.bytes())


else:
  source = myid-1
  destination = (myid+1)%numproc
  mpitools.mpi_barrier()
  msg = mpitools.recieve_string(source)
  
  print 'Processor %d received message "%s" from processor %d'\
        %(myid+1, msg, source+1)
  ## print 'Size of msg was %d bytes' %(SWIG.common.mpitools.bytes())  
コード例 #18
0
 def send_message(self, control_box, data=None):
     """GTK callback: deliver a greeting string to the rank-1 process."""
     mpitools.send_string("Hello world!", 1)
コード例 #19
0
ファイル: helloProcess1.py プロジェクト: anilkunwar/OOF2
 def send_message(self, control_box, data=None):
   """GTK callback: send the greeting "Hello world!" to the process
   with rank 1."""
   msg = "Hello world!"
   mpitools.send_string(msg, 1)
コード例 #20
0
    endTime = time.time()
    print len(msg), endTime -startTime
    sys.stdout.flush()
    
if numproc < 2:
  mpitools.mpi_abort() ## the whole point is to run in parallel!

## Rank 0 drives the GUI; every other rank sits in an echo loop,
## obeying "quit" and bouncing all other messages back.
## NOTE(review): here mpiGUI is called directly -- elsewhere in this
## file the class is mpiGUI.MpiGUI(); confirm what the enclosing
## script actually imported.
if rank == 0:
  ## start GUI
  import gtk, time
  localGUI = mpiGUI()
  localGUI.mainloop()
else:
  while 1:
    msg = mpitools.receive_string(0) ## waiting messages from GUI
    if msg == "quit":
      ## print "quitting process", rank
      sys.stdout.flush()
      sys.exit()
      ## mpitools.mpi_abort()
    ## Echo the message back so the GUI knows it was handled.
    mpitools.send_string(msg,0)
    ## print msg, " my rank is ", rank, " my name is ", name

## no need to use mpi_finalize. at_exit takes care of this implicitly.