Example #1
0
 def latencyTime(self, control_box, data=None):
   """Round-trip a growing message to rank 1 and print the timing.

   Presumably a GUI callback (``control_box``/``data`` are unused in the
   body — TODO confirm against the caller).  Pads the global ``msg``
   with 50 spaces, sends it to rank 1, waits for the echo, then prints
   the message length and the elapsed wall-clock seconds.
   """
   ## Grow the payload on every call so successive calls time
   ## progressively larger messages.
   global msg
   msg += 50*" "
   startTime = time.time()
   mpitools.send_string(msg, 1)   ## blocking send to rank 1
   msg = mpitools.receive_string(1)   ## blocking wait for the echo
   endTime = time.time()
   print len(msg), endTime -startTime
   sys.stdout.flush()
 def get_communication_cost(self, target):
     """Gather rank *target*'s communication cost onto rank 0.

     The target rank reads the figure from the global ``performance``
     object and ships it to rank 0 as ``repr()`` text; rank 0 decodes
     it into ``self.latency[target]``.  When *target* is rank 0 itself
     the value is read locally and no messages are exchanged.
     """
     global _rank
     global performance
     if _rank == target and target == 0:
         # Rank 0 asking about itself: grab the value directly.
         self.latency[target] = performance.get_communication()
         return
     if _rank == target:
         # We are the rank being probed: report our figure to rank 0.
         mpitools.send_string(repr(performance.get_communication()), 0)
     if _rank == 0:
         # NOTE(review): eval() of a received string -- acceptable only
         # between trusted ranks of the same program; never reuse this
         # pattern with untrusted input.
         self.latency[target] = eval(mpitools.receive_string(target))
 def get_communication_cost(self, target):
     """Gather rank *target*'s communication cost onto rank 0.

     On rank ``target`` the figure is read from the global
     ``performance`` object and sent to rank 0 as ``repr()`` text;
     rank 0 stores the decoded value in ``self.latency[target]``.
     When *target* is rank 0 itself the value is read locally and no
     messages are exchanged.
     """
     global _rank
     global performance
     if _rank == target:
         if target == 0:
             ## local data: no messaging needed
             self.latency[target] = performance.get_communication()
             return
         msg = repr(performance.get_communication())
         mpitools.send_string(msg, 0)
     if _rank == 0:
         ## NOTE(review): eval() of a received string -- safe only because
         ## the peers are trusted ranks of the same MPI program; do not
         ## reuse this pattern with untrusted input.
         self.latency[target] = eval(mpitools.receive_string(target))
def h(x=None):
  """Ten-message ping test: rank 1 sends, rank 0 receives and prints.

  The barrier in the ``finally`` clause keeps both ranks in step even
  if the exchange raises.  *x* is unused -- presumably a placeholder
  for a callback-style signature; TODO confirm against the caller.
  """
  try:
    if rank == 0:
      for i in range(10):
        msg = mpitools.receive_string(1)
        print msg
    else:
      for i in range(10):
        mpitools.send_string("this is a freakin test", 0);
  finally:
    mpitools.mpi_barrier() ## all processes wait for each other here
Example #5
0
def h(x=None):
    """Ten-message ping test: rank 1 sends, rank 0 receives and prints.

    The barrier in the ``finally`` clause keeps all ranks synchronized
    even if the exchange raises.  *x* is unused -- presumably a
    placeholder for a callback-style signature; TODO confirm.
    """
    try:
        if rank == 0:
            for i in range(10):
                msg = mpitools.receive_string(1)
                print msg
        else:
            for i in range(10):
                mpitools.send_string("this is a freakin test", 0)
    finally:
        mpitools.mpi_barrier()  ## all processes wait for each other here
    def estimate_communication(self, org):
        """Time round-trip messages from rank *org* to every other rank.

        All ranks enter this method.  When we are *org*, send a fixed
        payload to each other rank, wait for the echo, and record the
        elapsed wall time (as a string) in ``self.communication``;
        afterwards report completion to the front end over the socket.
        Every other rank simply echoes the payload back to *org*.
        """
        global _rank
        global _size
        payload = "oofing the world"
        if org != _rank:
            # Responder side: echo whatever org sends us.
            echoed = mpitools.receive_string(org)
            mpitools.send_string(echoed, org)
            return
        # Prober side: round-trip the payload to every other rank.
        for peer in range(_size):
            if peer == _rank:
                continue  # do not try to talk to yourself
            tick = time.time()
            mpitools.send_string(payload, peer)
            mpitools.receive_string(peer)
            tock = time.time()
            self.communication[peer] = str(tock - tick)
        # Completion signal for the front end.
        socket2me.socketWrite("live chickens")
    def estimate_communication(self, org):
        """Time round-trip messages from rank *org* to every other rank.

        All ranks enter this method.  Rank *org* sends a test payload
        to every other rank, times the echo, records the elapsed wall
        time (as a string) in ``self.communication``, and finally
        reports completion to the front end through ``socket2me``.
        Every other rank simply echoes the payload back to *org*.
        """
        global _rank
        global _size
        msg = "oofing the world"
        startTime = 0
        if org == _rank:
            for dest in range(_size):
                if dest != _rank:  ## do not try to talk to yourself
                    startTime = time.time()
                    mpitools.send_string(msg, dest)
                    mpitools.receive_string(dest)
                    endTime = time.time()
                    self.communication[dest] = str(endTime - startTime)
            ## completion signal for the front end
            socket2me.socketWrite("live chickens")
        else:
            ## responder side: echo the payload back to org
            msg2 = mpitools.receive_string(org)
            mpitools.send_string(msg2,org)
 def get_computation_cost(self, target):
     """Gather rank *target*'s computation cost onto rank 0.

     The global ``performance`` object supplies the figure on the
     target rank; rank 0 receives it as text and stores the parsed
     float in ``self.computation[target]``.  When *target* is rank 0
     itself the value is read locally and no messages are exchanged.
     """
     global _rank
     global performance
     if _rank == target and target == 0:
         # Local data: just grab it.
         self.computation[target] = performance.get_computational()
         return
     if _rank == target:
         # Data is remote from rank 0's point of view: send it as text.
         mpitools.send_string(str(performance.get_computational()), 0)
     if _rank == 0:
         # Rank 0 waits for the figure from the target rank.
         self.computation[target] = float(mpitools.receive_string(target))
 def get_computation_cost(self, target):
     """Gather rank *target*'s computation cost onto rank 0.

     On rank ``target`` the figure is read from the global
     ``performance`` object and sent to rank 0 as text; rank 0 stores
     the parsed float in ``self.computation[target]``.  When *target*
     is rank 0 itself the value is read locally with no messaging.
     """
     global _rank
     global  performance
     ## ``performance`` is a MachinePerformance object which holds the
     ## performance parameters of the target-th processor.
     if _rank == target:
         if target == 0: ## if local data just grab it
             self.computation[target] = performance.get_computational()
             return
         ## if data not local, send it to process zero
         msg = str(performance.get_computational())
         mpitools.send_string(msg, 0)
     if _rank == 0:
         ## process zero waits for data
         self.computation[target] = float(mpitools.receive_string(target))
Example #10
0
    ## NOTE(review): fragment -- the enclosing def is not visible in this
    ## chunk (scrape artifact); these lines finish a latency measurement
    ## of the same shape as latencyTime above: wait for the echo, then
    ## print message length and elapsed seconds.
    msg = mpitools.receive_string(1)
    endTime = time.time()
    print len(msg), endTime -startTime
    sys.stdout.flush()
    
## Top-level driver: rank 0 runs the GUI; all other ranks echo messages
## from the GUI until told to quit.
if numproc < 2:
  mpitools.mpi_abort() ## the whole point is to run in parallel!

if rank == 0:
  ## Rank 0 starts the GUI event loop and drives the worker ranks.
  import gtk, time
  localGUI = mpiGUI()
  localGUI.mainloop()
else:
  ## Worker ranks: block for a command from the GUI (rank 0), echo it
  ## back, and exit cleanly when the GUI sends "quit".
  while 1:
    msg = mpitools.receive_string(0) ## waiting for a message from the GUI
    if msg == "quit":
      ## print "quitting process", rank
      sys.stdout.flush()
      sys.exit()
      ## mpitools.mpi_abort()
    mpitools.send_string(msg,0)
    ## print msg, " my rank is ", rank, " my name is ", name


## no need to use mpi_finalize. at_exit takes care of this implicitly.