Ejemplo n.º 1
0
def main():
    """Benchmark a load-balanced IPython cluster with random sleep tasks.

    Parses command-line options for the task count, the per-task duration
    range, and the cluster profile; submits ``time.sleep`` tasks through a
    load-balanced view; and prints the measured parallel speedup.
    """
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n",
                      type='int',
                      dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t",
                      type='float',
                      dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T",
                      type='float',
                      dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p",
                      '--profile',
                      type='str',
                      dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    # NOTE(review): assert vanishes under `python -O`; kept to preserve the
    # original behaviour (AssertionError on bad input).
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = Client()
    view = rc.load_balanced_view()
    print(view)  # converted from a Python 2 print statement
    rc.block = True
    nengines = len(rc.ids)
    # Make `time` importable on every engine before dispatching sleeps.
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin
        for i in range(opts.n)
    ]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (
        opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()
    stop = time.time()

    ptime = stop - start
    # Speedup: total serial sleep time over observed wall-clock time.
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Ejemplo n.º 2
0
 def execute(self):
     """Poll the heuristics round-robin until one of them yields points.

     Advances ``self.current`` cyclically through ``self.heuristics`` and
     asks each heuristic for points of batch size ``self.size``, pausing
     1 ms per round so an empty cycle does not spin hot.

     Returns:
         list: the points gathered in the first non-empty round.
     """
     # Hoisted out of the loop: re-importing on every iteration was wasted work.
     from IPython.utils.timing import time
     points = []
     while not points:  # idiomatic emptiness test (was: len(points) == 0)
         hs = self.heuristics
         self.current = (self.current + 1) % len(hs)
         points.extend(hs[self.current].get_points(self.size))
         # Original sleeps even on a successful round; behaviour preserved.
         time.sleep(1e-3)
     return points
Ejemplo n.º 3
0
 def task():
     """Background GTK redraw loop.

     Runs forever: on each pass it takes the GTK thread lock, repaints
     every canvas that has flagged a pending redraw, releases the lock,
     then sleeps for the configured UI redraw delay.
     """
     # Hoisted: importing once instead of on every loop iteration.
     from IPython.utils.timing import time
     while True:
         gtk.threads_enter()
         try:
             # Plain loop instead of a side-effect-only list comprehension.
             for canvas in self._canvases:
                 if canvas._need_redraw:
                     canvas.draw_idle()
         finally:
             # Always release the GTK lock, even if a draw raises.
             gtk.threads_leave()
         time.sleep(self.config.ui_redraw_delay)
Ejemplo n.º 4
0
def main():
    """Benchmark the legacy MultiEngine/Task clients with random sleep tasks.

    Reads task count, duration range and controller location from the
    command line, submits string sleep-tasks to the task controller and
    reports the achieved parallel speedup.
    """
    parser = OptionParser()
    parser.set_defaults(n=100, tmin=1, tmax=60, controller='localhost',
                        meport=10105, tport=10113)

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-c", type='string', dest='controller',
                      help='the address of the controller')
    parser.add_option("-p", type='int', dest='meport',
                      help="the port on which the controller listens for the MultiEngine/RemoteController client")
    parser.add_option("-P", type='int', dest='tport',
                      help="the port on which the controller listens for the TaskClient client")

    options, leftover = parser.parse_args()
    assert options.tmax >= options.tmin, "tmax must not be smaller than tmin"

    multiengine = client.MultiEngineClient()
    task_client = client.TaskClient()
    print(task_client.task_controller)
    multiengine.block = True
    engine_count = len(multiengine.get_ids())
    multiengine.execute('from IPython.utils.timing import time')

    # Draw each task's duration uniformly from [tmin, tmax).
    spread = options.tmax - options.tmin
    durations = [random.random() * spread + options.tmin
                 for _ in range(options.n)]
    jobs = [client.StringTask("time.sleep(%f)" % d) for d in durations]
    total_work = sum(durations)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (options.n, total_work, engine_count))
    time.sleep(1)
    began = time.time()
    pending = [task_client.run(job) for job in jobs]
    task_client.barrier(pending)
    ended = time.time()

    elapsed = ended - began
    speedup = total_work / elapsed

    print("executed %.1f secs in %.1f secs" % (total_work, elapsed))
    print("%.3fx parallel performance on %i engines" % (speedup, engine_count))
    print("%.1f%% of theoretical max" % (100 * speedup / engine_count))
Ejemplo n.º 5
0
def main():
    """Measure load-balanced task throughput on an IPython cluster.

    Submits random-duration sleep tasks through a load-balanced view and
    prints the resulting parallel speedup against the serial total.
    """
    parser = OptionParser()
    parser.set_defaults(n=100, tmin=1e-3, tmax=1, profile='default')

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p", '--profile', type='str', dest='profile',
                      help="the cluster profile [default: 'default']")

    options, leftover = parser.parse_args()
    assert options.tmax >= options.tmin, "tmax must not be smaller than tmin"

    cluster = Client()
    balanced = cluster.load_balanced_view()
    print(balanced)
    cluster.block = True
    engine_count = len(cluster.ids)
    # Ensure `time` exists on every engine before mapping time.sleep.
    with cluster[:].sync_imports():
        from IPython.utils.timing import time

    # Per-task durations drawn uniformly from [tmin, tmax).
    spread = options.tmax - options.tmin
    durations = [random.random() * spread + options.tmin
                 for _ in range(options.n)]
    total_work = sum(durations)

    print("executing %i tasks, totalling %.1f secs on %i engines" %
          (options.n, total_work, engine_count))
    time.sleep(1)
    began = time.time()
    async_result = balanced.map(time.sleep, durations)
    async_result.get()
    ended = time.time()

    elapsed = ended - began
    speedup = total_work / elapsed

    print("executed %.1f secs in %.1f secs" % (total_work, elapsed))
    print("%.3fx parallel performance on %i engines" % (speedup, engine_count))
    print("%.1f%% of theoretical max" % (100 * speedup / engine_count))
Ejemplo n.º 6
0
def main():
    """Benchmark the legacy MultiEngine/Task IPython clients.

    Parses command-line options for task count, per-task duration range
    and controller connection details; runs ``opts.n`` string sleep-tasks
    through the task controller; prints the measured parallel speedup.
    """
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1)
    parser.set_defaults(tmax=60)
    parser.set_defaults(controller='localhost')
    parser.set_defaults(meport=10105)
    parser.set_defaults(tport=10113)

    parser.add_option("-n",
                      type='int',
                      dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t",
                      type='float',
                      dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T",
                      type='float',
                      dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-c",
                      type='string',
                      dest='controller',
                      help='the address of the controller')
    parser.add_option("-p",
                      type='int',
                      dest='meport',
                      help="the port on which the controller listens for the MultiEngine/RemoteController client")
    parser.add_option("-P",
                      type='int',
                      dest='tport',
                      help="the port on which the controller listens for the TaskClient client")

    (opts, args) = parser.parse_args()
    # NOTE(review): assert vanishes under `python -O`; kept to preserve the
    # original behaviour (AssertionError on bad input).
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = client.MultiEngineClient()
    tc = client.TaskClient()
    print(tc.task_controller)  # converted from a Python 2 print statement
    rc.block = True
    nengines = len(rc.get_ids())
    rc.execute('from IPython.utils.timing import time')

    # the jobs should take a random time within a range
    times = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin
        for i in range(opts.n)
    ]
    tasks = [client.StringTask("time.sleep(%f)" % t) for t in times]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (
        opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    taskids = [tc.run(t) for t in tasks]
    tc.barrier(taskids)
    stop = time.time()

    ptime = stop - start
    # Speedup: total serial sleep time over observed wall-clock time.
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Ejemplo n.º 7
0
def func_sum(tid, data):
    """Sleep a random sub-second interval, then return ``(tid, sum(data))``.

    ``data`` may be a number or a list/vector of numbers; the sum is
    computed with ``numpy.sum``.
    """
    delay = math.log(1 + random())
    time.sleep(delay)
    total = numpy.sum(data)
    return tid, total