Example #1
0
File: spawn.py Project: inercia/evy
def bench_spawn_eventlet():
    from eventlet import sleep, spawn, spawn_n, GreenPool

    print
    print "eventlet tests:"
    print "---------------"

    results = BenchResults()

    def cleanup (): sleep(0.2)

    best = benchmarks.measure_best(5, iters, 'pass', cleanup, sleep)
    results.sleep_main = best[sleep]
    print "eventlet.sleep (main)", results.sleep_main

    gt = spawn(benchmarks.measure_best, 5, iters, 'pass', cleanup, sleep)
    best = gt.wait()
    results.sleep_gt = best[sleep]
    print "eventlet.sleep (gt)", results.sleep_gt

    def dummy (i = None): return i
    def run_spawn (): spawn(dummy, 1)
    def run_spawn_n (): spawn_n(dummy, 1)
    def run_spawn_n_kw (): spawn_n(dummy, i = 1)

    best = benchmarks.measure_best(5, iters, 'pass', cleanup, run_spawn_n, run_spawn, run_spawn_n_kw)
    results.spawn = best[run_spawn]
    print "eventlet.spawn", results.spawn

    results.spawn_n = best[run_spawn_n]
    print "eventlet.spawn_n", results.spawn_n

    results.spawn_n_kw = best[run_spawn_n_kw]
    print "eventlet.spawn_n(**kw)", results.spawn_n_kw

    print "eventlet spawn/spawn_n difference %% %0.1f" % percent(best[run_spawn], best[run_spawn_n])


    def setup ():
        global pool
        pool = GreenPool(iters)

    def run_pool_spawn ():
        pool.spawn(dummy, 1)

    def run_pool_spawn_n ():
        pool.spawn_n(dummy, 1)

    def cleanup_pool ():
        pool.waitall()

    best = benchmarks.measure_best(3, iters, setup, cleanup_pool, run_pool_spawn, run_pool_spawn_n,)
    results.pool_spawn = best[run_pool_spawn]
    print "eventlet.GreenPool.spawn", results.pool_spawn

    results.pool_spawn_n = best[run_pool_spawn_n]
    print "eventlet.GreenPool.spawn_n", results.pool_spawn_n

    print "eventlet spawn/spawn_n difference: %% %0.1f" % percent(best[run_pool_spawn], best[run_pool_spawn_n])
    return results
Example #2
0
def bench_spawn_evy():
    from evy.green.threads import spawn, spawn_n, sleep
    from evy.green.pools import GreenPool

    print
    print "evy tests:"
    print "----------"

    results = BenchResults()

    def cleanup():
        sleep(0.2)

    best = benchmarks.measure_best(5, iters, 'pass', cleanup, sleep)
    results.sleep_main = best[sleep]
    print "evy.sleep (main)", results.sleep_main

    gt = spawn(benchmarks.measure_best, 5, iters, 'pass', cleanup, sleep)
    best = gt.wait()
    results.sleep_gt = best[sleep]
    print "evy.sleep (gt)", results.sleep_gt

    def dummy(i=None):
        return i

    def run_spawn():
        spawn(dummy, 1)

    def run_spawn_n():
        spawn_n(dummy, 1)

    def run_spawn_n_kw():
        spawn_n(dummy, i=1)

    best = benchmarks.measure_best(5, iters, 'pass', cleanup, run_spawn_n,
                                   run_spawn, run_spawn_n_kw)
    results.spawn = best[run_spawn]
    print "evy.spawn", results.spawn

    results.spawn_n = best[run_spawn_n]
    print "evy.spawn_n", results.spawn_n

    results.spawn_n_kw = best[run_spawn_n_kw]
    print "evy.spawn_n(**kw)", results.spawn_n_kw

    print "evy spawn/spawn_n difference %% %0.1f" % percent(
        best[run_spawn], best[run_spawn_n])

    def setup():
        global pool
        pool = GreenPool(iters)

    def run_pool_spawn():
        pool.spawn(dummy, 1)

    def run_pool_spawn_n():
        pool.spawn_n(dummy, 1)

    def cleanup_pool():
        pool.waitall()

    best = benchmarks.measure_best(
        3,
        iters,
        setup,
        cleanup_pool,
        run_pool_spawn,
        run_pool_spawn_n,
    )

    results.pool_spawn = best[run_pool_spawn]
    print "evy.GreenPool.spawn", results.pool_spawn

    results.pool_spawn_n = best[run_pool_spawn_n]
    print "evy.GreenPool.spawn_n", results.pool_spawn_n

    print "evy spawn/spawn_n difference: %% %0.1f" % percent(
        best[run_pool_spawn], best[run_pool_spawn_n])
    return results
Example #3
0
# Directory where benchmark figures are written for the plot publisher.
DATA_DIR = 'plot_data'

# Create the output directory up front. The original exists-then-makedirs
# pair is racy (TOCTOU): a concurrent creator between the two calls made
# makedirs raise. Attempt the creation and only re-raise when the
# directory genuinely could not be made.
try:
    os.makedirs(DATA_DIR)
except OSError:
    if not os.path.isdir(DATA_DIR):
        raise

def write_result(filename, best):
    """Write one benchmark figure to DATA_DIR/filename.

    NOTE(review): the 'YVALUE=%s' line looks like the Jenkins Plot
    plugin format -- confirm the consumer before changing it.
    """
    # 'with' guarantees the handle is closed even if the write raises;
    # the original leaked the file object on error.
    with open(os.path.join(DATA_DIR, filename), 'w') as fd:
        fd.write('YVALUE=%s' % best)

def cleanup():
    # Run between benchmark repetitions: yield for 0.2s so any
    # greenthreads spawned by the previous run can finish.
    eventlet.sleep(0.2)

# Number of timed iterations per benchmark run.
iters = 10000

# Time eventlet.sleep() from the main greenthread (5 repetitions;
# presumably the best one is kept -- see benchmarks.measure_best).
best = benchmarks.measure_best(5, iters,
    'pass',
    cleanup,
    eventlet.sleep)

write_result('eventlet.sleep_main', best[eventlet.sleep])

# Same measurement, but taken from inside a spawned greenthread.
gt = eventlet.spawn(benchmarks.measure_best,5, iters,
    'pass',
    cleanup,
    eventlet.sleep)
best = gt.wait()
write_result('eventlet.sleep_gt', best[eventlet.sleep])

def dummy(i=None):
    # Identity helper: the no-op target spawned by the benchmarks below.
    result = i
    return result

def run_spawn():
Example #4
0
if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('--compare-threading', action='store_true', dest='threading', default=False)
    parser.add_option('-b', '--bytes', type='int', dest='bytes', 
                      default=BYTES)
    parser.add_option('-s', '--size', type='int', dest='size', 
                      default=SIZE)
    parser.add_option('-c', '--concurrency', type='int', dest='concurrency', 
                      default=CONCURRENCY)
    parser.add_option('-t', '--tries', type='int', dest='tries', 
                      default=TRIES)

    
    opts, args = parser.parse_args()
    BYTES=opts.bytes
    SIZE=opts.size
    CONCURRENCY=opts.concurrency
    TRIES=opts.tries
    
    funcs = [launch_green_threads]
    if opts.threading:
        funcs = [launch_green_threads, launch_heavy_threads]
    results = benchmarks.measure_best(TRIES, 3,
                                      lambda: None, lambda: None,
                                      *funcs)
    print "green:", results[launch_green_threads]
    if opts.threading:
        print "threads:", results[launch_heavy_threads]
        print "%", (results[launch_green_threads]-results[launch_heavy_threads])/results[launch_heavy_threads] * 100
Example #5
0
                      default = BYTES)
    # Remaining command-line knobs; each defaults to its module-level
    # constant.
    parser.add_option('-s', '--size', type = 'int', dest = 'size',
                      default = SIZE)
    parser.add_option('-c', '--concurrency', type = 'int', dest = 'concurrency',
                      default = CONCURRENCY)
    parser.add_option('-t', '--tries', type = 'int', dest = 'tries',
                      default = TRIES)

    opts, args = parser.parse_args()

    # Propagate the parsed options back into the module-level globals.
    # NOTE(review): TRIES is not re-assigned here (opts.tries is used
    # directly below) -- confirm nothing else reads the TRIES global.
    BYTES = opts.bytes
    SIZE = opts.size
    CONCURRENCY = opts.concurrency
    funcs = [launch_green_threads]
    if opts.threading:
        funcs.append(launch_heavy_threads)

    print
    print "measuring results for %d iterations..." % opts.tries
    print

    results = benchmarks.measure_best(opts.tries, 3, lambda: None, lambda: None, *funcs)

    print "green:", results[launch_green_threads]
    if opts.threading:
        print "threads:", results[launch_heavy_threads]
        print "%", ((results[launch_green_threads] - results[launch_heavy_threads]) /
                    (results[launch_heavy_threads] * 100))

Example #6
0
    os.makedirs(DATA_DIR)


def write_result(filename, best):
    """Write one benchmark figure to DATA_DIR/filename.

    NOTE(review): the 'YVALUE=%s' line looks like the Jenkins Plot
    plugin format -- confirm the consumer before changing it.
    """
    # 'with' closes the file even if the write raises; the original
    # leaked the handle on error.
    with open(os.path.join(DATA_DIR, filename), 'w') as fd:
        fd.write('YVALUE=%s' % best)


def cleanup():
    # Run between benchmark repetitions: yield for 0.2s so any
    # greenthreads spawned by the previous run can finish.
    eventlet.sleep(0.2)

# Number of timed iterations per benchmark run.
iters = 10000

# Time eventlet.sleep() from the main greenthread (5 repetitions;
# presumably the best one is kept -- see benchmarks.measure_best).
best = benchmarks.measure_best(
    5, iters,
    'pass',
    cleanup,
    eventlet.sleep)

write_result('eventlet.sleep_main', best[eventlet.sleep])

# Same measurement, but taken from inside a spawned greenthread.
gt = eventlet.spawn(
    benchmarks.measure_best, 5, iters,
    'pass',
    cleanup,
    eventlet.sleep)
best = gt.wait()
write_result('eventlet.sleep_gt', best[eventlet.sleep])


def dummy(i=None):