def dotest(n):
    with eventstats.BenchmarkLogSpan("string"):
        x = n * "a"
        ray.put(x)
    with eventstats.BenchmarkLogSpan("array_of_pairs"):
        x = {i: i for i in range(n)}
        ray.put(x)
    with eventstats.BenchmarkLogSpan("ntuple"):
        x = n * (1, )
        ray.put(x)
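# The snippets in this file lean on raybench.eventstats, whose source is not
# shown here. A minimal stand-in that is compatible with how
# BenchmarkLogSpan and log_span_events are used above -- a sketch of assumed
# behavior, not the actual raybench implementation -- might look like:
import time

_SPAN_EVENTS = []  # hypothetical module-level event log

class BenchmarkLogSpan(object):
    """Stand-in: records the wall-clock duration of a labeled span."""

    def __init__(self, name, config=None):
        self.name = name
        self.config = config or {}

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _SPAN_EVENTS.append({
            "name": self.name,
            "config": self.config,
            "duration": time.time() - self.start,
        })

def log_span_events():
    # Stand-in for the real API: return all events recorded so far.
    return _SPAN_EVENTS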
def TEST_PutAfterPut(numobj):
    # Create numobj objects twice and report how much slower the second
    # batch of puts is relative to the first.
    with eventstats.BenchmarkLogSpan("put_after_put", {"numobj": numobj}):
        l1, dt1 = plasma_create_objects(numobj)
        time.sleep(1)
        l2, dt2 = plasma_create_objects(numobj)
    print("TEST_PutAfterPut: slowdown factor={}".format(dt2 / dt1))
    return dt2 / dt1
def TEST_GetBeforeAfterPut(numobj_getperf, numobj_put):
    with eventstats.BenchmarkLogSpan("get_before_after_put",
                                     {"numobj_getperf": numobj_getperf,
                                      "numobj_put": numobj_put}):
        lst1, putdt1 = plasma_create_objects(numobj_getperf)
        dt1 = plasma_get_objects(lst1)
        lst2, putdt2 = plasma_create_objects(numobj_put)
        # dt2 = plasma_get_objects(lst2)  # known bug: this causes a crash
        # Now try to match the dt1 get time again.
        dt2 = plasma_get_objects(lst1)
        lst3, putdt3 = plasma_create_objects(numobj_getperf)
        dt3 = plasma_get_objects(lst3)
    print("relative time={} to get initial numobj={} objects".format(
        dt2 / dt1, numobj_getperf))
    print("relative time={} to get new numobj={} objects".format(
        dt3 / dt1, numobj_getperf))
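# The TEST_* functions above use plasma_create_objects and
# plasma_get_objects helpers that are defined elsewhere. A plausible sketch
# -- purely an assumption about their contract, with the argument treated as
# an object count even though some call sites pass a size -- is:
import time
import ray

def plasma_create_objects(numobj, payload=b"x"):
    # Hypothetical helper: put numobj objects and time the puts.
    # Returns (object IDs, elapsed seconds).
    start = time.time()
    object_ids = [ray.put(payload) for _ in range(numobj)]
    return object_ids, time.time() - start

def plasma_get_objects(object_ids):
    # Hypothetical helper: fetch the objects, returning the elapsed seconds.
    start = time.time()
    ray.get(object_ids)
    return time.time() - start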
def test_blocking_tasks(num_tasks):
    @ray.remote
    def f(i, j):
        return (i, j)

    @ray.remote
    def g(i):
        # Each instance of g submits and blocks on the result of another
        # remote task.
        object_ids = [f.remote(i, j) for j in range(10)]
        return ray.get(object_ids)

    with eventstats.BenchmarkLogSpan("submit"):
        ray.get([g.remote(i) for i in range(num_tasks)])
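# Each g task above blocks inside ray.get() while its ten f children run, so
# this benchmark stresses how the scheduler handles blocked workers. A
# minimal driver for it, assuming the function above is importable as shown,
# might be:
import ray

if __name__ == '__main__':
    ray.init()
    test_blocking_tasks(100)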
import json

import numpy as np
import ray
import raybench
import raybench.eventstats as eventstats

if __name__ == '__main__':
    bench_env = raybench.Env()
    bench_env.ray_init()
    n = int(10 ** bench_env.benchmark_iteration)
    with eventstats.BenchmarkLogSpan("randints"):
        x = list(np.random.randint(0, 100, size=n))
        ray.put(x)
    print("BENCHMARK_STATS:", json.dumps({
        "config": {"scale": n},
        "events": eventstats.log_span_events()
    }))
import json

import ray
import raybench
import raybench.eventstats as eventstats

if __name__ == '__main__':
    bench_env = raybench.Env()
    bench_env.ray_init()
    n = int(10 ** bench_env.benchmark_iteration)
    with eventstats.BenchmarkLogSpan("ntuple"):
        x = n * (1, )
        ray.put(x)
    print("BENCHMARK_STATS:", json.dumps({
        "config": {"scale": n},
        "events": eventstats.log_span_events()
    }))
import json

import ray
import raybench
import raybench.eventstats as eventstats

if __name__ == '__main__':
    bench_env = raybench.Env()
    bench_env.ray_init()
    n = int(10 ** bench_env.benchmark_iteration)
    with eventstats.BenchmarkLogSpan("array_of_pairs"):
        x = {i: i for i in range(n)}
        ray.put(x)
    print("BENCHMARK_STATS:", json.dumps({
        "config": {"scale": n},
        "events": eventstats.log_span_events()
    }))
def TEST_PutGetLinearScale():
    for object_size in 10**3, 10**4, 10**5, 10**6:
        time.sleep(1)
        with eventstats.BenchmarkLogSpan("put_get_linear_scale",
                                         {"object_size": object_size}):
            l, pdt = plasma_create_objects(object_size)
            gdt1 = plasma_get_objects(l)
def benchmark_launchtasks(num_tasks):
    with eventstats.BenchmarkLogSpan("total"):
        with eventstats.BenchmarkLogSpan("submit"):
            l = [sleep1s.remote() for _ in range(num_tasks)]
        ray.wait(l, num_returns=len(l))
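# benchmark_launchtasks assumes a sleep1s remote task defined elsewhere;
# a minimal version, purely an assumption about its behavior, would be:
import time
import ray

@ray.remote
def sleep1s():
    # Sleep one second so completion time dominates submission time,
    # separating the "submit" span from the "total" span above.
    time.sleep(1)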
import json

import ray
import raybench
import raybench.eventstats as eventstats

if __name__ == '__main__':
    bench_env = raybench.Env()
    bench_env.ray_init()
    n = int(10 ** bench_env.benchmark_iteration)
    with eventstats.BenchmarkLogSpan("ones_array"):
        x = n * [1]
        ray.put(x)
    print("BENCHMARK_STATS:", json.dumps({
        "config": {"scale": n},
        "events": eventstats.log_span_events()
    }))
import json

import ray
import raybench
import raybench.eventstats as eventstats

if __name__ == '__main__':
    bench_env = raybench.Env()
    bench_env.ray_init()
    n = int(10 ** bench_env.benchmark_iteration)
    with eventstats.BenchmarkLogSpan("nstring"):
        x = n * "a"
        ray.put(x)
    print("BENCHMARK_STATS:", json.dumps({
        "config": {"scale": n},
        "events": eventstats.log_span_events()
    }))
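# The standalone scripts above rely on raybench.Env for setup; its real
# definition is not shown. A minimal stand-in consistent with how it is used
# (ray_init() plus a benchmark_iteration exponent) might look like the
# following, where reading the exponent from RAY_BENCHMARK_ITERATION is
# purely an assumption for illustration:
import os
import ray

class Env(object):
    def __init__(self):
        # Assumed: the scale exponent comes from an environment variable.
        self.benchmark_iteration = int(
            os.environ.get("RAY_BENCHMARK_ITERATION", 1))

    def ray_init(self):
        ray.init()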