def test_spawn_many():
    # spawn_many() should launch one greenthread per deps key and run the
    # whole graph to completion in dependency order.
    # This dependencies dict sets up a graph like this:
    #  a
    # / \
    # b   c
    #  \ /|
    #   d |
    #    \|
    #     e
    deps = dict(e="cd", d="bc", c="a", b="a", a="")

    capture = Capture()
    pool = DAGPool()
    # spawn a waitall() waiter externally to our DAGPool, but capture its
    # message in same Capture instance
    eventlet.spawn(waitall_done, capture, pool)
    pool.spawn_many(deps, spawn_many_func, capture, pool)
    # This set of greenthreads should in fact run to completion once spawned.
    spin()
    # verify that e completed (also that post(key) within greenthread
    # overrides implicit post of return value, which would be None)
    assert_equals(pool.get("e"), "e")

    # With the dependency graph shown above, it is not guaranteed whether b or
    # c will complete first. Handle either case.
    sequence = capture.sequence[:]
    sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])]
    assert_equals(sequence,
                  [
                      set(["a done"]),
                      set(["b done", "c done"]),
                      set(["d done"]),
                      set(["e done"]),
                      set(["waitall() done"]),
                  ])
def test_wait_each_empty():
    """wait_each() over an empty key sequence must yield nothing and never suspend."""
    pool = DAGPool()
    with suspend_checker():
        with check_no_suspend():
            for key, value in pool.wait_each(()):
                # reaching the loop body at all means wait_each() produced a pair
                raise AssertionError(
                    "empty wait_each() returned ({0}, {1})".format(key, value))
def test_wait_each_empty():
    # wait_each() with an empty iterable of keys should terminate immediately
    # without yielding any (key, value) pairs and without suspending.
    pool = DAGPool()
    with suspend_checker():
        with check_no_suspend():
            for k, v in pool.wait_each(()):
                # shouldn't yield anything
                raise AssertionError(
                    "empty wait_each() returned ({0}, {1})".format(k, v))
def test_wait_each_preload():
    """Preloaded values must be delivered by wait_each()/wait() without suspending."""
    preload = {"a": 1, "b": 2, "c": 3}
    pool = DAGPool(preload)
    with suspend_checker():
        with check_no_suspend():
            # wait_each() may deliver in arbitrary order; collect into a dict
            # for comparison
            delivered = dict(pool.wait_each("abc"))
            assert_equals(delivered, preload)

            # while we're at it, test wait() for preloaded keys
            assert_equals(pool.wait("bc"), {"b": 2, "c": 3})
def test_wait_each_exc():
    """A greenthread that raises must propagate PropagateError through wait_each()."""
    pool = DAGPool()
    pool.spawn("a", (), raiser, BogusError("bogus"))
    # draining the iterator must raise, whether the key set is explicit ("a")
    # or defaulted (all keys)
    for args in (("a",), ()):
        with assert_raises(PropagateError):
            for _pair in pool.wait_each(*args):
                pass
def test_post_replace():
    """post(..., replace=True) must overwrite a previously posted value."""
    pool = DAGPool()
    pool.post("a", 1)
    pool.post("a", 2, replace=True)
    # every retrieval path must observe the replacement value
    assert_equals(pool["a"], 2)
    assert_equals(pool.get("a"), 2)
    assert_equals(pool.wait("a"), {"a": 2})
    assert_equals(dict(pool.wait_each("a")), {"a": 2})
def test_getitem():
    # pool[key] should block until the key has a value.
    capture = Capture()
    pool = DAGPool()
    # waitfor presumably performs pool["a"] in its own greenthread and
    # records the result in capture -- TODO confirm against its definition
    eventlet.spawn(waitfor, capture, pool, "a")

    # pool["a"] just waiting
    capture.validate([[]])
    pool.spawn("a", (), lambda key, results: 1)
    # still waiting: hasn't yet run
    capture.validate([[]])
    # run it
    spin()
    capture.validate([["got 1"]])
def test_wait_each_posted():
    # wait_each() should deliver preloaded values immediately, then deliver
    # further values as they are post()ed by another greenthread.
    capture = Capture()
    pool = DAGPool(dict(a=1, b=2, c=3))
    # post_each presumably posts d/e and f/g in separate steps, interleaved
    # with capture.step() -- see its definition
    eventlet.spawn(post_each, pool, capture)
    # use a string as a convenient iterable of single-letter keys
    for k, v in pool.wait_each("bcdefg"):
        capture.add("got ({0}, {1})".format(k, v))

    capture.validate([
        ["got (b, 2)", "got (c, 3)"],
        ["got (f, fval)", "got (g, gval)"],
        ["got (d, dval)", "got (e, eval)"],
    ])
def test_waitall_exc():
    """waitall() must raise PropagateError wrapping the greenthread's exception."""
    pool = DAGPool()
    pool.spawn("a", (), raiser, BogusError("bogus"))
    try:
        pool.waitall()
    except PropagateError as err:
        # the wrapper records which key's greenthread failed...
        assert_equals(err.key, "a")
        # ...and carries the original exception as its 'exc' attribute
        assert isinstance(err.exc, BogusError), \
            "exc attribute is {0}, not BogusError".format(err.exc)
        assert_equals(str(err.exc), "bogus")
        # str() of the wrapper should mention both the key and the cause
        msg = str(err)
        assert_in("PropagateError(a)", msg)
        assert_in("BogusError", msg)
        assert_in("bogus", msg)
    else:
        # Bug fix: previously, a waitall() that failed to raise would let
        # this test pass silently, asserting nothing.
        raise AssertionError("waitall() did not raise PropagateError")
def test_wait_posted():
    # same as test_wait_each_posted(), but calling wait()
    capture = Capture()
    pool = DAGPool(dict(a=1, b=2, c=3))
    eventlet.spawn(post_each, pool, capture)
    # wait() blocks until *every* requested key has a value
    gotten = pool.wait("bcdefg")
    capture.add("got all")
    assert_equals(gotten,
                  dict(b=2, c=3,
                       d="dval", e="eval",
                       f="fval", g="gval"))
    # "got all" must appear only in the final step, after all posts
    capture.validate([
        [],
        [],
        ["got all"],
    ])
def test_spawn_collision_spawn():
    # spawning the same key twice must raise Collision, both before and
    # after the first greenthread runs.
    pool = DAGPool()
    pool.spawn("a", (), lambda key, results: "aval")
    # hasn't yet even started
    assert_equals(pool.get("a"), None)
    with assert_raises(Collision):
        # Attempting to spawn again with same key should collide even if the
        # first spawned greenthread hasn't yet had a chance to run.
        pool.spawn("a", (), lambda key, results: "bad")
    # now let the spawned eventlet run
    eventlet.sleep(0)
    # should have finished
    assert_equals(pool.get("a"), "aval")
    with assert_raises(Collision):
        # Attempting to spawn with same key collides even when the greenthread
        # has completed.
        pool.spawn("a", (), lambda key, results: "badagain")
def test_wait_each_all():
    # wait_each() with no keys should deliver every key's value, in the
    # order values become available; later keys must not be delivered early.
    # set up a simple linear dependency chain
    deps = dict(b="a", c="b", d="c", e="d")
    capture = Capture()
    pool = DAGPool([("a", "a")])
    # capture a different Event for each key
    events = dict((key, eventlet.event.Event())
                  for key in six.iterkeys(deps))
    # can't use spawn_many() because we need a different event for each
    for key, dep in six.iteritems(deps):
        pool.spawn(key, dep, observe, capture, events[key])
    keys = "abcde"                      # this specific order
    each = iter(pool.wait_each())
    for pos in range(len(keys)):
        # next value from wait_each()
        k, v = next(each)
        assert_equals(k, keys[pos])
        # advance every pool greenlet as far as it can go
        spin()
        # everything from keys[:pos+1] should have a value by now
        for k in keys[:pos + 1]:
            assert pool.get(k, _notthere) is not _notthere, \
                "greenlet {0} did not yet produce a value".format(k)
        # everything from keys[pos+1:] should not yet
        for k in keys[pos + 1:]:
            assert pool.get(k, _notthere) is _notthere, \
                "wait_each() delayed value for {0}".format(keys[pos])
        # let next greenthread complete
        if pos < len(keys) - 1:
            k = keys[pos + 1]
            events[k].send(k)
def test_init():
    """DAGPool construction: no preload, dict preload, and pair-sequence preload."""
    with suspend_checker():
        # constructing with no preload data must simply work
        DAGPool()

        # either preload form should be returned verbatim by waitall(), and
        # with nothing spawn()ed or post()ed, waitall() must not suspend
        for preload, expected in (
                (dict(a=1, b=2, c=3), dict(a=1, b=2, c=3)),
                ([("d", 4), ("e", 5), ("f", 6)], dict(d=4, e=5, f=6)),
        ):
            pool = DAGPool(preload)
            # this must not hang
            with check_no_suspend():
                results = pool.waitall()
            assert_equals(results, expected)
def test_spawn_many():
    # spawn_many() should launch one greenthread per deps key and run the
    # whole graph to completion in dependency order.
    # This dependencies dict sets up a graph like this:
    #  a
    # / \
    # b   c
    #  \ /|
    #   d |
    #    \|
    #     e
    deps = dict(e="cd", d="bc", c="a", b="a", a="")

    capture = Capture()
    pool = DAGPool()
    # spawn a waitall() waiter externally to our DAGPool, but capture its
    # message in same Capture instance
    eventlet.spawn(waitall_done, capture, pool)
    pool.spawn_many(deps, spawn_many_func, capture, pool)
    # This set of greenthreads should in fact run to completion once spawned.
    spin()
    # verify that e completed (also that post(key) within greenthread
    # overrides implicit post of return value, which would be None)
    assert_equals(pool.get("e"), "e")

    # With the dependency graph shown above, it is not guaranteed whether b or
    # c will complete first. Handle either case.
    sequence = capture.sequence[:]
    sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])]
    assert_equals(sequence,
                  [set(["a done"]),
                   set(["b done", "c done"]),
                   set(["d done"]),
                   set(["e done"]),
                   set(["waitall() done"]),
                   ])
def test_propagate_exc():
    """An exception must propagate along the chain a -> b -> c, each hop
    wrapping the previous failure in another PropagateError."""
    pool = DAGPool()
    pool.spawn("a", (), raiser, BogusError("bogus"))
    pool.spawn("b", "a", consumer)
    pool.spawn("c", "b", consumer)
    try:
        pool["c"]
    except PropagateError as errc:
        # peel the nesting: c wraps b, which wraps a, which wraps BogusError
        assert_equals(errc.key, "c")
        errb = errc.exc
        assert_equals(errb.key, "b")
        erra = errb.exc
        assert_equals(erra.key, "a")
        assert isinstance(erra.exc, BogusError), \
            "exc attribute is {0}, not BogusError".format(erra.exc)
        assert_equals(str(erra.exc), "bogus")
        # str() of the outermost error should mention every hop and the cause
        msg = str(errc)
        assert_in("PropagateError(a)", msg)
        assert_in("PropagateError(b)", msg)
        assert_in("PropagateError(c)", msg)
        assert_in("BogusError", msg)
        assert_in("bogus", msg)
    else:
        # Bug fix: previously, if pool["c"] failed to raise, this test
        # passed silently, asserting nothing.
        raise AssertionError('pool["c"] did not raise PropagateError')
def test_spawn_collision_post():
    """spawn()ing a key whose value was already post()ed must raise Collision."""
    pool = DAGPool()
    pool.post("a", "aval")
    # the key is already occupied; the lambda must never run
    with assert_raises(Collision):
        pool.spawn("a", (), lambda _key, _results: None)
def test_spawn_multiple():
    # Exercises the introspection API (keys/items/running/running_keys/
    # waiting/waiting_for) as upstream greenthreads complete one by one.
    capture = Capture()
    pool = DAGPool(dict(a=1, b=2, c=3))
    events = {}
    for k in "defg":
        events[k] = eventlet.event.Event()
        pool.spawn(k, (), observe, capture, events[k])
    # Now for a greenthread that depends on ALL the above.
    events["h"] = eventlet.event.Event()
    # trigger the last event right away: we only care about dependencies
    events["h"].send("hval")
    pool.spawn("h", "bcdefg", observe, capture, events["h"])

    # let all the spawned greenthreads get as far as they can
    spin()
    capture.step()
    # but none of them has yet produced a result
    for k in "defgh":
        assert_equals(pool.get(k), None)
    assert_equals(set(pool.keys()), set("abc"))
    assert_equals(dict(pool.items()), dict(a=1, b=2, c=3))
    assert_equals(pool.running(), 5)
    assert_equals(set(pool.running_keys()), set("defgh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for(), dict(h=set("defg")))
    assert_equals(pool.waiting_for("d"), set())
    assert_equals(pool.waiting_for("c"), set())
    # querying an unknown key is an error
    with assert_raises(KeyError):
        pool.waiting_for("j")
    assert_equals(pool.waiting_for("h"), set("defg"))

    # let one of the upstream greenthreads complete
    events["f"].send("fval")
    spin()
    capture.step()
    assert_equals(pool.get("f"), "fval")
    assert_equals(set(pool.keys()), set("abcf"))
    assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval"))
    assert_equals(pool.running(), 4)
    assert_equals(set(pool.running_keys()), set("degh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for("h"), set("deg"))

    # now two others
    events["e"].send("eval")
    events["g"].send("gval")
    spin()
    capture.step()
    assert_equals(pool.get("e"), "eval")
    assert_equals(pool.get("g"), "gval")
    assert_equals(set(pool.keys()), set("abcefg"))
    assert_equals(dict(pool.items()),
                  dict(a=1, b=2, c=3, e="eval", f="fval", g="gval"))
    assert_equals(pool.running(), 2)
    assert_equals(set(pool.running_keys()), set("dh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for("h"), set("d"))

    # last one
    events["d"].send("dval")
    # make sure both pool greenthreads get a chance to run
    spin()
    capture.step()
    assert_equals(pool.get("d"), "dval")
    assert_equals(set(pool.keys()), set("abcdefgh"))
    assert_equals(
        dict(pool.items()),
        dict(a=1, b=2, c=3,
             d="dval", e="eval", f="fval", g="gval", h="hval"))
    assert_equals(pool.running(), 0)
    assert_false(pool.running_keys())
    assert_equals(pool.waiting(), 0)
    assert_equals(pool.waiting_for("h"), set())

    capture.validate([
        ["h got b", "h got c"],
        ["f returning fval", "h got f"],
        ["e returning eval", "g returning gval", "h got e", "h got g"],
        ["d returning dval", "h got d", "h returning hval"],
        [],
    ])
def test_kill(): pool = DAGPool() # nonexistent key raises KeyError with assert_raises(KeyError): pool.kill("a") # spawn a greenthread pool.spawn("a", (), lambda key, result: 1) # kill it before it can even run pool.kill("a") # didn't run spin() assert_equals(pool.get("a"), None) # killing it forgets about it with assert_raises(KeyError): pool.kill("a") # so that we can try again pool.spawn("a", (), lambda key, result: 2) spin() # this time it ran to completion, so can no longer be killed with assert_raises(KeyError): pool.kill("a") # verify it ran to completion assert_equals(pool.get("a"), 2)
def test_post_collision_preload():
    """post() must collide with a key supplied as preload data."""
    pool = DAGPool({"a": 1})
    # "a" already has a value from construction time
    with assert_raises(Collision):
        pool.post("a", 2)
def test_post_collision_spawn():
    # post() must collide with a spawned key, whether or not that key's
    # greenthread has run -- unless the greenthread was kill()ed first.
    pool = DAGPool()
    pool.spawn("a", (), lambda key, result: 1)
    # hasn't yet run
    with assert_raises(Collision):
        # n.b. This exercises the code that tests whether post(key) is or is
        # not coming from that key's greenthread.
        pool.post("a", 2)
    # kill it
    pool.kill("a")
    # now we can post
    pool.post("a", 3)
    assert_equals(pool.get("a"), 3)

    pool = DAGPool()
    pool.spawn("a", (), lambda key, result: 4)
    # run it
    spin()
    with assert_raises(Collision):
        pool.post("a", 5)
    # can't kill it now either
    with assert_raises(KeyError):
        pool.kill("a")
    # still can't post
    with assert_raises(Collision):
        pool.post("a", 6)
def test_post_collision_post():
    """A second post() to the same key (without replace=True) must collide."""
    pool = DAGPool()
    pool.post("a", 1)
    # the key already holds a value, so a plain re-post is rejected
    with assert_raises(Collision):
        pool.post("a", 2)
def test_spawn_multiple():
    # Exercises the introspection API (keys/items/running/running_keys/
    # waiting/waiting_for) as upstream greenthreads complete one by one.
    capture = Capture()
    pool = DAGPool(dict(a=1, b=2, c=3))
    events = {}
    for k in "defg":
        events[k] = eventlet.event.Event()
        pool.spawn(k, (), observe, capture, events[k])
    # Now for a greenthread that depends on ALL the above.
    events["h"] = eventlet.event.Event()
    # trigger the last event right away: we only care about dependencies
    events["h"].send("hval")
    pool.spawn("h", "bcdefg", observe, capture, events["h"])

    # let all the spawned greenthreads get as far as they can
    spin()
    capture.step()
    # but none of them has yet produced a result
    for k in "defgh":
        assert_equals(pool.get(k), None)
    assert_equals(set(pool.keys()), set("abc"))
    assert_equals(dict(pool.items()), dict(a=1, b=2, c=3))
    assert_equals(pool.running(), 5)
    assert_equals(set(pool.running_keys()), set("defgh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for(), dict(h=set("defg")))
    assert_equals(pool.waiting_for("d"), set())
    assert_equals(pool.waiting_for("c"), set())
    # querying an unknown key is an error
    with assert_raises(KeyError):
        pool.waiting_for("j")
    assert_equals(pool.waiting_for("h"), set("defg"))

    # let one of the upstream greenthreads complete
    events["f"].send("fval")
    spin()
    capture.step()
    assert_equals(pool.get("f"), "fval")
    assert_equals(set(pool.keys()), set("abcf"))
    assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval"))
    assert_equals(pool.running(), 4)
    assert_equals(set(pool.running_keys()), set("degh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for("h"), set("deg"))

    # now two others
    events["e"].send("eval")
    events["g"].send("gval")
    spin()
    capture.step()
    assert_equals(pool.get("e"), "eval")
    assert_equals(pool.get("g"), "gval")
    assert_equals(set(pool.keys()), set("abcefg"))
    assert_equals(dict(pool.items()),
                  dict(a=1, b=2, c=3, e="eval", f="fval", g="gval"))
    assert_equals(pool.running(), 2)
    assert_equals(set(pool.running_keys()), set("dh"))
    assert_equals(pool.waiting(), 1)
    assert_equals(pool.waiting_for("h"), set("d"))

    # last one
    events["d"].send("dval")
    # make sure both pool greenthreads get a chance to run
    spin()
    capture.step()
    assert_equals(pool.get("d"), "dval")
    assert_equals(set(pool.keys()), set("abcdefgh"))
    assert_equals(dict(pool.items()),
                  dict(a=1, b=2, c=3,
                       d="dval", e="eval", f="fval", g="gval", h="hval"))
    assert_equals(pool.running(), 0)
    assert_false(pool.running_keys())
    assert_equals(pool.waiting(), 0)
    assert_equals(pool.waiting_for("h"), set())

    capture.validate([
        ["h got b", "h got c"],
        ["f returning fval", "h got f"],
        ["e returning eval", "g returning gval", "h got e", "h got g"],
        ["d returning dval", "h got d", "h returning hval"],
        [],
    ])
def test_spawn_collision_preload():
    """spawn() must collide with a key supplied as preload data."""
    pool = DAGPool([("a", 1)])
    # "a" already has a value from construction time; the lambda must never run
    with assert_raises(Collision):
        pool.spawn("a", (), lambda _key, _results: None)
def test_post_get_exc():
    # A plain exception post()ed as a value is delivered as a value, but a
    # posted PropagateError is re-raised by the retrieval methods.
    pool = DAGPool()
    bogua = BogusError("bogua")
    pool.post("a", bogua)
    assert isinstance(pool.get("a"), BogusError), \
        "should have delivered BogusError instead of raising"
    bogub = PropagateError("b", BogusError("bogub"))
    pool.post("b", bogub)
    with assert_raises(PropagateError):
        pool.get("b")

    # Notice that although we have both "a" and "b" keys, items() is
    # guaranteed to raise PropagateError because one of them is
    # PropagateError. Other values don't matter.
    with assert_raises(PropagateError):
        pool.items()

    # Similar remarks about waitall() and wait().
    with assert_raises(PropagateError):
        pool.waitall()
    with assert_raises(PropagateError):
        pool.wait()
    with assert_raises(PropagateError):
        pool.wait("b")
    with assert_raises(PropagateError):
        pool.wait("ab")
    # but if we're only wait()ing for success results, no exception
    assert isinstance(pool.wait("a")["a"], BogusError), \
        "should have delivered BogusError instead of raising"

    # wait_each() is guaranteed to eventually raise PropagateError, though you
    # may obtain valid values before you hit it.
    with assert_raises(PropagateError):
        for k, v in pool.wait_each():
            pass

    # wait_each_success() filters
    assert_equals(dict(pool.wait_each_success()), dict(a=bogua))
    assert_equals(dict(pool.wait_each_success("ab")), dict(a=bogua))
    assert_equals(dict(pool.wait_each_success("a")), dict(a=bogua))
    assert_equals(dict(pool.wait_each_success("b")), {})

    # wait_each_exception() filters the other way
    assert_equals(dict(pool.wait_each_exception()), dict(b=bogub))
    assert_equals(dict(pool.wait_each_exception("ab")), dict(b=bogub))
    assert_equals(dict(pool.wait_each_exception("a")), {})
    assert_equals(dict(pool.wait_each_exception("b")), dict(b=bogub))