def test_reduce():
    # 'reduce' provides a high-level way of creating future-sinks such
    # as 'count'.
    from operator import add

    # A summing component, built just like df.sum would be.
    summer = df.reduce(add, initial=0)

    # Two independent instances, one for each forked sub-stream.
    sum_everything = summer()
    sum_odds_only  = summer()

    n_items = 15
    data = list(range(n_items))

    result = df.push(source=data,
                     pipe=df.fork(sum_everything.sink,
                                  df.pipe(df.filter(lambda n: n % 2),
                                          sum_odds_only.sink)),
                     result=(sum_everything.future, sum_odds_only.future))

    # Sum of the first k odd numbers is k**2, with k = n_items // 2 here.
    expected_all = sum(data)
    expected_odd = (n_items // 2) ** 2
    assert result == (expected_all, expected_odd)
def test_implicit_element_picking_in_fork():
    # A string in a fork tuple implicitly picks that key out of each
    # dict flowing through the stream.
    values = list(range(10))
    stream = (dict(x=i, y=-i) for i in values)

    got_x, got_y = [], []
    df.push(source=stream,
            pipe=df.fork(("x", df.sink(got_x.append)),
                         ("y", df.sink(got_y.append))))

    # The "x" branch saw i, the "y" branch saw -i.
    assert got_x == [-i for i in got_y] == values
def test_fork():
    # Dataflows can be split with 'fork': every element is delivered to
    # every branch.
    data = list(range(10, 20))

    collected_a, collected_b = [], []
    df.push(source=data,
            pipe=df.fork(df.sink(collected_a.append),
                         df.sink(collected_b.append)))

    assert collected_a == collected_b == data
def test_push_futures_tuple():
    # 'push' accepts a tuple of futures and returns their results in
    # the same order.
    data = list(range(100))

    counter_all = df.count()
    counter_odd = df.count()

    result = df.push(source=data,
                     pipe=df.fork(counter_all.sink,
                                  df.pipe(df.filter(lambda n: n % 2),
                                          counter_odd.sink)),
                     result=(counter_odd.future, counter_all.future))

    n_all = len(data)
    assert result == (n_all // 2, n_all)
def test_push_futures_mapping():
    # When 'result' is a mapping, 'push' returns an object whose
    # attributes are named after the mapping's keys.
    counter_all = df.count()
    counter_odd = df.count()

    data = list(range(100))
    result = df.push(source=data,
                     pipe=df.fork(counter_all.sink,
                                  df.pipe(df.filter(lambda n: n % 2),
                                          counter_odd.sink)),
                     result=dict(odd=counter_odd.future,
                                 all=counter_all.future))

    n_all = len(data)
    assert result.odd == n_all // 2
    assert result.all == n_all
def test_stop_when():
    # 'stop_when' can be used to stop all branches of the network
    # immediately.
    import itertools

    future, counter = df.count()
    limit, step = 10, 2

    # The source is infinite; only 'stop_when' terminates the push.
    result = df.push(source=itertools.count(start=0, step=step),
                     pipe=df.fork(df.stop_when(lambda n: n == limit),
                                  counter),
                     result=(future,))

    assert result == (limit // step,)
def test_stateful_stop_when():
    # A stateful predicate for 'stop_when', written as a coroutine:
    # it answers False for the first n elements, then True.
    @df.coroutine_send
    def n_items_seen(n):
        yield  # Will stop here on construction
        for _ in range(n):
            yield False
        yield True

    import itertools

    future, counter = df.count()
    limit, step = 10, 2

    result = df.push(source=itertools.count(start=0, step=step),
                     pipe=df.fork(df.stop_when(n_items_seen(limit)),
                                  counter),
                     result=(future,))

    assert result == (limit,)
def test_fork_implicit_pipes():
    # Fork arguments can be pipes or tuples: tuples get implicitly
    # converted into pipes.
    data = list(range(10, 20))
    add_1 = df.map(lambda x: 1 + x)

    via_tuple, via_pipe = [], []
    df.push(source=data,
            pipe=df.fork((add_1, df.sink(via_tuple.append)),
                         df.pipe(add_1, df.sink(via_pipe.append))))

    expected = [1 + x for x in data]
    assert via_tuple == via_pipe == expected
def test_push_futures():
    # 'push' provides a higher-level interface to using such futures:
    # it optionally accepts a tuple of futures, and returns a tuple of
    # their results.
    counter_all = df.count()
    counter_odd = df.count()

    data = list(range(100))
    result = df.push(source=data,
                     pipe=df.fork(counter_all.sink,
                                  df.pipe(df.filter(lambda n: n % 2),
                                          counter_odd.sink)),
                     result=(counter_odd.future, counter_all.future))

    n_all = len(data)
    assert result == (n_all // 2, n_all)
def test_branch():
    # 'branch', like 'spy', allows you to insert operations on a copy
    # of the stream at any point in a network. In contrast to 'spy'
    # (which accepts a single plain operation), 'branch' accepts an
    # arbitrary number of pipeline components, which it combines into
    # a pipeline. It provides a more convenient way of constructing
    # some graphs that would otherwise be constructed with 'fork'.

    # Collectors and sinks for the two networks.
    c1, c2, e1, e2 = [], [], [], []
    C1, C2 = df.sink(c1.append), df.sink(c2.append)
    E1, E2 = df.sink(e1.append), df.sink(e2.append)

    # Shared pipeline components.
    A = df.map(lambda n: n + 1)
    B = df.map(lambda n: n * 2)
    D = df.map(lambda n: n * 3)

    # Two equivalent networks: one built with 'fork', one with 'branch'.
    graph1 = df.pipe(A, df.fork(df.pipe(B, C1),
                                df.pipe(D, E1)))
    graph2 = df.pipe(A, df.branch(B, C2), D, E2)

    # Feed the same data into both networks.
    data = list(range(10, 50, 4))
    df.push(source=data, pipe=graph1)
    df.push(source=data, pipe=graph2)

    # Both networks must produce identical results on every branch.
    assert c1 == c2
    assert e1 == e2