def test_cycle_rows(test_sink):
    """Drive cycle_rows into the sink and check the rows it collects."""
    # `test_sink` is a primed sink coroutine; `ml` is the list of items it
    # has received, reached through the generator's frame locals.
    row_cycler = cycle_rows(test_sink)
    ml = getgeneratorlocals(test_sink)['ml']
    with file_readers(data_package) as readers:
        row_cycler.send(readers)
        # First batch holds one row per reader: 5 files, headers first.
        assert len(getgeneratorlocals(test_sink)['ml'][0]) == 5
        assert getgeneratorlocals(test_sink)['ml'][0][0][0] == 'Car'
        assert getgeneratorlocals(test_sink)['ml'][0][4][0] == 'ssn'
        # Reset the sink's collected rows before the full pump.
        test_sink.send('clear')
        try:
            # NOTE(review): this second send passes the sink itself, while
            # the first passed `readers` — confirm cycle_rows accepts the
            # sink as a payload here and that this is not a typo.
            row_cycler.send(test_sink)
        except StopIteration:
            # Cycler exhausts all readers and finishes; expected.
            pass
        # Full pump should have delivered 1000 rows in total.
        assert len(ml) == 1000
        test_sink.send('clear')
def test_filter_data(test_sink):
    """Exercise the full coroutine pipeline with filter_data at the bottom."""
    ml = getgeneratorlocals(test_sink)['ml']
    with file_readers(data_package) as readers:
        # CONSTANTS:
        nt_class_names = [data[0][1] for data in data_package]
        output_package = [data[1] for data in data_package]
        # DECLARE --> From the bottom up stack
        # (each stage is constructed around the one below it, so the sink
        # is wrapped first and the row cycler last)
        data_filter = filter_data(test_sink)
        broadcaster = broadcast(data_filter)
        parse_data = data_parser(broadcaster)
        date_key = date_key_gen(parse_data)
        row_key = row_key_gen((parse_data, date_key))
        field_name_gen = gen_field_names(parse_data)
        headers = header_extract((field_name_gen, field_name_gen))
        row_cycler = cycle_rows(headers)
        # SEND PREREQUISITES FIRST
        field_name_gen.send(nt_class_names)
        # NOTE(review): both slots receive date_keys[1]; presumably
        # intentional for this filter scenario — confirm it should not be
        # (date_keys[0], date_keys[1]).
        date_key.send((date_keys[1], date_keys[1]))
        row_cycler.send(readers)
        broadcaster.send(output_package)
        # SEND DATA:
        try:
            row_cycler.send((date_key, row_key))
        except StopIteration:
            # Pipeline exhausted; expected.
            pass
    # NOTE(review): this test only prints and asserts nothing — consider
    # pinning len(ml) with an assertion.
    print(*ml, sep='\n')
    print(len(ml))
def test_data_parser(test_sink):
    """Run the parser pipeline once per date-key format and check the total."""
    sink_rows = getgeneratorlocals(test_sink)['ml']
    # One complete pass over every file for each configured date-key format.
    for _ in range(len(date_keys)):
        with file_readers(data_package) as open_readers:
            # CONSTANTS:
            class_names = [entry[0][1] for entry in data_package]
            # Build the coroutine stack bottom-up: sink <- parser <- keys.
            parser = data_parser(test_sink)
            dates = date_key_gen(parser)
            rows = row_key_gen((parser, dates))
            names = gen_field_names(parser)
            header_stage = header_extract((names, test_sink))
            cycler = cycle_rows(header_stage)
            # Prime the stages that need configuration before data flows.
            names.send(class_names)
            dates.send(date_keys)
            cycler.send(open_readers)
            # Pump rows until the cycler is exhausted.
            while True:
                try:
                    cycler.send((dates, rows))
                except StopIteration:
                    break
    print(*sink_rows, sep='\n\n\n')
    print(len(sink_rows))
    assert len(sink_rows) == 2002
def test_gen_field_names(test_sink):
    """Check that gen_field_names emits one namedtuple class per input file."""
    with file_readers(data_package) as readers:
        names_stage = gen_field_names(test_sink)
        class_names = tuple(in_spec[1] for in_spec, out_spec in data_package)
        names_stage.send(class_names)  # send class_names
        header_stage = header_extract((names_stage, names_stage))
        cycle_rows(header_stage).send(readers)
        dummy_nt = getgeneratorlocals(test_sink)['ml'][0]
        print(dummy_nt)
        data_fields = getgeneratorlocals(test_sink)['ml'][0]
        # One generated class per file in the package.
        assert len(data_fields) == len(class_names)
        print(data_fields)
        attrs = ['car', 'employer', 'summons_number', 'ssn', 'ssn']
        assert len(attrs) == len(data_fields)
        # Every generated class must expose its expected first attribute.
        for field_cls, attr_name in zip(data_fields, attrs):
            assert getattr(field_cls, attr_name)
            print(' ')
            print(dir(field_cls))
            print(' ')
def test_header_extract(test_sink):
    """header_extract should forward each file's header row to the sink."""
    with file_readers(data_package) as readers:
        extractor = header_extract((test_sink, test_sink))
        cycle_rows(extractor).send(readers)
        captured = getgeneratorlocals(test_sink)['ml']
        first_batch = captured[0]
        # The first field name of each of the first three headers.
        for header_row, expected in zip(first_batch,
                                        ('car', 'employer', 'summons_number')):
            assert header_row[0] == expected
        # One header per file in the package.
        assert len(first_batch) == 5
        test_sink.send('clear')
def _get_test_date(gen_name, list_name, access_idxs): nested_list = getgeneratorlocals(gen_name)[list_name] idx = [arg for arg in access_idxs] current = list(nested_list) for i in range(len(idx)): try: if iter(current[idx[i]]): current = current[idx[i]] except TypeError: continue finally: current = current[idx[-1]] return current
def walk(self, sampler, stop=None):
    """Collect samples from a sampler generator into a deque.

    If neither this call nor the sampler itself carries a stop
    specification, a warning is issued because the walk may never end.
    With ``stop`` given, at most ``stop`` samples are kept.
    """
    sampler_locals = inspect.getgeneratorlocals(sampler)
    assert 'stop' in sampler_locals, \
        'Sampler must be a sampler_generator instance returned by SP.sampler()'
    if stop is None:
        if sampler_locals['stop'] is None:
            warnings.warn(
                "No stop specification set - this walk may proceed indefinitely")
        # No local limit: exhaust the sampler entirely.
        return collections.deque(sampler)
    collected = collections.deque()
    # Deliberately pull-then-check, matching the original consumption
    # pattern (one extra sample is drawn and discarded at the boundary).
    for sample in sampler:
        if len(collected) >= stop:
            break
        collected.append(sample)
    return collected
def test_row_key_gen(test_sink, sample_reader_rows):
    """Check that row_key_gen produces a type-consistent key for sample rows."""
    # Expected per-column types for each sample file:
    # cars.csv
    test_key0 = (str, float, int, float, float, float, float, int, str)
    # nyc_parking_tickets_extract.csv
    test_key1 = (int, str, str, str, str, int, str, str, str)
    # update_status.csv
    test_key2 = (str, str, str)
    unpacked_test_keys = [*chain(test_key0, test_key1, test_key2)]
    f_idxs = (0, 2, 4)
    test_sink_tuple = (test_sink, test_sink)

    def check_key(row_keys, ref_keys):
        # NOTE(review): this returns inside the first loop iteration, so
        # only the first (value, ref) pair is ever checked — presumably it
        # was meant to validate every pair; confirm before tightening.
        for value, ref in list(zip(row_keys, ref_keys)):
            return ref(value)

    gen_row_key = row_key_gen(test_sink_tuple)
    gen_row_key.send(sample_reader_rows(f_idxs))
    parsed_key0 = getgeneratorlocals(test_sink)['ml'][1]
    assert check_key(parsed_key0, unpacked_test_keys)
    test_sink.send('clear')
def test_date_key_gen(test_sink, sample_reader_rows, get_test_date, date_tester):
    """Run date_tester over three files/date formats and show the sink contents."""
    # Files 2, 4 and 4 from the data package.
    file_indexes = (2, 4, 4)
    # Every run stores its results under the sink's 'ml' local.
    sink_keys = ('ml',) * 3
    # Date-format selector per run: format 0, then format 1 twice.
    format_indexes = (0, 1, 1)
    lookup_paths = ((2, 4), (2, 10), (2, 11))
    expected_dates = ('10/5/2016', '2016-01-24T21:19:30Z',
                      '2017-10-07T00:14:42Z')
    date_tester(test_sink, sample_reader_rows(file_indexes), get_test_date,
                key_names=sink_keys, date_format_key_idxs=format_indexes,
                access_idxs=lookup_paths, date_strs=expected_dates)
    print(getgeneratorlocals(test_sink)['ml'])
    test_sink.send('clear')
def delegator():
    # Delegate everything to the subgenerator until it closes, then yield
    # one final marker of our own.
    # NOTE(review): `subgen` is defined elsewhere in this module.
    s = subgen()
    yield from s
    yield 'delegator: subgen closed'
    print('delegator: closing...')


d = delegator()
next(d)
print('#' * 52 + ' At this point, both the delegator and the subgenerator are primed and suspended:')
from inspect import getgeneratorstate, getgeneratorlocals
print(getgeneratorlocals(d))
# Grab a direct handle on the subgenerator from the delegator's frame locals.
s = getgeneratorlocals(d)['s']
print(getgeneratorstate(d))
print(getgeneratorstate(s))
print('#' * 52 + ' We can send data to the delegator:')
print(d.send('hello'))
print('#' * 52 + ' We can even send data directly to the subgenerator since we now have a handle on it:')
print(s.send('python'))
print('#' * 52 + ' In fact, we can close it too:')
def delegator():
    # Transparent delegation: everything sent to the delegator is routed
    # into the echo subgenerator.
    # NOTE(review): `echo` is defined elsewhere in this module.
    e = echo()
    yield from e


d = delegator()
print(next(d))
print(
    '#' * 52 +
    ' Now, calling `next` on the delegator will establish the connection to the subgenerator and'
    ' automatically prime it as well.')
from inspect import getgeneratorstate, getgeneratorlocals
print(getgeneratorlocals(d))
# Pull the subgenerator out of the delegator's frame locals.
e = getgeneratorlocals(d)['e']
print(getgeneratorstate(d))
print(getgeneratorstate(e))
print(
    '#' * 52 +
    ' We can now send data to the delegator, and it will pass that along to the subgenerator:'
)
print(d.send('stressed'))
print(
    '#' * 52 +
    'Lets modify our `echo` coroutine to both receive and yield a result, instead of just printing to the console:'
)
def _extract_generator_locals(value: Any) -> List[TypedValue]:
    """Convert a generator's frame locals into typed values.

    The implicit comprehension argument named '.0' is dropped before
    conversion.
    """
    visible_locals = (
        (name, local_value)
        for name, local_value in inspect.getgeneratorlocals(value).items()
        if name != '.0'
    )
    return ExpressionEvaluator._to_typed_values(visible_locals,
                                                check_expandable=False)
def _run_coro(kernel):
    """Main driver generator: run the kernel's coroutines inside tkinter's
    event loop, one submitted coroutine per `yield` cycle.

    NOTE(review): indentation was reconstructed from a flattened source;
    the nesting below follows the apparent control flow — confirm against
    the original file.
    """
    # --- Main loop preparations ---
    _runner = super()._run_coro()
    runner_send = _runner.send
    runner_send(None)  # prime the inner runner
    # Bind kernel attributes / bound methods to locals for fast access
    # inside the hot loop.
    toplevel = kernel._toplevel
    event_queue = kernel._event_queue
    event_queue_append = event_queue.append
    event_queue_popleft = event_queue.popleft
    event_wait = kernel._event_wait
    event_wait_pop = event_wait.pop
    tk_runner = None  # Generator that holds the tkinter loop
    frame = None  # Looping widget (`tkinter.Frame`)
    coro = None  # Coroutine passed in from `.run`
    result = None  # Result from `tk_runner`

    # Get variables from `_runner`
    runner_locals = getgeneratorlocals(_runner)
    _reschedule_task = runner_locals["_reschedule_task"]

    # --- Tkinter helper functions ---

    @contextmanager
    def destroying(widget):
        # Ensure `widget` is destroyed on exit; ignore widgets that are
        # already gone (TclError).
        try:
            yield widget
        finally:
            with suppress(tkinter.TclError):
                widget.destroy()

    @contextmanager
    def bind(widget, func, events):
        # Bind `func` to every event name, unbinding them all on exit.
        widget_bind = widget.bind
        widget_unbind = widget.unbind
        bindings = [(event, widget_bind(event, func, "+")) for event in events]
        try:
            if len(bindings) == 1:
                yield bindings[0]
            else:
                yield bindings
        finally:
            for info in bindings:
                widget_unbind(*info)

    @contextmanager
    def protocol(toplevel, func):
        # Temporarily install `func` as the window-close handler.
        toplevel.protocol("WM_DELETE_WINDOW", func)
        try:
            yield
        finally:
            toplevel.protocol("WM_DELETE_WINDOW", toplevel.destroy)

    def tk_send(
        info,
        *,
        reschedule=False,
        _unsafe_states=frozenset({GEN_RUNNING, GEN_CLOSED}),
    ):
        # Push `info` into `tk_runner` if it is safely suspended; with
        # `reschedule=True`, retry later via `frame.after` when it is not.
        if not tk_runner or getgeneratorstate(tk_runner) in _unsafe_states:
            if reschedule:
                frame.after(1, lambda: tk_send(info, reschedule=True))
            return False
        try:
            tk_runner.send(info)
        except BaseException as e:
            nonlocal result
            if isinstance(e, StopIteration):
                result = e.value
            else:
                result = e
            with suppress(tkinter.TclError):
                frame.destroy()
        finally:
            # NOTE(review): `return` inside `finally` swallows any
            # exception raised above — presumably intentional here
            # (errors are captured into `result`); verify.
            return True

    @contextmanager
    def ensure_after(secs, widget):
        # Schedule a wakeup callback after `secs` seconds (never 0 ms),
        # cancelling it on exit; `secs=None` means no wakeup at all.
        if secs is None:
            yield
        else:
            tm = max(int(secs * 1000), 1)
            callback = lambda: tk_send(
                "SLEEP_WAKE" if secs else "READY",
                reschedule=True,
            )
            id_ = widget.after(tm, callback)
            try:
                yield
            finally:
                widget.after_cancel(id_)

    # --- Tkinter callbacks ---

    # Decorator to return "break" for tkinter callbacks
    def tkinter_callback(func):
        @wraps(func)
        def _wrapped(*args):
            func(*args)
            return "break"
        return _wrapped

    # Functions for event callbacks
    @tkinter_callback
    def send_tk_event(event):
        if event.widget is toplevel:
            event_queue_append(event)
            if event_wait:
                tk_send("EVENT_WAKE")

    @tkinter_callback
    def send_other_event(event):
        if event.widget is not toplevel:
            event_queue_append(event)
            if event_wait:
                tk_send("EVENT_WAKE")

    @tkinter_callback
    def send_destroy_event(event):
        if event.widget is toplevel:
            event_queue_append(event)
            if event_wait:
                frame.after(1, lambda: tk_send("EVENT_WAKE"))

    @tkinter_callback
    def close_window():
        if event_wait:
            tk_send("CLOSE_WINDOW", reschedule=True)

    # --- Internal loop (driven by tkinter's loop) ---
    def _tk_run_task(frame, coro):
        val = exc = None  # Info from `spawn` task
        tk_task = None  # Task to wait for its completion
        with destroying(frame):
            val, exc = runner_send(spawn(coro))
            if exc:
                raise exc
            tk_task = val
            tk_task.report_crash = False
            del coro, val, exc
            while True:
                if ((tk_task and tk_task.terminated)
                        or (not kernel._ready and not tk_task)):
                    if tk_task:
                        tk_task._joined = True
                        return (tk_task.next_value, tk_task.next_exc)
                    else:
                        return (None, None)
                # Set the timeout for our `.after` callback.
                # Note that the selector is also considered in this
                # conditional as we cannot add a callback to a selector.
                if kernel._ready or kernel._selector.get_map() or not tk_task:
                    timeout = 0
                else:
                    timeout = kernel._sleepq.next_deadline(monotonic())
                # This makes sure that the loop will continue. We suspend
                # here only to receive any `tkinter` events.
                with ensure_after(timeout, frame):
                    info = (yield)
                if info == "EVENT_WAKE":
                    # Wake all tasks waiting for an event
                    for task in event_wait_pop(len(event_wait)):
                        _reschedule_task(task)
                elif info == "CLOSE_WINDOW":
                    # Raise an error on event waiting tasks
                    # Note: This will NOT raise the error on any
                    # blocking operation; only `_wait_event` will raise
                    # this exception.
                    for task in event_wait_pop(len(event_wait)):
                        _reschedule_task(task,
                                         exc=CloseWindow("X was pressed"))
                # Only remove events if there are event tasks or if
                # the queue is filling up.
                event_tasks = [
                    t for t in kernel._tasks.values() if iseventtask(t)
                ]
                if event_tasks:
                    offset = min(t.next_event for t in event_tasks)
                    if offset > 0:
                        for _ in range(offset):
                            event_queue_popleft()
                        for task in event_tasks:
                            task.next_event -= offset
                # There aren't any event tasks to notice this change.
                elif len(event_queue) > 100:
                    event_queue.clear()
                # Run using `schedule()`. Supplying `None` as the argument
                # means a task doing `while True: await schedule()` can block
                # the loop as the ready queue will never be empty.
                _, exc = runner_send(schedule())
                if exc:
                    raise exc

    # --- Main loop ---
    # Setup the main cleanup stack
    with ExitStack() as stack:
        enter = stack.enter_context
        # Create toplevel window
        toplevel = kernel._toplevel = enter(destroying(tkinter.Tk()))
        # Ensure closing of original runner
        enter(closing(_runner))
        # Bind all events
        enter(bind(toplevel, send_tk_event, kernel._tk_events))
        enter(bind(toplevel, send_other_event, kernel._other_events))
        enter(bind(toplevel, send_destroy_event, ("<Destroy>", )))
        enter(protocol(toplevel, close_window))
        while True:
            # If an exception happened in `tk_runner`, end the
            # kernel and raise the exception
            if isinstance(result, BaseException):
                raise result
            # Get the next coroutine to run :D
            coro = (yield result)
            frame = tkinter.Frame(toplevel)
            tk_runner = _tk_run_task(frame, coro)
            coro = None
            # Setup the cycle's cleanup stack
            with ExitStack() as inner_stack:
                inner_enter = inner_stack.enter_context
                inner_enter(destroying(frame))
                inner_enter(closing(tk_runner))
                # Start the cycle :)
                tk_send(None)
                with suppress(tkinter.TclError):
                    frame.wait_window()
                # Exceptions in the loop
                if getgeneratorstate(tk_runner) != GEN_CLOSED:
                    raise RuntimeError(
                        "Kernel frame destroyed before task finished")
                try:
                    if not toplevel.winfo_exists():
                        raise RuntimeError(
                            "Kernel toplevel destroyed before shutdown")
                except tkinter.TclError:
                    raise RuntimeError(
                        "Kernel toplevel destroyed before shutdown")
def song():
    # Subgenerator: yields the two lines of the song.
    yield "I'm a lumberjack and I'm OK"
    yield "I sleep all night and I work all day"


def play_song():
    # Delegator: forwards the whole song, then yields a final marker.
    count = 0  # NOTE(review): unused, but deliberately visible through
               # getgeneratorlocals(player) below — do not remove.
    s = song()
    yield from s
    yield 'song finished'
    print('player is exiting...')


player = play_song()
# Before priming: created state, no frame locals bound yet.
print(getgeneratorstate(player))
print(getgeneratorlocals(player))
next(player)
# After priming: suspended, locals (including subgenerator `s`) now bound.
print(getgeneratorstate(player))
print(getgeneratorlocals(player))
s = getgeneratorlocals(player)['s']
print(getgeneratorstate(s))
print(next(player))
print(getgeneratorstate(player))
print(getgeneratorstate(s))
print(next(player))
print(getgeneratorstate(player))
# NOTE(review): this first print appears to be the tail of a `subgen`
# definition that was truncated from this excerpt; `subgen` itself is
# defined elsewhere.
print('subgen: closing...')


def delegator():
    # Delegate to the subgenerator until it closes, then yield a marker.
    s = subgen()
    yield from s
    yield 'delegator: subgen closed'
    print('delegator: closing...')


d = delegator()
next(d)
from inspect import getgeneratorstate, getgeneratorlocals
getgeneratorlocals(d)
# Grab a direct handle on the subgenerator from the delegator's locals.
s = getgeneratorlocals(d)['s']
print(getgeneratorstate(d))
print(getgeneratorstate(s))
d.send('hello')
s.send('python')
# Closing the subgenerator directly: the delegator resumes after
# its `yield from` expression.
s.close()
getgeneratorstate(d)
next(d)
# Yield From - Closing and Return
# so this is what happens when the subgenerator closes (directly or indirectly) - the delegator simply resumes running
# 生成器是可以暂停的函数 import inspect def gen_func(): value = yield 1 # 第一返回值给调用方,第二调用方通过send方式返回值给gen return "geeking" if __name__ == '__main__': gen = gen_func() print(inspect.getgeneratorlocals(gen)) next(gen) print(inspect.getgeneratorlocals(gen)) try: next(gen) except StopIteration: pass print(inspect.getgeneratorlocals(gen))
# Now, calling next on the delegator will establish the connection to the subgenerator and automatically prime
# it as well.
# We can easily see this by doing some inspection:
# We can now send data to the delegator, and it will pass that along to the subgenerator:


def echo():
    """Coroutine that yields back the reverse of whatever it is sent."""
    output = None
    while True:
        received = yield output
        output = received[::-1]


def delegator():
    # Transparent delegation: sends/yields are routed into `echo`.
    e = echo()
    yield from e


# Bug fix: `echo` was defined *after* this point in the original script,
# so `next(d)` raised NameError when the delegator body first ran.
# Defining `echo` above (before the delegator is advanced) fixes the
# ordering; everything else is unchanged.
d = delegator()
next(d)
from inspect import getgeneratorstate, getgeneratorlocals
getgeneratorlocals(d)
e = getgeneratorlocals(d)['e']
print(getgeneratorstate(d))
print(getgeneratorstate(e))
# Yield From - Sending Data
# Let's modify our echo coroutine to both receive and yield a result, instead of just printing to the console:
# And we can use delegation as follows: