def test_async_single_manager_tables_chained(self):
    """Chained tables on one async manager: row deltas from a view on
    `tbl` are forwarded by an `on_update` callback into `tbl2`, and all
    1000 queued updates must land in `tbl2` once the loop drains.

    Fix: removed the dead `i += 1` at the end of the update loop — `i`
    is rebound by the `for` statement each iteration, so the increment
    had no effect.
    """
    columns = {"index": int, "num1": int, "num2": int}
    manager = PerspectiveManager()
    tbl = Table(columns, index="index")
    view = tbl.view()
    # tbl2 mirrors tbl's schema and index via an arrow round-trip.
    tbl2 = Table(view.to_arrow(), index=tbl.get_index())
    manager.host(tbl, "tbl")
    manager.host(tbl2, "tbl2")

    # Forward each row delta from tbl into tbl2.
    view.on_update(lambda port, delta: tbl2.update(delta), "row")
    manager.set_loop_callback(TestAsync.loop.add_callback)

    for i in range(1000):
        manager.call_loop(tbl.update, [{"index": i, "num1": i, "num2": 2 * i}])

    # Barrier: a callback queued after the updates proves they all ran.
    q = queue.Queue()
    manager.call_loop(q.put, True)
    q.get()

    @syncify
    def _tbl_task2():
        return tbl2.size()

    assert _tbl_task2() == 1000
    view.delete()
    tbl.delete()
    tbl2.delete()
def test_async_multiple_managers_mixed_queue_process_multiple_ports(self):
    """One tornado-driven manager and one synchronous manager, each
    updating its table across 11 ports; the counters record how often
    each processing path was invoked."""
    sentinel = {"async": 0, "sync": 0}

    def _counter(key, f, *args, **kwargs):
        sentinel[key] += 1
        return f(*args, **kwargs)

    sync_process = partial(_counter, "sync")
    async_process = partial(TestAsync.loop.add_timeout, 1, _counter, "async")

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})

    port_ids = [0]
    port_data = [{"a": 0, "b": 0, "c": "0"}]
    for _ in range(10):
        new_port = tbl.make_port()
        other_port = tbl2.make_port()
        assert new_port == other_port
        port_ids.append(new_port)
        port_data.append({"a": new_port, "b": new_port * 1.5, "c": str(new_port)})

    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)

    # manager uses tornado, manager2 is synchronous
    manager.set_loop_callback(async_process)
    manager2.set_loop_callback(sync_process)

    random.shuffle(port_ids)

    @syncify
    def _task():
        for pid in port_ids:
            row = pid if pid < len(port_ids) else len(port_ids) - 1
            tbl.update([port_data[row]], port_id=pid)

    _task()

    for pid in port_ids:
        row = pid if pid < len(port_ids) else len(port_ids) - 1
        tbl2.update([port_data[row]], port_id=pid)

    @syncify
    def _get_size():
        size = tbl.size()
        tbl.delete()
        return size

    assert _get_size() == 11
    assert tbl2.size() == 11
    assert sentinel["async"] == 2
    assert sentinel["sync"] == 12

    tbl2.delete()
def update_callback(port_id, delta):
    # Each delta materializes into a one-row table with the known schema;
    # bump the sentinel by 1 per invocation.
    delta_table = Table(delta)
    assert delta_table.size() == 1
    assert delta_table.schema() == {"a": int, "b": str}
    delta_table.delete()
    s.set(s.get() + 1)
def callback(delta):
    # Same one-row/schema check as the port callback, but this path
    # bumps the sentinel by 100 so the two callbacks are distinguishable.
    delta_table = Table(delta)
    assert delta_table.size() == 1
    assert delta_table.schema() == {"a": int, "b": str}
    delta_table.delete()
    s.set(s.get() + 100)
def test_exception_from_core_correct_types(self):
    """Python-layer errors surface as PerspectiveError; errors raised in
    the C++ core surface as PerspectiveCppError."""
    tbl = Table({"a": [1, 2, 3]})

    # `PerspectiveError` should be raised from the Python layer
    with raises(PerspectiveError) as ex:
        tbl.view()
        tbl.delete()
    expected = "Cannot delete a Table with active views still linked to it - call delete() on each view, and try again."
    assert str(ex.value) == expected

    with raises(PerspectiveCppError) as ex:
        tbl.view(row_pivots=["b"])
    assert str(ex.value) == "Column b does not exist in schema."
def test_async_call_loop(self):
    """An update dispatched through `call_loop` before hosting is still
    applied once the manager's loop runs it."""
    tbl = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager.set_loop_callback(TestAsync.loop.add_callback)
    manager.call_loop(tbl.update, data)
    manager.host(tbl)

    @syncify
    def _size_task():
        return tbl.size()

    assert _size_task() == 10
    tbl.delete()
def test_async_multiple_managers_delayed_process_multiple_ports(self):
    """Artificially delay `_process` on both managers so updates pile up
    in `TO_PROCESS`, then wait for the delayed callbacks to drain them."""
    from time import sleep

    short_delay_queue_process = partial(
        queue_process_async_delay, delay=0.5, loop=TestAsync.loop
    )
    long_delay_queue_process = partial(
        queue_process_async_delay, delay=1, loop=TestAsync.loop
    )

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)

    # The guarantee of `queue_process` is that eventually `_process`
    # will be called, either by user action or loop iteration. By adding
    # the delay, we can artificially queue up actions for later execution
    # and see that it's working properly.
    manager._set_queue_process(short_delay_queue_process)
    manager2._set_queue_process(long_delay_queue_process)

    tbl_id = tbl._table.get_id()
    tbl2_id = tbl2._table.get_id()

    for row in data[:10]:
        tbl.update([row])
        tbl2.update([row])

    assert SENTINEL.get() != 0

    # updates are now queued
    assert tbl_id in _PerspectiveStateManager.TO_PROCESS
    assert tbl2_id in _PerspectiveStateManager.TO_PROCESS

    # Wait for the callbacks to run - we don't call any methods
    # that would call `call_process`, but instead wait for the
    # callbacks to execute asynchronously.
    sleep(1)

    tbl2.delete()
    tbl.delete()
def test_async_queue_process(self):
    """Updates issued inside a syncified task on an async manager are
    all visible by the time the task returns."""
    tbl = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager.set_loop_callback(TestAsync.loop.add_callback)
    manager.host(tbl)

    @syncify
    def _update_task():
        assert tbl.size() == 0
        for row in data[:5]:
            tbl.update([row])
        return tbl.size()

    assert _update_task() == 5
    tbl.delete()
def test_async_multiple_managers_mixed_queue_process(self):
    """One manager queues processing asynchronously while the other
    processes synchronously on every update; verify each path's
    sentinel and that TO_PROCESS is flushed correctly."""
    # mutate when synchronously calling queue_process for each update
    SENTINEL_2 = AsyncSentinel(0)

    def sync_queue_process(table_id, state_manager):
        SENTINEL_2.set(SENTINEL_2.get() - 1)
        state_manager.call_process(table_id)

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)

    # manager uses tornado, manager2 is synchronous
    manager._set_queue_process(TestAsync.wrapped_queue_process)
    manager2._set_queue_process(sync_queue_process)

    tbl_id = tbl._table.get_id()
    tbl2_id = tbl2._table.get_id()

    for row in data[:5]:
        tbl.update([row])
        tbl2.update([row])

    assert SENTINEL.get() != 0
    # the synchronous path ran once per update
    assert SENTINEL_2.get() == -5
    assert tbl2_id not in _PerspectiveStateManager.TO_PROCESS

    # flush `TO_PROCESS`
    view = tbl.view()
    assert view.to_records() == data[:5]
    assert tbl_id not in _PerspectiveStateManager.TO_PROCESS

    tbl2.delete()
    view.delete()
    tbl.delete()
def test_async_queue_process_csv(self):
    """Make sure GIL release during CSV loading works"""
    csv_rows = "x,y,z\n1,a,true\n2,b,false\n3,c,true\n4,d,false"
    tbl = Table(csv_rows)
    manager = PerspectiveManager()
    manager.set_loop_callback(TestAsync.loop.add_callback)
    manager.host(tbl)

    @syncify
    def _task():
        assert tbl.size() == 4
        # Append the same 4-row CSV five more times: 4 + 5*4 == 24.
        for _ in range(5):
            tbl.update(csv_rows)
        return tbl.size()

    assert _task() == 24
    tbl.delete()
def test_async_multiple_managers_mixed_queue_process(self):
    """Mix a tornado-backed manager with one whose loop callback runs
    synchronously, counting synchronous invocations."""
    sentinel = {"called": 0}

    def sync_queue_process(f, *args, **kwargs):
        sentinel["called"] += 1
        f(*args, **kwargs)

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)

    # manager uses tornado, manager2 is synchronous
    manager.set_loop_callback(TestAsync.loop.add_callback)
    manager2.set_loop_callback(sync_queue_process)

    @syncify
    def _tbl_task():
        for row in data[:5]:
            tbl.update([row])
        return tbl.size()

    assert _tbl_task() == 5

    for row in data[:5]:
        tbl2.update([row])

    assert sentinel["called"] == 6

    @syncify
    def _tbl_task2():
        view = tbl.view()
        records = view.to_records()
        view.delete()
        tbl.delete()
        return records

    assert _tbl_task2() == data[:5]

    view = tbl2.view()
    assert view.to_records() == data[:5]
    view.delete()
    tbl2.delete()
def test_exception_from_core_correct_types(self):
    """Python-layer errors surface as PerspectiveError; errors raised in
    the C++ core surface as PerspectiveCppError."""
    tbl = Table({"a": [1, 2, 3]})

    # `PerspectiveError` should be raised from the Python layer
    with raises(PerspectiveError) as ex:
        tbl.view()
        tbl.delete()
    expected = "Cannot delete a Table with active views still linked to it - call delete() on each view, and try again."
    assert str(ex.value) == expected

    with raises(PerspectiveCppError) as ex:
        tbl.view(group_by=["b"])
    assert str(ex.value) == "Invalid column 'b' found in View group_by.\n"
def test_async_queue_process(self):
    """With the wrapped queue_process installed, updating repeatedly
    must trigger at least one asynchronous `_process` call."""
    tbl = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager._set_queue_process(TestAsync.wrapped_queue_process)
    manager.host(tbl)

    assert tbl.size() == 0
    for row in data[:5]:
        tbl.update([row])

    # process should have been called at least once
    assert SENTINEL.get() > 0
    tbl.delete()
def test_async_multiple_managers_delayed_process(self):
    """Two managers with different loop callbacks — one immediate
    ("sync") and one timeout-delayed ("async") — both end up applying
    all 10 updates; the counters record invocations per path."""
    sentinel = {"async": 0, "sync": 0}

    def _counter(key, f, *args, **kwargs):
        sentinel[key] += 1
        return f(*args, **kwargs)

    short_delay_queue_process = partial(_counter, "sync")
    long_delay_queue_process = partial(
        TestAsync.loop.add_timeout, 1, _counter, "async"
    )

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)
    manager.set_loop_callback(short_delay_queue_process)
    manager2.set_loop_callback(long_delay_queue_process)

    @syncify
    def _fill_tbl2():
        for row in data[:10]:
            tbl2.update([row])

    _fill_tbl2()

    for row in data[:10]:
        tbl.update([row])

    @syncify
    def _size_then_delete():
        size = tbl2.size()
        tbl2.delete()
        return size

    assert _size_then_delete() == 10
    assert tbl.size() == 10
    assert sentinel["async"] == 2
    assert sentinel["sync"] == 11
    tbl.delete()
def test_async_call_loop_error_if_no_loop(self):
    """`call_loop` without a configured loop callback must raise;
    after setting one, the same call succeeds."""
    tbl = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()

    # loop not set - errors
    with raises(PerspectiveError):
        manager.call_loop(tbl.update, data)

    manager.set_loop_callback(TestAsync.loop.add_callback)
    manager.call_loop(tbl.update, data)
    manager.host(tbl)

    @syncify
    def _task():
        return tbl.size()

    # subsequent calls to call_loop will work if loop_callback is set.
    assert _task() == 10
    tbl.delete()
def test_async_queue_process_multiple_ports(self):
    """Make 10 extra ports, update each (in shuffled order) through the
    async queue_process wrapper, and check processing was triggered."""
    tbl = Table({"a": int, "b": float, "c": str})
    port_ids = [0]
    port_data = [{"a": 0, "b": 0, "c": "0"}]
    for _ in range(10):
        new_port = tbl.make_port()
        port_ids.append(new_port)
        port_data.append({"a": new_port, "b": new_port * 1.5, "c": str(new_port)})

    assert port_ids == list(range(0, 11))

    manager = PerspectiveManager()
    manager._set_queue_process(TestAsync.wrapped_queue_process)
    manager.host(tbl)

    assert tbl.size() == 0

    random.shuffle(port_ids)
    for pid in port_ids:
        row = pid if pid < len(port_ids) else len(port_ids) - 1
        tbl.update([port_data[row]], port_id=pid)

    # assert that process is being called asynchronously
    assert SENTINEL.get() > 0
    tbl.delete()
def test_async_multiple_managers_queue_process(self):
    """Two managers sharing the same wrapped queue_process both trip the
    sentinel, and reading a view flushes the queued updates."""
    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})
    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)
    manager._set_queue_process(TestAsync.wrapped_queue_process)
    manager2._set_queue_process(TestAsync.wrapped_queue_process)

    for row in data[:5]:
        tbl.update([row])
        tbl2.update([row])

    assert SENTINEL.get() != 0

    # flush `TO_PROCESS`
    view = tbl.view()
    assert view.to_records() == data[:5]

    for row in data[:5]:
        tbl2.update([row])

    view.delete()
    tbl2.delete()
    tbl.delete()
def test_async_multiple_managers_mixed_queue_process_multiple_ports(self):
    """Multi-port variant of the mixed queue_process test: the async
    manager trips SENTINEL while the synchronous one decrements
    SENTINEL_2 once per update (11 ports -> -11)."""
    # mutate when synchronously calling queue_process for each update
    SENTINEL_2 = AsyncSentinel(0)

    def sync_queue_process(table_id, state_manager):
        SENTINEL_2.set(SENTINEL_2.get() - 1)
        state_manager.call_process(table_id)

    tbl = Table({"a": int, "b": float, "c": str})
    tbl2 = Table({"a": int, "b": float, "c": str})

    port_ids = [0]
    port_data = [{"a": 0, "b": 0, "c": "0"}]
    for _ in range(10):
        new_port = tbl.make_port()
        other_port = tbl2.make_port()
        assert new_port == other_port
        port_ids.append(new_port)
        port_data.append({"a": new_port, "b": new_port * 1.5, "c": str(new_port)})

    manager = PerspectiveManager()
    manager2 = PerspectiveManager()
    manager.host_table("tbl", tbl)
    manager2.host_table("tbl2", tbl2)

    # manager uses tornado, manager2 is synchronous
    manager._set_queue_process(TestAsync.wrapped_queue_process)
    manager2._set_queue_process(sync_queue_process)

    random.shuffle(port_ids)
    for pid in port_ids:
        row = pid if pid < len(port_ids) else len(port_ids) - 1
        tbl.update([port_data[row]], port_id=pid)
        tbl2.update([port_data[row]], port_id=pid)

    assert SENTINEL.get() != 0
    assert SENTINEL_2.get() == -11

    tbl2.delete()
    tbl.delete()
def update_callback(rows):
    # The delta should build into a one-row table with the known schema;
    # bump the sentinel once per invocation.
    delta_table = Table(rows)
    assert delta_table.size() == 1
    assert delta_table.schema() == {"a": int, "b": str}
    delta_table.delete()
    s.set(s.get() + 1)