def test_unsubbed_table_stream(self) -> None:
    # Create the table stream, but it should be unsubscribed.
    table = data._TableStream(
        "foo",
        data._Relation(self.relation),
        subscribed=False,
    )
    foo_factory = utils.FakeTableFactory("foo", self.relation)
    foo_faker = foo_factory.create_table(utils.table_id1)

    rb_data: List[List[Any]] = [
        ["foo", "bar", "baz", "bat"],
        [200, 500, 301, 404],
    ]
    # A data rowbatch.
    batch1 = foo_faker.row_batch(rb_data)
    # The end of table row batch.
    batch2 = foo_faker.row_batch([[], []], eos=True, eow=True)

    # Push the rowbatches onto this table stream.
    table.add_row_batch(batch1)
    table.add_row_batch(batch2)

    loop = asyncio.get_event_loop()
    with self.assertRaisesRegex(ValueError, "Table .* not subscribed"):
        loop.run_until_complete(utils.iterate_and_pass(table))
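# A minimal sketch of what the `iterate_and_pass` helpers used throughout these
# tests (`utils.iterate_and_pass` / `test_utils.iterate_and_pass`) are assumed to
# do: drain a table's async iterator and discard the rows, so any error raised
# while streaming propagates to the caller. This local copy is illustrative only;
# the tests call the helpers defined in the utils modules.
async def iterate_and_pass_sketch(table: data._TableStream) -> None:
    # Consume rows until the stream ends; only side effects and errors matter here.
    async for _ in table:
        pass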
def test_stop_sending_data_before_eos(self) -> None:
    # If the stream stops before sending an eos for each table, that should be an error.
    # Connect to a single fake cluster.
    conn = self.px_client.connect_to_cluster(
        self.px_client.list_healthy_clusters()[0])

    http_table1 = self.http_table_factory.create_table(test_utils.table_id1)
    self.fake_vizier_service.add_fake_data(conn.cluster_id, [
        # Initialize the table on the stream and send over a rowbatch.
        http_table1.metadata_response(),
        http_table1.row_batch_response([["foo"], [200]]),
        # Note: the table does not send an end message over the stream.
    ])

    # Create the script_executor object.
    script_executor = conn.prepare_script(pxl_script)
    # Subscribe to the http table.
    http_tb = script_executor.subscribe("http")

    # Run the script_executor and the table processor concurrently.
    loop = asyncio.get_event_loop()
    with self.assertRaisesRegex(ValueError, "Closed before receiving end-of-stream."):
        loop.run_until_complete(
            run_script_and_tasks(script_executor,
                                 [test_utils.iterate_and_pass(http_tb)]))
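# A minimal sketch of the `run_script_and_tasks` helper used in this test and the
# ones below (an assumption about its behavior, not necessarily the definition
# used elsewhere in this file): run the script executor and the per-table consumer
# coroutines concurrently, assuming the executor exposes an async entry point
# (`run_async()` here), so an error from either side is re-raised out of the event loop.
async def run_script_and_tasks_sketch(script_executor: Any,
                                      processors: List[Any]) -> None:
    # gather() surfaces the first exception raised by the script or any processor.
    await asyncio.gather(script_executor.run_async(), *processors)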
def test_handle_server_side_errors(self) -> None:
    # Make sure server-side errors are surfaced to the client.
    # Connect to a single fake cluster.
    conn = self.px_client.connect_to_cluster(
        self.px_client.list_healthy_clusters()[0])

    http_table1 = self.http_table_factory.create_table(test_utils.table_id1)
    self.fake_vizier_service.add_fake_data(conn.cluster_id, [
        # Initialize the table on the stream with the metadata.
        http_table1.metadata_response(),
        # Send over a single-row batch.
        http_table1.row_batch_response([["foo"], [200]]),
        # NOTE: don't send over the eos -> simulating an error midway through the stream.
    ])
    self.fake_vizier_service.trigger_error(test_utils.cluster_uuid1, ValueError('hi'))

    # Create the script_executor object.
    script_executor = conn.prepare_script(pxl_script)
    # Subscribe to the http table.
    http_tb = script_executor.subscribe("http")

    # Run the script_executor and the table processor concurrently.
    loop = asyncio.get_event_loop()
    with self.assertRaisesRegex(grpc.aio.AioRpcError, "hi"):
        loop.run_until_complete(
            run_script_and_tasks(script_executor,
                                 [test_utils.iterate_and_pass(http_tb)]))
def test_run_script_with_api_errors(self) -> None:
    # Connect to a single fake cluster.
    conn = self.px_client.connect_to_cluster(
        self.px_client.list_healthy_clusters()[0])

    # Only send data for "http".
    http_table1 = self.http_table_factory.create_table(test_utils.table_id1)
    self.fake_vizier_service.add_fake_data(conn.cluster_id, [
        http_table1.metadata_response(),
        http_table1.row_batch_response([["foo"], [200]]),
        http_table1.end(),
    ])

    script_executor = conn.prepare_script(pxl_script)
    # Subscribing to a table that doesn't exist should throw an error.
    foobar_tb = script_executor.subscribe("foobar")

    # Try to pull data from foobar_tb, but error out because the script_executor
    # never produces that data.
    loop = asyncio.get_event_loop()
    with self.assertRaisesRegex(ValueError, "Table 'foobar' not received"):
        loop.run_until_complete(
            run_script_and_tasks(script_executor,
                                 [test_utils.iterate_and_pass(foobar_tb)]))