def test_paging_state(self):
    """
    Test to validate the paging state API.

    @since 3.7.0
    @jira_ticket PYTHON-200
    @expected_result the returned paging state should be accurate, and allow for queries to be resumed.
    @test_category queries
    """
    statements_and_params = zip(
        cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
        [(i, ) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))

    list_all_results = []
    self.session.default_fetch_size = 3

    result_set = self.session.execute("SELECT * FROM test3rf.test")
    while result_set.has_more_pages:
        # No row should ever be seen twice across resumed queries.
        for row in result_set.current_rows:
            self.assertNotIn(row, list_all_results)
        list_all_results.extend(result_set.current_rows)
        page_state = result_set.paging_state
        # Resume the query from the saved paging state.
        result_set = self.session.execute("SELECT * FROM test3rf.test",
                                          paging_state=page_state)

    # Bug fix: the final page was previously append()ed (nesting the row list
    # and counting as a single element); extend() matches the loop above and
    # keeps the count correct regardless of how many rows the last page holds.
    if result_set.current_rows:
        list_all_results.extend(result_set.current_rows)

    self.assertEqual(len(list_all_results), 100)
def test_async_paging_verify_writes(self):
    """
    Verify that an async paged read returns every written row exactly once,
    for a query string, a SimpleStatement, and a prepared statement, across
    a range of fetch sizes.
    """
    ddl = '''
        CREATE TABLE test3rf.test_async_paging_verify (
            k1 int,
            k2 int,
            v int,
            PRIMARY KEY(k1, k2)
        )'''
    self.session.execute(ddl)

    statements_and_params = zip(
        cycle([
            "INSERT INTO test3rf.test_async_paging_verify "
            "(k1, k2, v) VALUES (0, %s, %s)"
        ]),
        [(i, i + 1) for i in range(100)])
    # Consistency fix: wrap the zip iterator in list() like the sibling
    # tests do; older driver versions require a sequence here.
    execute_concurrent(self.session, list(statements_and_params))

    prepared = self.session.prepare(
        "SELECT * FROM test3rf.test_async_paging_verify")

    def verify(results):
        # All 100 clustering keys (0..99) and values (1..100) must come
        # back, in clustering order, with no duplicates.
        result_array = []
        value_array = []
        for result in results:
            result_array.append(result.k2)
            value_array.append(result.v)
        self.assertSequenceEqual(range(100), result_array)
        self.assertSequenceEqual(range(1, 101), value_array)

    for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = fetch_size

        # query string
        verify(self.session.execute_async(
            "SELECT * FROM test3rf.test_async_paging_verify").result())

        # simple statement
        statement = SimpleStatement(
            "SELECT * FROM test3rf.test_async_paging_verify")
        verify(self.session.execute_async(statement).result())

        # prepared statement
        verify(self.session.execute_async(prepared).result())
def test_concurrent_with_paging(self):
    """Concurrent executions of a paged SELECT each return the full result set."""
    insert_cql = "INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"
    seed = list(zip(cycle([insert_cql]), [(i, ) for i in range(100)]))
    execute_concurrent(self.session, seed)

    select_stmt = self.session.prepare("SELECT * FROM test3rf.test")

    for page_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = page_size
        outcomes = execute_concurrent_with_args(self.session, select_stmt, [None] * 10)
        self.assertEqual(10, len(outcomes))
        for ok, rows in outcomes:
            self.assertTrue(ok)
            self.assertEqual(100, len(list(rows)))
def test_paging(self):
    """Paged reads return all rows for string, SimpleStatement, and prepared queries."""
    insert_cql = "INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"
    seed = list(zip(cycle([insert_cql]), [(i, ) for i in range(100)]))
    execute_concurrent(self.session, seed)

    select_cql = "SELECT * FROM test3rf.test"
    prepared_stmt = self.session.prepare(select_cql)

    for page_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = page_size

        # raw query string
        self.assertEqual(100, len(list(self.session.execute(select_cql))))

        # simple statement
        simple_stmt = SimpleStatement(select_cql)
        self.assertEqual(100, len(list(self.session.execute(simple_stmt))))

        # prepared statement
        self.assertEqual(100, len(list(self.session.execute(prepared_stmt))))
def test_paging_verify_writes(self):
    """
    Verify that a paged read returns every written row exactly once, for a
    query string, a SimpleStatement, and a prepared statement, across a
    range of fetch sizes.
    """
    statements_and_params = zip(
        cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
        [(i, ) for i in range(100)])
    # Consistency fix: wrap the zip iterator in list() like the sibling
    # tests do; older driver versions require a sequence here.
    execute_concurrent(self.session, list(statements_and_params))

    prepared = self.session.prepare("SELECT * FROM test3rf.test")

    def verify(results):
        # All 100 keys must come back (no duplicates, no gaps) and every
        # value must be the 0 we inserted.
        keys = set()
        values = set()
        for result in results:
            keys.add(result.k)
            values.add(result.v)
        self.assertEqual(set(range(100)), keys)
        self.assertEqual(set([0]), values)

    for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = fetch_size

        # query string
        verify(self.session.execute("SELECT * FROM test3rf.test"))

        # simple statement
        statement = SimpleStatement("SELECT * FROM test3rf.test")
        verify(self.session.execute(statement))

        # prepared statement
        verify(self.session.execute(prepared))
def setUpClass(cls):
    """
    Build a cluster with one execution profile per continuous-paging
    configuration under test, and seed 150 rows into the test table.

    Skipped (early return) when DSE < 5.1, which predates continuous paging.
    """
    if DSE_VERSION and DSE_VERSION < '5.1':
        return
    super(ContPagingTests, cls).setUpClass()

    # Sentinel instances used for identity comparisons.
    # Bug fix: default_cont was assigned the `object` type itself instead of
    # an instance, inconsistent with the other two sentinels below.
    cls.default_cont = object()
    cls.one_page_cont = object()
    cls.many_pages_cont = object()

    cls.execution_profiles = {
        "CONTDEFAULT": ExecutionProfile(
            continuous_paging_options=ContinuousPagingOptions()),
        "ONEPAGE": ExecutionProfile(
            continuous_paging_options=ContinuousPagingOptions(max_pages=1)),
        "MANYPAGES": ExecutionProfile(
            continuous_paging_options=ContinuousPagingOptions(max_pages=10)),
        "BYTES": ExecutionProfile(
            continuous_paging_options=ContinuousPagingOptions(
                page_unit=ContinuousPagingOptions.PagingUnit.BYTES)),
        "SLOW": ExecutionProfile(
            continuous_paging_options=ContinuousPagingOptions(
                max_pages_per_second=1)),
    }
    cls.cluster = Cluster(protocol_version=ProtocolVersion.DSE_V1,
                          execution_profiles=cls.execution_profiles)
    cls.session = cls.cluster.connect(wait_for_all_pools=True)

    # The test table shares its name with the keyspace (see
    # select_all_statement below).
    statements_and_params = zip(
        cycle([
            "INSERT INTO " + cls.ks_name + "." + cls.ks_name +
            " (k, v) VALUES (%s, 0)"
        ]),
        [(i, ) for i in range(150)])
    execute_concurrent(cls.session, list(statements_and_params))

    cls.select_all_statement = "SELECT * FROM {0}.{0}".format(cls.ks_name)
def test_no_connection_refused_on_timeout(self):
    """
    Test for PYTHON-91 "Connection closed after LWT timeout"
    Verifies that connection to the cluster is not shut down when timeout occurs.
    Number of iterations can be specified with LWT_ITERATIONS environment variable.
    Default value is 1000
    """
    insert_statement = self.session.prepare(
        "INSERT INTO test3rf.lwt (k, v) VALUES (0, 0) IF NOT EXISTS")
    delete_statement = self.session.prepare(
        "DELETE FROM test3rf.lwt WHERE k = 0 IF EXISTS")

    iterations = int(os.getenv("LWT_ITERATIONS", 1000))

    # Interleave inserts and deletes so every statement is an LWT that
    # contends on the same partition.
    statements_and_params = []
    for _ in range(iterations):
        statements_and_params.append((insert_statement, ()))
        statements_and_params.append((delete_statement, ()))

    received_timeout = False
    results = execute_concurrent(self.session,
                                 statements_and_params,
                                 raise_on_first_error=False)
    for success, result in results:
        if success:
            continue

        # On failure, `result` holds the exception instance.
        err_name = type(result).__name__
        if err_name == "NoHostAvailable":
            self.fail("PYTHON-91: Disconnected from Cassandra: %s" %
                      result.message)
        elif err_name in ("WriteTimeout", "WriteFailure"):
            received_timeout = True
        elif err_name in ("ReadTimeout", "ReadFailure"):
            pass
        else:
            self.fail("Unexpected exception %s: %s" %
                      (err_name, result.message))

    # Make sure test passed
    self.assertTrue(received_timeout)
def execute_concurrent_helper(self, session, query, results_generator=False):
    """
    Run execute_concurrent, retrying up to 100 times on server/client
    timeout or failure errors.

    :param session: session to execute against
    :param query: statements-and-parameters sequence passed through to
        execute_concurrent
    :param results_generator: forwarded to execute_concurrent
    :raises RuntimeError: when all 100 attempts time out or fail
    """
    for _ in range(100):
        try:
            # Bug fix: results_generator was previously hardcoded to False,
            # silently ignoring the caller's argument.
            return execute_concurrent(session, query,
                                      results_generator=results_generator)
        except (ReadTimeout, WriteTimeout, OperationTimedOut,
                ReadFailure, WriteFailure):
            ex_type, ex, tb = sys.exc_info()
            # log.warn is deprecated; warning is the supported spelling.
            log.warning("{0}: {1} Backtrace: {2}".format(
                ex_type.__name__, ex, traceback.extract_tb(tb)))
            # Drop the traceback reference to avoid a frame reference cycle.
            del tb
    raise RuntimeError(
        "Failed to execute query after 100 attempts: {0}".format(query))
def test_no_raise_on_first_failure_client_side(self):
    """A client-side binding error is reported in the results instead of raised."""
    stmt = SimpleStatement(
        "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)",
        consistency_level=ConsistencyLevel.QUORUM)

    params = [(i, i) for i in range(100)]
    # the driver will raise an error when binding the params
    bad_index = 57
    params[bad_index] = 1

    outcomes = execute_concurrent(self.session,
                                  list(zip(cycle((stmt, )), params)),
                                  raise_on_first_error=False)

    for idx, (ok, value) in enumerate(outcomes):
        if idx != bad_index:
            self.assertTrue(ok)
            self.assertFalse(value)
        else:
            self.assertFalse(ok)
            self.assertIsInstance(value, TypeError)
def test_no_raise_on_first_failure(self):
    """A server-side request error is reported in the results instead of raised."""
    stmt = SimpleStatement(
        "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)",
        consistency_level=ConsistencyLevel.QUORUM)

    params = [(i, i) for i in range(100)]
    # we'll get an error back from the server
    bad_index = 57
    params[bad_index] = ('efefef', 'awefawefawef')

    outcomes = execute_concurrent(self.session,
                                  list(zip(cycle((stmt, )), params)),
                                  raise_on_first_error=False)

    for idx, (ok, value) in enumerate(outcomes):
        if idx != bad_index:
            self.assertTrue(ok)
            self.assertFalse(value)
        else:
            self.assertFalse(ok)
            self.assertIsInstance(value, InvalidRequest)
def insert_and_validate_list_generator(self, reverse, slowdown):
    """
    This utility method will execute submit various statements for execution using the
    ConcurrentExecutorGenResults, then invoke a separate thread to execute the callback
    associated with the futures registered for those statements. The parameters will toggle
    various timing, and ordering changes. Finally it will validate that the results were
    returned in the order they were submitted

    :param reverse: Execute the callbacks in the opposite order that they were submitted
    :param slowdown: Cause intermittent queries to perform slowly
    """
    mock_future = MockResponseResponseFuture(reverse=reverse)
    fake_session = Mock()
    fake_session.execute_async.return_value = mock_future

    workload = zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
                   [(i, ) for i in range(100)])

    # Drive the registered callbacks from a separate thread while the
    # generator is consumed.
    invoker = TimedCallableInvoker(mock_future, slowdown=slowdown)
    invoker.start()
    try:
        gen_results = execute_concurrent(fake_session, workload,
                                         results_generator=True)
        self.validate_result_ordering(gen_results)
    finally:
        invoker.stop()
def test_idle_heartbeat(self):
    """
    Verify that idle-heartbeat messages are sent on every pooled connection,
    that activity resets idle status, and that connection holders are
    tracked correctly as sessions are added.
    """
    interval = 2
    cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                      idle_heartbeat_interval=interval)
    session = cluster.connect(wait_for_all_pools=True)

    # This test relies on impl details of connection req id management to see if heartbeats
    # are being sent. May need update if impl is changed
    connection_request_ids = {}
    for h in cluster.get_connection_holders():
        for c in h.get_connections():
            # make sure none are idle (should have startup messages
            self.assertFalse(c.is_idle)
            with c.lock:
                connection_request_ids[id(c)] = deque(c.request_ids)  # copy of request ids

    # let two heatbeat intervals pass (first one had startup messages in it)
    time.sleep(2 * interval + interval / 2)

    connections = [c for holders in cluster.get_connection_holders()
                   for c in holders.get_connections()]

    # make sure requests were sent on all connections:
    # a heartbeat consumes and returns one request id, which rotates the
    # id deque by one position relative to the snapshot taken above.
    for c in connections:
        expected_ids = connection_request_ids[id(c)]
        expected_ids.rotate(-1)
        with c.lock:
            self.assertListEqual(list(c.request_ids), list(expected_ids))

    # assert idle status
    self.assertTrue(all(c.is_idle for c in connections))

    # send messages on all connections (one query per host)
    statements_and_params = [("SELECT release_version FROM system.local",
                              ())] * len(cluster.metadata.all_hosts())
    results = execute_concurrent(session, statements_and_params)
    for success, result in results:
        self.assertTrue(success)

    # assert not idle status (the control connection is excluded: the
    # queries above are not guaranteed to have touched it)
    self.assertFalse(
        any(c.is_idle if not c.is_control_connection else False
            for c in connections))

    # holders include session pools and cc
    holders = cluster.get_connection_holders()
    self.assertIn(cluster.control_connection, holders)
    self.assertEqual(len(holders),
                     len(cluster.metadata.all_hosts()) + 1)  # hosts pools, 1 for cc

    # include additional sessions
    session2 = cluster.connect(wait_for_all_pools=True)

    holders = cluster.get_connection_holders()
    self.assertIn(cluster.control_connection, holders)
    self.assertEqual(len(holders),
                     2 * len(cluster.metadata.all_hosts()) + 1)  # 2 sessions' hosts pools, 1 for cc

    # stop the heartbeat thread before asserting quiescence and shutting down
    cluster._idle_heartbeat.stop()
    cluster._idle_heartbeat.join()
    assert_quiescent_pool_state(self, cluster)

    cluster.shutdown()
def test_fetch_size(self):
    """
    Ensure per-statement fetch_sizes override the default fetch size,
    for prepared, bound, and simple statements.
    """
    statements_and_params = zip(
        cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
        [(i, ) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))

    prepared = self.session.prepare("SELECT * FROM test3rf.test")

    def assert_paged(statement, expect_more):
        # Execute and check whether the 100-row result spans multiple pages.
        result = self.session.execute(statement, [])
        if expect_more:
            self.assertTrue(result.has_more_pages)
        else:
            self.assertFalse(result.has_more_pages)

    # session default controls paging when the statement does not override it
    self.session.default_fetch_size = 10
    assert_paged(prepared, True)
    self.session.default_fetch_size = 2000
    assert_paged(prepared, False)
    self.session.default_fetch_size = None
    assert_paged(prepared, False)

    self.session.default_fetch_size = 10

    # prepared statement fetch_size overrides the session default
    prepared.fetch_size = 2000
    assert_paged(prepared, False)
    prepared.fetch_size = None
    assert_paged(prepared, False)
    prepared.fetch_size = 10
    assert_paged(prepared, True)

    # bound statements pick up the prepared fetch_size at bind time
    prepared.fetch_size = 2000
    assert_paged(prepared.bind([]), False)
    prepared.fetch_size = None
    assert_paged(prepared.bind([]), False)
    prepared.fetch_size = 10
    bound = prepared.bind([])
    assert_paged(bound, True)

    # bound statement fetch_size can be overridden after binding
    bound.fetch_size = 2000
    assert_paged(bound, False)
    bound.fetch_size = None
    assert_paged(bound, False)
    bound.fetch_size = 10
    assert_paged(bound, True)

    # simple statement fetch_size, set at construction or afterwards
    assert_paged(SimpleStatement("SELECT * FROM test3rf.test",
                                 fetch_size=None), False)
    assert_paged(SimpleStatement("SELECT * FROM test3rf.test"), True)
    s = SimpleStatement("SELECT * FROM test3rf.test")
    s.fetch_size = None
    assert_paged(s, False)
def test_paging_callbacks(self):
    """
    Test to validate callback api
    @since 3.9.0
    @jira_ticket PYTHON-733
    @expected_result callbacks shouldn't be called twice per message
    and the fetch_size should be handled in a transparent way to the user

    @test_category queries
    """
    statements_and_params = zip(
        cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
        [(i, ) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))

    prepared = self.session.prepare("SELECT * FROM test3rf.test")

    for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = fetch_size
        future = self.session.execute_async("SELECT * FROM test3rf.test",
                                            timeout=20)

        # `event` signals completion; the two count() iterators tally rows
        # seen and pages delivered, read back via next() after the wait.
        event = Event()
        counter = count()
        number_of_calls = count()

        def handle_page(rows, future, counter, number_of_calls):
            # one tick per page delivered
            next(number_of_calls)
            # one tick per row in this page
            for row in rows:
                next(counter)

            if future.has_more_pages:
                future.start_fetching_next_page()
            else:
                event.set()

        def handle_error(err):
            # release the waiter before failing the test
            event.set()
            self.fail(err)

        future.add_callbacks(callback=handle_page,
                             callback_args=(future, counter, number_of_calls),
                             errback=handle_error)
        event.wait()
        # expected page count: full pages plus the final partial/terminal page
        self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
        self.assertEqual(next(counter), 100)

        # simple statement
        future = self.session.execute_async(
            SimpleStatement("SELECT * FROM test3rf.test"), timeout=20)
        event.clear()
        counter = count()
        number_of_calls = count()
        future.add_callbacks(callback=handle_page,
                             callback_args=(future, counter, number_of_calls),
                             errback=handle_error)
        event.wait()
        self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
        self.assertEqual(next(counter), 100)

        # prepared statement
        future = self.session.execute_async(prepared, timeout=20)
        event.clear()
        counter = count()
        number_of_calls = count()
        future.add_callbacks(callback=handle_page,
                             callback_args=(future, counter, number_of_calls),
                             errback=handle_error)
        event.wait()
        self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
        self.assertEqual(next(counter), 100)