def test_prepare_on_all_hosts(self):
    """
    Test to validate prepare_on_all_hosts flag is honored.

    Use a special ForcedHostSwitchPolicy to ensure prepared queries are
    cycled over nodes that should not have them prepared. Check the logs
    to ensure they are being re-prepared on those nodes.

    @since 3.4.0
    @jira_ticket PYTHON-556
    @expected_result queries will have to be re-prepared on hosts that
    aren't the control connection
    """
    white_list = ForcedHostSwitchPolicy()
    clus = Cluster(
        load_balancing_policy=white_list,
        protocol_version=PROTOCOL_VERSION,
        prepare_on_all_hosts=False,
        reprepare_on_up=False)
    mock_handler = MockLoggingHandler()
    logger = logging.getLogger(cluster.__name__)
    logger.addHandler(mock_handler)
    try:
        session = clus.connect(wait_for_all_pools=True)
        select_statement = session.prepare("SELECT * FROM system.local")
        # Three executions rotate across hosts; only the control-connection
        # host had the statement prepared up front, so the other two hosts
        # each trigger a "Re-preparing" debug message.
        session.execute(select_statement)
        session.execute(select_statement)
        session.execute(select_statement)
        self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing"))
    finally:
        # Detach the handler even on failure so captured messages don't
        # leak into (and skew the counts of) subsequent tests.
        logger.removeHandler(mock_handler)
        clus.shutdown()
def test_duplicate(self):
    """
    Test duplicate RPC addresses.

    Modifies the system.peers table to make hosts have the same rpc
    address. Ensures such hosts are filtered out and a message is logged.

    @since 3.4
    @jira_ticket PYTHON-366
    @expected_result only one host's metadata will be populated

    @test_category metadata
    """
    mock_handler = MockLoggingHandler()
    logger = logging.getLogger(dse.cluster.__name__)
    logger.addHandler(mock_handler)
    test_cluster = self.cluster = Cluster(
        protocol_version=PROTOCOL_VERSION,
        execution_profiles={
            EXEC_PROFILE_DEFAULT: ExecutionProfile(
                load_balancing_policy=self.load_balancing_policy)
        })
    try:
        test_cluster.connect()
        # Exactly one warning should be emitted for the duplicated address.
        warnings = mock_handler.messages.get("warning")
        self.assertEqual(len(warnings), 1)
        self.assertTrue('multiple' in warnings[0])
    finally:
        # Run cleanup even if an assertion fails so the logging handler
        # and the cluster don't leak into subsequent tests.
        logger.removeHandler(mock_handler)
        test_cluster.shutdown()
def test_duplicate(self):
    """
    Prime duplicate peer endpoints and verify such hosts are filtered out
    and a warning about multiple hosts sharing an endpoint is logged.
    """
    mock_handler = MockLoggingHandler()
    logger = logging.getLogger(cassandra.cluster.__name__)
    logger.addHandler(mock_handler)
    # DSE 6.0+ renamed the peers column that holds the client-facing address.
    address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION > Version("6.0") else "rpc_address"
    rows = [
        {"peer": "127.0.0.1", "data_center": "dc", "host_id": "dontcare1", "rack": "rack1",
         "release_version": "3.11.4", address_column: "127.0.0.1", "schema_version": "dontcare", "tokens": "1"},
        {"peer": "127.0.0.2", "data_center": "dc", "host_id": "dontcare2", "rack": "rack1",
         "release_version": "3.11.4", address_column: "127.0.0.2", "schema_version": "dontcare", "tokens": "2"},
    ]
    prime_query(ControlConnection._SELECT_PEERS, rows=rows)
    # Named test_cluster (not "cluster") to avoid shadowing the cluster
    # module name used elsewhere in this file.
    test_cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
    try:
        test_cluster.connect(wait_for_all_pools=True)
        warnings = mock_handler.messages.get("warning")
        self.assertEqual(len(warnings), 1)
        self.assertTrue('multiple hosts with the same endpoint' in warnings[0])
    finally:
        # Run cleanup even if an assertion fails so the logging handler
        # and the cluster don't leak into subsequent tests.
        logger.removeHandler(mock_handler)
        test_cluster.shutdown()
def test_sync_warnings(self):
    """
    Test to ensure that when inconsistent changes are made to a table, or
    type as part of a sync call, the proper logging messages are surfaced.

    @since 3.2
    @jira_ticket PYTHON-260
    @expected_result warnings are logged

    @test_category object_mapper
    """
    mock_handler = MockLoggingHandler()
    logger = logging.getLogger(management.__name__)
    logger.addHandler(mock_handler)
    try:
        # Syncing a changed model over an existing table must warn about
        # the differing column type.
        sync_table(self.conn, BaseInconsistent)
        sync_table(self.conn, ChangedInconsistent)
        self.assertTrue('differing from the model type' in mock_handler.messages.get('warning')[0])
        # User-defined types only exist on Cassandra 2.1+.
        if CASSANDRA_VERSION >= '2.1':
            sync_type(self.conn, BaseInconsistentType)
            mock_handler.reset()
            sync_type(self.conn, ChangedInconsistentType)
            self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0])
    finally:
        # Detach the handler even if an assertion fails, so captured
        # messages don't leak into subsequent tests.
        logger.removeHandler(mock_handler)
def test_prepare_on_all_hosts(self):
    """
    Test to validate prepare_on_all_hosts flag is honored.

    Use a special ForcedHostSwitchPolicy to ensure prepared queries are
    cycled over nodes that should not have them prepared. Check the logs
    to ensure they are being re-prepared on those nodes.

    @since 3.4.0
    @jira_ticket PYTHON-556
    @expected_result queries will have to be re-prepared on hosts that
    aren't the control connection
    """
    with Cluster(protocol_version=PROTOCOL_VERSION,
                 prepare_on_all_hosts=False,
                 reprepare_on_up=False,
                 execution_profiles={
                     EXEC_PROFILE_DEFAULT: ExecutionProfile(
                         load_balancing_policy=ForcedHostSwitchPolicy())
                 }) as cluster:
        session = cluster.connect(wait_for_all_pools=True)
        mock_handler = MockLoggingHandler()
        logger = logging.getLogger(dse.cluster.__name__)
        logger.addHandler(mock_handler)
        try:
            # The policy needs at least three hosts to rotate through.
            self.assertGreaterEqual(len(cluster.metadata.all_hosts()), 3)
            select_statement = session.prepare("SELECT * FROM system.local")
            response_first = session.execute(select_statement)
            response_second = session.execute(select_statement)
            response_third = session.execute(select_statement)
            # Each execution should have landed on a distinct host.
            self.assertEqual(
                len({
                    response_first.response_future.attempted_hosts[0],
                    response_second.response_future.attempted_hosts[0],
                    response_third.response_future.attempted_hosts[0]
                }), 3)
            # Only the two non-control-connection hosts should re-prepare.
            self.assertEqual(
                2, mock_handler.get_message_count('debug', "Re-preparing"))
        finally:
            # Detach the handler so it doesn't leak into other tests.
            logger.removeHandler(mock_handler)
def setUpClass(cls):
    """Install a MockLoggingHandler on the cluster module logger, shared by the class."""
    handler = MockLoggingHandler()
    cls.logger_handler = handler
    logging.getLogger(cluster.__name__).addHandler(handler)