def test_write_timeout(self):
    """Verify DowngradingConsistencyRetryPolicy.on_write_timeout per write type."""
    policy = DowngradingConsistencyRetryPolicy()

    # if this is the second or greater attempt, rethrow
    retry, consistency = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.SIMPLE,
        required_responses=1, received_responses=2, retry_num=1)
    self.assertEqual(retry, RetryPolicy.RETHROW)
    # Fix: also pin the consistency returned alongside RETHROW, matching
    # the sibling write-timeout test in this suite.
    self.assertEqual(consistency, None)

    # ignore failures on these types of writes
    for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
        retry, consistency = policy.on_write_timeout(
            query=None, consistency="ONE", write_type=write_type,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.IGNORE)

    # downgrade consistency level on unlogged batch writes
    retry, consistency = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.UNLOGGED_BATCH,
        required_responses=3, received_responses=1, retry_num=0)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, ConsistencyLevel.ONE)

    # retry batch log writes at the same consistency level
    retry, consistency = policy.on_write_timeout(
        query=None, consistency="ONE", write_type=WriteType.BATCH_LOG,
        required_responses=3, received_responses=1, retry_num=0)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, "ONE")
def test_unavailable(self):
    """on_unavailable: rethrow after the first attempt, otherwise downgrade."""
    policy = DowngradingConsistencyRetryPolicy()

    # any attempt beyond the first is rethrown
    decision = policy.on_unavailable(
        query=None, consistency="ONE",
        required_replicas=3, alive_replicas=1, retry_num=1)
    self.assertEqual(decision[0], RetryPolicy.RETHROW)

    # first attempt: downgrade to a level the single live replica can satisfy
    decision = policy.on_unavailable(
        query=None, consistency="ONE",
        required_replicas=3, alive_replicas=1, retry_num=0)
    self.assertEqual(decision[0], RetryPolicy.RETRY)
    self.assertEqual(decision[1], ConsistencyLevel.ONE)
def _test_downgrading_cl(self, keyspace, rf, accepted):
    """Exercise reads/writes through DowngradingConsistencyRetryPolicy while
    node 2 is stopped; only the consistency levels in `accepted` may succeed."""
    cluster = TestCluster(
        execution_profiles={
            EXEC_PROFILE_DEFAULT: ExecutionProfile(
                TokenAwarePolicy(RoundRobinPolicy()),
                DowngradingConsistencyRetryPolicy())
        })
    session = cluster.connect(wait_for_all_pools=True)

    create_schema(cluster, session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)

    # token-aware routing should have sent the single query to node 2 only
    for node, expected_count in ((1, 0), (2, 1), (3, 0)):
        self.coordinator_stats.assert_query_count_equals(self, node, expected_count)

    try:
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, accepted)
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        rejected = SINGLE_DC_CONSISTENCY_LEVELS - accepted
        self._assert_writes_fail(session, keyspace, rejected)
        self._assert_reads_fail(session, keyspace, rejected)
    finally:
        start(2)
        wait_for_up(cluster, 2)

    cluster.shutdown()
def __init__(self):
    """Open a session against a two-node local cluster using a downgrading
    retry policy and a fixed-delay reconnection policy."""
    retry = DowngradingConsistencyRetryPolicy()
    reconnect = ConstantReconnectionPolicy(20.0, 10)
    self.cluster = Cluster(
        contact_points=['127.0.0.1', '127.0.0.2'],
        default_retry_policy=retry,
        reconnection_policy=reconnect,
    )
    self.session = self.cluster.connect()
def test_rfthree_roundrobin_downgradingcl(self):
    """RF=3 with round-robin balancing and downgrading retry (legacy config)."""
    keyspace = 'test_rfthree_roundrobin_downgradingcl'
    balancer = RoundRobinPolicy()
    retry = DowngradingConsistencyRetryPolicy()
    cluster = Cluster(load_balancing_policy=balancer,
                      default_retry_policy=retry,
                      protocol_version=PROTOCOL_VERSION)
    self.rfthree_downgradingcl(cluster, keyspace, True)
def __init__(self, keyspace, table):
    """Connect to a local Cassandra node, bootstrapping keyspace 'd3' and the
    target table when they do not yet exist.

    NOTE(review): the `keyspace` argument is stored but the session is
    hard-wired to 'd3' — confirm whether that is intentional.
    """
    self.keyspace = keyspace
    self.table_name = table
    # QUORUM consistency with downgrading retry, dict rows, 20s timeout,
    # pinned to the local node via a whitelist balancer.
    self.profile = ExecutionProfile(
        load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']),
        retry_policy=DowngradingConsistencyRetryPolicy(),
        consistency_level=ConsistencyLevel.QUORUM,
        request_timeout=20,
        row_factory=dict_factory)
    self.cluster = Cluster(
        execution_profiles={EXEC_PROFILE_DEFAULT: self.profile})
    # Checking if keyspace exists or not
    # noinspection PyBroadException
    try:
        self.session = self.cluster.connect('d3')
    except Exception as e:
        # connect('d3') failed — presumably the keyspace is missing, so
        # connect without one and create it.
        # NOTE(review): this broad except also swallows genuine connection
        # errors — consider narrowing.
        self.session = self.cluster.connect()
        self.session.execute(
            "CREATE KEYSPACE d3 WITH REPLICATION = {'class':'SimpleStrategy','replication_factor':3} AND durable_writes = 'true';"
        )
        self.session.set_keyspace('d3')
    # checking if the table exists or not
    if self.table_name not in self.cluster.metadata.keyspaces['d3'].tables:
        self.session.execute(
            f"CREATE TABLE {self.table_name} (uuid text , key text , data text , PRIMARY KEY ((uuid,key),data));"
        )
def active_connection(self):
    '''Activate the Cassandra connection from the configured parameters.'''
    # Cassandra port
    portItr = int(self.__database_parameters.get_port())
    # Cassandra cluster IP
    self.__contact_points = [
        self.__database_parameters.get_contact_points()
    ]
    self.__port = portItr
    # Cassandra username and password
    self.__auth_provider = PlainTextAuthProvider(
        username=self.__database_parameters.get_username(),
        password=self.__database_parameters.get_password())
    try:
        # Create Cassandra cluster
        self.__cluster = Cluster(
            contact_points=self.__contact_points,
            port=self.__port,
            auth_provider=self.__auth_provider,
            # load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='DC2'),
            default_retry_policy=DowngradingConsistencyRetryPolicy())
    except Exception:
        # Fix: bare `raise` re-raises with the original traceback intact;
        # the previous `raise e` added a redundant frame for no benefit.
        raise
    self.__is_connected = True
def test_statement_params_override_profile(self):
    """Statement-level settings must override those of the named execution profile.

    Builds a profile whose retry policy / consistency / serial consistency are
    three distinct anonymous sentinels, then checks that executing a
    SimpleStatement carrying its own values produces a response future whose
    effective profile combines the statement's values with the profile's
    load balancer and row factory.
    """
    # three object() sentinels fill retry_policy, consistency_level and
    # serial_consistency_level positionally
    non_default_profile = ExecutionProfile(RoundRobinPolicy(), *[object() for _ in range(3)])
    cluster = Cluster(
        execution_profiles={'non-default': non_default_profile})
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
    self.assertEqual(cluster._config_mode, _ConfigMode.PROFILES)

    rf = session.execute_async("query", execution_profile='non-default')

    ss = SimpleStatement("query",
                         retry_policy=DowngradingConsistencyRetryPolicy(),
                         consistency_level=ConsistencyLevel.ALL,
                         serial_consistency_level=ConsistencyLevel.SERIAL)
    my_timeout = 1.1234

    # sanity: each statement-level setting differs from what the profile produced
    self.assertNotEqual(ss.retry_policy.__class__, rf._load_balancer.__class__)
    self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
    self.assertNotEqual(ss._serial_consistency_level, rf.message.serial_consistency_level)
    self.assertNotEqual(my_timeout, rf.timeout)

    rf = session.execute_async(ss, timeout=my_timeout, execution_profile='non-default')
    # statement values win; balancer and row factory come from the profile
    expected_profile = ExecutionProfile(
        non_default_profile.load_balancing_policy, ss.retry_policy,
        ss.consistency_level, ss._serial_consistency_level, my_timeout,
        non_default_profile.row_factory)
    self._verify_response_future_profile(rf, expected_profile)
def _test_downgrading_cl(self, keyspace, rf, accepted):
    """Legacy-config variant: exercise downgrading retry while node 2 is down.

    Only the consistency levels in `accepted` are expected to succeed.
    """
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        default_retry_policy=DowngradingConsistencyRetryPolicy())
    session = cluster.connect()

    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)

    # token-aware routing should have sent the single query to node 2 only
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)

    try:
        force_stop(2)
        wait_for_down(cluster, 2)

        self._assert_writes_succeed(session, keyspace, accepted)
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        start(2)
        wait_for_up(cluster, 2)
        # Fix: the original never shut the cluster down, leaking its
        # connections and threads; the sibling profile-based variant does.
        cluster.shutdown()
def test_rfthree_tokenaware_downgradingcl(self):
    """RF=3 with token-aware balancing and downgrading retry (legacy config)."""
    keyspace = 'test_rfthree_tokenaware_downgradingcl'
    balancer = TokenAwarePolicy(RoundRobinPolicy())
    retry = DowngradingConsistencyRetryPolicy()
    cluster = Cluster(load_balancing_policy=balancer,
                      default_retry_policy=retry,
                      protocol_version=PROTOCOL_VERSION)
    self.rfthree_downgradingcl(cluster, keyspace, False)
def test_read_timeout(self):
    """Table-driven check of on_read_timeout decisions."""
    policy = DowngradingConsistencyRetryPolicy()

    # (required, received, data_retrieved, retry_num,
    #  expected decision, expected consistency)
    cases = [
        # second or later attempt: rethrow
        (1, 2, True, 1, RetryPolicy.RETHROW, None),
        # too few responses: retry at a level the responses can satisfy
        (4, 3, True, 0, RetryPolicy.RETRY, ConsistencyLevel.THREE),
        (3, 2, True, 0, RetryPolicy.RETRY, ConsistencyLevel.TWO),
        (3, 1, True, 0, RetryPolicy.RETRY, ConsistencyLevel.ONE),
        # no responses at all: rethrow
        (3, 0, True, 0, RetryPolicy.RETHROW, None),
        # enough responses but no data: retry at the same consistency
        (3, 3, False, 0, RetryPolicy.RETRY, 'ONE'),
        # enough responses with data already retrieved: rethrow
        (2, 2, True, 0, RetryPolicy.RETHROW, None),
    ]
    for required, received, got_data, attempt, want_decision, want_cl in cases:
        retry, consistency = policy.on_read_timeout(
            query=None, consistency="ONE",
            required_responses=required, received_responses=received,
            data_retrieved=got_data, retry_num=attempt)
        self.assertEqual(retry, want_decision)
        self.assertEqual(consistency, want_cl)
def test_rfthree_roundrobin_downgradingcl(self):
    """RF=3 round-robin balancing with downgrading retry, via execution profiles."""
    keyspace = 'test_rfthree_roundrobin_downgradingcl'
    profile = ExecutionProfile(RoundRobinPolicy(),
                               DowngradingConsistencyRetryPolicy())
    with TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) as cluster:
        self.rfthree_downgradingcl(cluster, keyspace, True)
def test_rfthree_tokenaware_downgradingcl(self):
    """RF=3 token-aware balancing with downgrading retry, via execution profiles."""
    keyspace = 'test_rfthree_tokenaware_downgradingcl'
    profile = ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()),
                               DowngradingConsistencyRetryPolicy())
    with TestCluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) as cluster:
        self.rfthree_downgradingcl(cluster, keyspace, False)
def test_no_profile_with_legacy(self):
    """Legacy cluster/session settings and execution profiles are mutually exclusive."""
    # don't construct with both
    self.assertRaises(ValueError, Cluster,
                      load_balancing_policy=RoundRobinPolicy(),
                      execution_profiles={'a': ExecutionProfile()})
    self.assertRaises(
        ValueError, Cluster,
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        execution_profiles={'a': ExecutionProfile()})
    self.assertRaises(
        ValueError, Cluster,
        load_balancing_policy=RoundRobinPolicy(),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        execution_profiles={'a': ExecutionProfile()})

    # can't add after
    cluster = Cluster(load_balancing_policy=RoundRobinPolicy())
    self.assertRaises(ValueError, cluster.add_execution_profile, 'name', ExecutionProfile())

    # session settings lock out profiles
    cluster = Cluster()
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])
    for attr, value in (('default_timeout', 1),
                        ('default_consistency_level', ConsistencyLevel.ANY),
                        ('default_serial_consistency_level', ConsistencyLevel.SERIAL),
                        ('row_factory', tuple_factory)):
        # reset before each setter so that setter alone flips the config mode
        cluster._config_mode = _ConfigMode.UNCOMMITTED
        setattr(session, attr, value)
        self.assertRaises(ValueError, cluster.add_execution_profile, 'name' + attr, ExecutionProfile())

    # don't accept profile
    self.assertRaises(ValueError, session.execute_async, "query", execution_profile='some name here')
def test_unavailable(self):
    """on_unavailable rethrows past the first attempt and downgrades on the first."""
    policy = DowngradingConsistencyRetryPolicy()
    common = dict(query=None, consistency="ONE",
                  required_replicas=3, alive_replicas=1)

    # any retry beyond the first is rethrown
    retry, consistency = policy.on_unavailable(retry_num=1, **common)
    self.assertEqual(retry, RetryPolicy.RETHROW)

    # first attempt: downgrade to a level one live replica can satisfy
    retry, consistency = policy.on_unavailable(retry_num=0, **common)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, ConsistencyLevel.ONE)
def test_write_timeout(self):
    """on_write_timeout decisions across attempt counts and write types."""
    policy = DowngradingConsistencyRetryPolicy()

    def decide(write_type, required, received, attempt):
        # shared boilerplate for each policy invocation
        return policy.on_write_timeout(
            query=None, consistency="ONE", write_type=write_type,
            required_responses=required, received_responses=received,
            retry_num=attempt)

    # second or later attempt: rethrow with no consistency
    retry, consistency = decide(WriteType.SIMPLE, 1, 2, 1)
    self.assertEqual(retry, RetryPolicy.RETHROW)
    self.assertEqual(consistency, None)

    # simple / batch / counter writes that reached a replica are ignored
    for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
        retry, consistency = decide(write_type, 1, 2, 0)
        self.assertEqual(retry, RetryPolicy.IGNORE)

    # unlogged batches retry at a downgraded consistency
    retry, consistency = decide(WriteType.UNLOGGED_BATCH, 3, 1, 0)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, ConsistencyLevel.ONE)

    # batch-log writes retry at the original consistency
    retry, consistency = decide(WriteType.BATCH_LOG, 3, 1, 0)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, "ONE")

    # an unknown write type is rethrown
    retry, consistency = decide(None, 1, 2, 0)
    self.assertEqual(retry, RetryPolicy.RETHROW)
    self.assertEqual(consistency, None)
def test_read_timeout(self):
    """Table-driven verification of on_read_timeout decisions."""
    policy = DowngradingConsistencyRetryPolicy()

    # (required, received, data_retrieved, retry_num,
    #  expected decision, expected consistency)
    scenarios = (
        (1, 2, True, 1, RetryPolicy.RETHROW, None),                  # later attempt
        (4, 3, True, 0, RetryPolicy.RETRY, ConsistencyLevel.THREE),  # too few responses
        (3, 2, True, 0, RetryPolicy.RETRY, ConsistencyLevel.TWO),
        (3, 1, True, 0, RetryPolicy.RETRY, ConsistencyLevel.ONE),
        (3, 0, True, 0, RetryPolicy.RETHROW, None),                  # no responses
        (3, 3, False, 0, RetryPolicy.RETRY, ONE),                    # enough, but no data
        (2, 2, True, 0, RetryPolicy.RETHROW, None),                  # data already retrieved
    )
    for required, received, got_data, attempt, want_decision, want_cl in scenarios:
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE,
            required_responses=required, received_responses=received,
            data_retrieved=got_data, retry_num=attempt)
        self.assertEqual(retry, want_decision)
        self.assertEqual(consistency, want_cl)
def main():
    """Repeatedly stop and restart a node, verifying the driver notices both."""
    while True:
        log.info("\n\n\n\n Starting iteration\n")

        # use a throwaway connection to pick a node to stop
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          connection_class=connection_class)
        cluster.connect(wait_for_all_pools=True)
        hosts = cluster.metadata.all_hosts()
        address = hosts[0].address
        node_to_stop = int(address.split('.')[-1:][0])
        cluster.shutdown()
        log.info("node_to_stop: {0}".format(node_to_stop))

        # reconnect through a node we are not about to stop
        contact_point = '127.0.0.{0}'.format(get_node_not_x(node_to_stop))
        cluster = Cluster(contact_points=[contact_point],
                          protocol_version=PROTOCOL_VERSION,
                          connection_class=connection_class)
        cluster.connect(wait_for_all_pools=True)

        # Fix: bind before the try so the finally cannot hit a NameError
        # if the Cluster constructor below raises.
        cluster2 = None
        try:
            force_stop(node_to_stop)
            wait_for_down(cluster, node_to_stop)
            # Attempt a query against that node. It should complete
            cluster2 = Cluster(contact_points=all_contact_points,
                               protocol_version=PROTOCOL_VERSION,
                               connection_class=connection_class)
            session2 = cluster2.connect()
            session2.execute("SELECT * FROM system.local")
        except Exception as e:
            traceback.print_exc()
        finally:
            if cluster2 is not None:
                cluster2.shutdown()
            log.info("\n\nDone, with cluster2, starting node {0}\n\n".format(node_to_stop))
            start(node_to_stop)
            wait_for_up(cluster, node_to_stop)
            cluster.shutdown()

        log.info("\n\nCluster is shutdown and node {0} should be up\n\n".format(node_to_stop))

        cluster3 = Cluster(
            load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
            default_retry_policy=DowngradingConsistencyRetryPolicy(),
            protocol_version=PROTOCOL_VERSION,
            connection_class=connection_class)
        session = cluster3.connect(wait_for_all_pools=True)

        for _ in range(20):
            hosts = cluster3.metadata.all_hosts()
            log.info("cluster.metadata.all_hosts(): {0}".format(hosts))
            # Fix: the original compared a map() iterator to a list, which is
            # always False on Python 3, so the "all nodes up" branch could
            # never be taken and the loop always raised.
            if len(hosts) == 3 and all(host.is_up for host in hosts):
                log.info("Found all the required nodes")
                break
            time.sleep(1)
        else:
            raise Exception("Some node never came back")

        cluster3.shutdown()
        time.sleep(4)
def test_default_legacy(self):
    """Legacy cluster/session defaults flow into the effective execution profile."""
    cluster = Cluster(load_balancing_policy=RoundRobinPolicy(),
                      default_retry_policy=DowngradingConsistencyRetryPolicy())
    self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY)

    session = Session(cluster, hosts=[])
    session.default_timeout = 3.7
    session.default_consistency_level = ConsistencyLevel.ALL
    session.default_serial_consistency_level = ConsistencyLevel.SERIAL

    future = session.execute_async("query")
    expected = ExecutionProfile(cluster.load_balancing_policy,
                                cluster.default_retry_policy,
                                session.default_consistency_level,
                                session.default_serial_consistency_level,
                                session.default_timeout,
                                session.row_factory)
    self._verify_response_future_profile(future, expected)
def test_statement_params_override_legacy(self):
    """Per-statement settings win over legacy cluster/session defaults."""
    cluster = Cluster(load_balancing_policy=RoundRobinPolicy(),
                      default_retry_policy=DowngradingConsistencyRetryPolicy())
    self.assertEqual(cluster._config_mode, _ConfigMode.LEGACY)
    session = Session(cluster, hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

    statement = SimpleStatement("query",
                                retry_policy=DowngradingConsistencyRetryPolicy(),
                                consistency_level=ConsistencyLevel.ALL,
                                serial_consistency_level=ConsistencyLevel.SERIAL)
    my_timeout = 1.1234

    # sanity: each statement-level setting differs from the legacy default
    self.assertNotEqual(statement.retry_policy.__class__, cluster.default_retry_policy)
    self.assertNotEqual(statement.consistency_level, session.default_consistency_level)
    self.assertNotEqual(statement._serial_consistency_level,
                        session.default_serial_consistency_level)
    self.assertNotEqual(my_timeout, session.default_timeout)

    future = session.execute_async(statement, timeout=my_timeout)
    expected = ExecutionProfile(
        load_balancing_policy=cluster.load_balancing_policy,
        retry_policy=statement.retry_policy,
        request_timeout=my_timeout,
        consistency_level=statement.consistency_level,
        serial_consistency_level=statement._serial_consistency_level)
    self._verify_response_future_profile(future, expected)
def test_write_timeout(self):
    """on_write_timeout for every write-type branch, including the unknown type."""
    policy = DowngradingConsistencyRetryPolicy()
    base = dict(query=None, consistency=ONE)

    # any attempt past the first is rethrown with no consistency
    retry, consistency = policy.on_write_timeout(
        write_type=WriteType.SIMPLE, required_responses=1,
        received_responses=2, retry_num=1, **base)
    self.assertEqual(retry, RetryPolicy.RETHROW)
    self.assertEqual(consistency, None)

    for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
        # at least one response means a replica persisted the write: ignore
        retry, consistency = policy.on_write_timeout(
            write_type=write_type, required_responses=1,
            received_responses=2, retry_num=0, **base)
        self.assertEqual(retry, RetryPolicy.IGNORE)
        # zero responses: can't be sure any replica has the write, so rethrow
        retry, consistency = policy.on_write_timeout(
            write_type=write_type, required_responses=1,
            received_responses=0, retry_num=0, **base)
        self.assertEqual(retry, RetryPolicy.RETHROW)

    # unlogged batch: retry at a downgraded consistency
    retry, consistency = policy.on_write_timeout(
        write_type=WriteType.UNLOGGED_BATCH, required_responses=3,
        received_responses=1, retry_num=0, **base)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, ConsistencyLevel.ONE)

    # batch log: retry at the original consistency
    retry, consistency = policy.on_write_timeout(
        write_type=WriteType.BATCH_LOG, required_responses=3,
        received_responses=1, retry_num=0, **base)
    self.assertEqual(retry, RetryPolicy.RETRY)
    self.assertEqual(consistency, ONE)

    # unknown write type: rethrow
    retry, consistency = policy.on_write_timeout(
        write_type=None, required_responses=1,
        received_responses=2, retry_num=0, **base)
    self.assertEqual(retry, RetryPolicy.RETHROW)
    self.assertEqual(consistency, None)
affiliation text, country text, registration date, PRIMARY KEY (id));') # 刪除table session.execute('DROP TABLE cycling.table_1;') -------------------------- from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import WhiteListRoundRobinPolicy, DowngradingConsistencyRetryPolicy from cassandra.query import tuple_factory #執行配置文件 profile = ExecutionProfile( load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']), retry_policy=DowngradingConsistencyRetryPolicy(), consistency_level=ConsistencyLevel.LOCAL_QUORUM, serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL, request_timeout=15, row_factory=tuple_factory ) cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) session = cluster.connect() print(session.execute("SELECT release_version FROM system.local").one()) -------------------------- 查詢keyspaces/tables/columns狀態 # -*- encoding: utf-8 -*- # 引入Cluster模塊 #from cassandra.cluster import Cluster