Example #1
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Kill a node while waiting for the cluster to register the death.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        cluster = Cluster(metrics_enabled=True)
        session = cluster.connect()

        # Test write
        session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
        results = session.execute(query, {'k': 1})
        self.assertEqual(1, results[0].v)

        # Stop the ccm node gracefully and wait for the process to exit
        get_node(1).stop(wait=True, gently=True)

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(Unavailable, session.execute, query)
            self.assertEqual(1, cluster.metrics.stats.unavailables)

            # Test read
            query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(Unavailable, session.execute, query, {'k': 1})
            self.assertEqual(2, cluster.metrics.stats.unavailables)
        finally:
            get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
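Every example in this collection calls a get_node() helper that is never defined in the snippets themselves. As a minimal sketch of what it presumably does, assuming a module-level ccm cluster handle (here called CCM_CLUSTER, a name not taken from these snippets), it is just an index-to-node lookup:

# Hypothetical sketch of the get_node() helper used throughout these examples.
# CCM_CLUSTER is assumed to be the ccmlib cluster object managed by the test harness.
def get_node(node_id):
    # ccm names its nodes "node1", "node2", ..., so the index maps directly to a name
    return CCM_CLUSTER.nodes['node%s' % node_id]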
Example #2
    def test_write_timeout(self):
        """
        Trigger and ensure write_timeouts are counted
        Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
        Attempt a write at cl.ALL and receive a WriteTimeout.
        """

        # Test write
        self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1",
                                consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(self.session, query)
        self.assertTrue(results)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                self.session.execute(query, timeout=None)
            self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)

        finally:
            get_node(1).resume()
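execute_until_pass, used in the example above and in several later ones, is also not defined in any of these snippets. A minimal sketch, assuming it simply retries the statement while transient timeouts occur during cluster churn:

from cassandra import OperationTimedOut, ReadTimeout, WriteTimeout

# Hypothetical sketch of execute_until_pass: retry a statement until it succeeds,
# re-raising only if the last attempt still fails with a transient timeout.
def execute_until_pass(session, query, max_attempts=20):
    for attempt in range(max_attempts):
        try:
            return session.execute(query)
        except (OperationTimedOut, ReadTimeout, WriteTimeout):
            if attempt == max_attempts - 1:
                raise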
Example #3
    def test_read_timeout(self):
        """
        Trigger and ensure read_timeouts are counted
        Write a key, value pair. Force kill a node without waiting for the cluster to register the death.
        Attempt a read at cl.ALL and receive a ReadTimeout.
        """

        cluster = Cluster(metrics_enabled=True)
        session = cluster.connect()

        # Test write
        session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
        results = session.execute(query, {'k': 1})
        self.assertEqual(1, results[0].v)

        # Force kill ccm node
        get_node(1).stop(wait=False, gently=False)

        try:
            # Test read
            query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(ReadTimeout, session.execute, query, {'k': 1})
            self.assertEqual(1, cluster.metrics.stats.read_timeouts)

        finally:
            get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
Example #4
    def test_raise_error_on_control_connection_timeout(self):
        """
        Test for initial control connection timeout

        test_raise_error_on_control_connection_timeout tests that the driver times out after the configured initial
        connection timeout. It first pauses node1, essentially making it unreachable. It then attempts to connect a
        Cluster to node1 with a connect timeout of 1 second, and ensures that a NoHostAvailable is raised whose message
        includes an OperationTimedOut for the 1 second timeout.

        @expected_errors NoHostAvailable When node1 is paused, and a connection attempt is made.
        @since 2.6.0
        @jira_ticket PYTHON-206
        @expected_result NoHostAvailable exception should be raised after 1 second.

        @test_category connection
        """

        get_node(1).pause()
        cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)

        with self.assertRaisesRegexp(NoHostAvailable, r"OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"):
            cluster.connect()
        cluster.shutdown()

        get_node(1).resume()
Example #5
    def test_read_timeout(self):
        """
        Trigger and ensure read_timeouts are counted
        Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
        Attempt a read at cl.ALL and receive a ReadTimeout.
        """


        # Test write
        self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(self.session, query)
        self.assertTrue(results)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test read
            query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(ReadTimeout):
                self.session.execute(query, timeout=None)
            self.assertEqual(1, self.cluster.metrics.stats.read_timeouts)

        finally:
            get_node(1).resume()
Example #6
    def test_write_timeout(self):
        """
        Trigger and ensure write_timeouts are counted
        Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
        Attempt a write at cl.ALL and receive a WriteTimeout.
        """

        cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION)
        session = cluster.connect("test3rf")

        # Test write
        session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(session, query)
        self.assertTrue(results)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                session.execute(query, timeout=None)
            self.assertEqual(1, cluster.metrics.stats.write_timeouts)

        finally:
            get_node(1).resume()

        cluster.shutdown()
Example #7
    def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self):
        """
        Tests for the default retry policy in combination with lightweight transactions.

        @since 3.17
        @jira_ticket PYTHON-1007
        @expected_result the query is retried with the default CL, not the serial one.

        @test_category policy
        """
        ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL,
                              serial_consistency_level=ConsistencyLevel.SERIAL)

        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep})
        session = cluster.connect()

        session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};")
        session.execute("CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);")
        session.execute('INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)', {'0': 42, '1': 'testing'})

        get_node(2).stop()
        get_node(4).stop()

        # before fix: cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="SERIAL is not
        # supported as conditional update commit consistency. ....""

        # after fix: cassandra.Unavailable (expected since replicas are down)
        with self.assertRaises(Unavailable) as cm:
            session.execute("update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'")

        exception = cm.exception
        self.assertEqual(exception.consistency, ConsistencyLevel.SERIAL)
        self.assertEqual(exception.required_replicas, 2)
        self.assertEqual(exception.alive_replicas, 1)
Example #8
    def test_metrics_per_cluster(self):
        """
        Test to validate that metrics can be scoped to individual clusters
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metrics should be scoped to a cluster level

        @test_category metrics
        """

        cluster2 = Cluster(metrics_enabled=True,
                           protocol_version=PROTOCOL_VERSION,
                           default_retry_policy=FallthroughRetryPolicy())
        cluster2.connect(self.ks_name, wait_for_all_pools=True)

        self.assertEqual(len(cluster2.metadata.all_hosts()), 3)

        query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name),
                                consistency_level=ConsistencyLevel.ALL)
        self.session.execute(query)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement(
                "INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(
                    self.ks_name),
                consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                self.session.execute(query, timeout=None)
        finally:
            get_node(1).resume()

        # Change the scales stats_name of the cluster2
        cluster2.metrics.set_stats_name('cluster2-metrics')

        stats_cluster1 = self.cluster.metrics.get_stats()
        stats_cluster2 = cluster2.metrics.get_stats()

        # Test direct access to stats
        self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
        self.assertEqual(0, cluster2.metrics.stats.write_timeouts)

        # Test direct access to a child stats
        self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean'])
        self.assertEqual(0.0, cluster2.metrics.request_timer['mean'])

        # Test access via metrics.get_stats()
        self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean'])
        self.assertEqual(0.0, stats_cluster2['request_timer']['mean'])

        # Test access by stats_name
        self.assertEqual(
            0.0,
            scales.getStats()['cluster2-metrics']['request_timer']['mean'])

        cluster2.shutdown()
Example #9
    def test_metrics_per_cluster(self):
        """
        Test to validate that metrics can be scoped to individual clusters
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metrics should be scoped to a cluster level

        @test_category metrics
        """

        cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION,
                           default_retry_policy=FallthroughRetryPolicy())
        cluster2.connect(self.ks_name, wait_for_all_pools=True)

        self.assertEqual(len(cluster2.metadata.all_hosts()), 3)

        query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
        self.session.execute(query)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement("INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                self.session.execute(query, timeout=None)
        finally:
            get_node(1).resume()

        # Change the scales stats_name of the cluster2
        cluster2.metrics.set_stats_name('cluster2-metrics')

        stats_cluster1 = self.cluster.metrics.get_stats()
        stats_cluster2 = cluster2.metrics.get_stats()

        # Test direct access to stats
        self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
        self.assertEqual(0, cluster2.metrics.stats.write_timeouts)

        # Test direct access to a child stats
        self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean'])
        self.assertEqual(0.0, cluster2.metrics.request_timer['mean'])

        # Test access via metrics.get_stats()
        self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean'])
        self.assertEqual(0.0, stats_cluster2['request_timer']['mean'])

        # Test access by stats_name
        self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean'])

        cluster2.shutdown()
Example #10
    def test_heart_beat_timeout(self):
        # Set up a host listener to ensure the nodes don't go down
        test_listener = TestHostListener()
        host = "127.0.0.1"
        node = get_node(1)
        initial_connections = self.fetch_connections(host, self.cluster)
        self.assertNotEqual(len(initial_connections), 0)
        self.cluster.register_listener(test_listener)
        # Pause the node
        node.pause()
        # Wait for connections associated with this host to go away
        self.wait_for_no_connections(host, self.cluster)
        # Resume paused node
        node.resume()
        # Run a query to ensure connections are re-established
        current_host = ""
        count = 0
        while current_host != host and count < 100:
            rs = self.session.execute_async("SELECT * FROM system.local",
                                            trace=False)
            rs.result()
            current_host = str(rs._current_host)
            count += 1
            time.sleep(.1)
        self.assertLess(count, 100, "Never connected to the first node")
        new_connections = self.wait_for_connections(host, self.cluster)
        self.assertIsNone(test_listener.host_down)
        # Make sure underlying new connections don't match previous ones
        for connection in initial_connections:
            self.assertFalse(connection in new_connections)
Example #11
    def setUp(self):
        """
        Setup sessions and pause node1
        """
        self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT: ExecutionProfile(
                    load_balancing_policy=HostFilterPolicy(
                        RoundRobinPolicy(), lambda host: host.address == "127.0.0.1"
                    )
                )
            }
        )

        self.session = self.cluster.connect(wait_for_all_pools=True)

        self.control_connection_host_number = 1
        self.node_to_stop = get_node(self.control_connection_host_number)

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node_to_stop.pause()
Example #12
    def test_heart_beat_timeout(self):
        # Set up a host listener to ensure the nodes don't go down
        test_listener = TestHostListener()
        host = "127.0.0.1"
        node = get_node(1)
        initial_connections = self.fetch_connections(host, self.cluster)
        self.assertNotEqual(len(initial_connections), 0)
        self.cluster.register_listener(test_listener)
        # Pause the node
        node.pause()
        # Wait for connections associated with this host to go away
        self.wait_for_no_connections(host, self.cluster)
        # Resume paused node
        node.resume()
        # Run a query to ensure connections are re-established
        current_host = ""
        count = 0
        while current_host != host and count < 100:
            rs = self.session.execute_async("SELECT * FROM system.local", trace=False)
            rs.result()
            current_host = str(rs._current_host)
            count += 1
            time.sleep(.1)
        self.assertLess(count, 100, "Never connected to the first node")
        new_connections = self.wait_for_connections(host, self.cluster)
        self.assertIsNone(test_listener.host_down)
        # Make sure underlying new connections don't match previous ones
        for connection in initial_connections:
            self.assertFalse(connection in new_connections)
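TestHostListener, used in the two heartbeat tests above, is only referenced, never shown. Judging from register_listener() and the host_down attribute, it is presumably a small cassandra.policies.HostStateListener subclass that records callbacks; a minimal sketch under that assumption:

from cassandra.policies import HostStateListener

# Hypothetical sketch of TestHostListener: record the host (if any) that the
# driver reports as down so the test can assert nothing was marked down.
class TestHostListener(HostStateListener):
    def __init__(self):
        self.host_down = None

    def on_down(self, host):
        self.host_down = host

    def on_up(self, host):
        pass

    def on_add(self, host):
        pass

    def on_remove(self, host):
        pass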
Example #13
    def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self):
        """
        Tests for the default retry policy in combination with lightweight transactions.

        @since 3.17
        @jira_ticket PYTHON-1007
        @expected_result the query is retried with the default CL, not the serial one.

        @test_category policy
        """
        ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL,
                              serial_consistency_level=ConsistencyLevel.SERIAL)

        cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep})
        session = cluster.connect()

        session.execute(
            "CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};"
        )
        session.execute(
            "CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);"
        )
        session.execute(
            'INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)',
            {
                '0': 42,
                '1': 'testing'
            })

        get_node(2).stop()
        get_node(4).stop()

        # before fix: cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="SERIAL is not
        # supported as conditional update commit consistency. ....""

        # after fix: cassandra.Unavailable (expected since replicas are down)
        with self.assertRaises(Unavailable) as cm:
            session.execute(
                "update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'"
            )

        exception = cm.exception
        self.assertEqual(exception.consistency, ConsistencyLevel.SERIAL)
        self.assertEqual(exception.required_replicas, 2)
        self.assertEqual(exception.alive_replicas, 1)
Example #14
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        cluster = Cluster(metrics_enabled=True,
                          protocol_version=PROTOCOL_VERSION)
        session = cluster.connect("test3rf")

        # Test write
        session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1",
                                consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(session, query)
        self.assertEqual(1, len(results))

        # Stop node gracefully
        get_node(1).stop(wait=True, wait_other_notice=True)

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                session.execute(query)
            self.assertEqual(1, cluster.metrics.stats.unavailables)

            # Test read
            query = SimpleStatement("SELECT * FROM test",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                session.execute(query, timeout=None)
            self.assertEqual(2, cluster.metrics.stats.unavailables)
        finally:
            get_node(1).start(wait_other_notice=True,
                              wait_for_binary_proto=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        cluster.shutdown()
Example #15
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Kill a node while waiting for the cluster to register the death.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        cluster = Cluster(metrics_enabled=True,
                          protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()

        # Test write
        session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s",
                                consistency_level=ConsistencyLevel.ALL)
        results = session.execute(query, {'k': 1})
        self.assertEqual(1, results[0].v)

        # Stop the ccm node gracefully and wait for the process to exit
        get_node(1).stop(wait=True, gently=True)
        time.sleep(5)

        try:
            # Test write
            query = SimpleStatement(
                "INSERT INTO test3rf.test (k, v) VALUES (2, 2)",
                consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(Unavailable, session.execute, query)
            self.assertEqual(1, cluster.metrics.stats.unavailables)

            # Test read
            query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s",
                                    consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(Unavailable, session.execute, query, {'k': 1})
            self.assertEqual(2, cluster.metrics.stats.unavailables)
        finally:
            get_node(1).start(wait_other_notice=True,
                              wait_for_binary_proto=True)

        cluster.shutdown()
Example #16
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        # Test write
        self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1",
                                consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(self.session, query)
        self.assertTrue(results)

        # Stop node gracefully
        # Sometimes this command returns before the other nodes have noticed that
        # node 1 is down, and a Timeout error is returned instead of an Unavailable
        get_node(1).stop(wait=True, wait_other_notice=True)
        time.sleep(5)
        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                self.session.execute(query)
            self.assertEqual(self.cluster.metrics.stats.unavailables, 1)

            # Test read
            query = SimpleStatement("SELECT * FROM test",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                self.session.execute(query, timeout=None)
            self.assertEqual(self.cluster.metrics.stats.unavailables, 2)
        finally:
            get_node(1).start(wait_other_notice=True,
                              wait_for_binary_proto=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        self.cluster.shutdown()
Example #17
    def test_removed_node_stops_reconnecting(self):
        """ Ensure we stop reconnecting after a node is removed. PYTHON-1181 """
        use_cluster("test_down_then_removed", [3], start=True)

        state_listener = StateListener()
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.addCleanup(cluster.shutdown)
        cluster.register_listener(state_listener)
        session = cluster.connect(wait_for_all_pools=True)

        get_node(3).nodetool("disablebinary")

        wait_until(condition=lambda: state_listener.downed_host is not None, delay=2, max_attempts=50)
        self.assertTrue(state_listener.downed_host.is_currently_reconnecting())

        decommission(3)

        wait_until(condition=lambda: state_listener.removed_host is not None, delay=2, max_attempts=50)
        self.assertIs(state_listener.downed_host, state_listener.removed_host)  # Just a sanity check
        self.assertFalse(state_listener.removed_host.is_currently_reconnecting())
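wait_until and StateListener come from the integration test utilities and are not reproduced here. A rough sketch of wait_until, with its behaviour inferred only from the call sites above (condition callable, delay in seconds, bounded number of attempts):

import time

# Hypothetical sketch of wait_until: poll a condition, sleeping `delay` seconds
# between checks, and fail if it never becomes true within max_attempts checks.
def wait_until(condition, delay, max_attempts):
    for _ in range(max_attempts):
        if condition():
            return
        time.sleep(delay)
    raise AssertionError("Condition never became true")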
Example #18
def decommission(node):
    if (DSE_VERSION and DSE_VERSION >= Version("5.1")) or CASSANDRA_VERSION >= Version("4.0"):
        # CASSANDRA-12510: newer versions refuse to decommission a node when that
        # would leave a keyspace below its replication factor, unless forced
        get_node(node).decommission(force=True)
    else:
        get_node(node).decommission()
    get_node(node).stop()
Example #19
    def test_write_timeout(self):
        """
        Trigger and ensure write_timeouts are counted
        Write a key, value pair. Force kill a node without waiting for the cluster to register the death.
        Attempt a write at cl.ALL and receive a WriteTimeout.
        """

        cluster = Cluster(metrics_enabled=True,
                          protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()

        # Test write
        session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s",
                                consistency_level=ConsistencyLevel.ALL)
        results = session.execute(query, {'k': 1})
        self.assertEqual(1, results[0].v)

        # Force kill ccm node
        get_node(1).stop(wait=False, gently=False)

        try:
            # Test write
            query = SimpleStatement(
                "INSERT INTO test3rf.test (k, v) VALUES (2, 2)",
                consistency_level=ConsistencyLevel.ALL)
            self.assertRaises(WriteTimeout,
                              session.execute,
                              query,
                              timeout=None)
            self.assertEqual(1, cluster.metrics.stats.write_timeouts)

        finally:
            get_node(1).start(wait_other_notice=True,
                              wait_for_binary_proto=True)

        cluster.shutdown()
Example #20
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        cluster = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION)
        session = cluster.connect("test3rf")

        # Test write
        session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(session, query)
        self.assertTrue(results)

        # Stop node gracefully
        get_node(1).stop(wait=True, wait_other_notice=True)

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                session.execute(query)
            self.assertEqual(1, cluster.metrics.stats.unavailables)

            # Test read
            query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                session.execute(query, timeout=None)
            self.assertEqual(2, cluster.metrics.stats.unavailables)
        finally:
            get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        cluster.shutdown()
Example #21
    def test_unavailable(self):
        """
        Trigger and ensure unavailables are counted
        Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state.
        Attempt an insert/read at cl.ALL and receive an Unavailable exception.
        """

        # Test write
        self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1", consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(self.session, query)
        self.assertTrue(results)

        # Stop node gracefully
        # Sometimes this command returns before the other nodes have noticed that
        # node 1 is down, and a Timeout error is returned instead of an Unavailable
        get_node(1).stop(wait=True, wait_other_notice=True)
        time.sleep(5)
        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                self.session.execute(query)
            self.assertEqual(self.cluster.metrics.stats.unavailables, 1)

            # Test read
            query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(Unavailable):
                self.session.execute(query, timeout=None)
            self.assertEqual(self.cluster.metrics.stats.unavailables, 2)
        finally:
            get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        self.cluster.shutdown()
Example #22
def use_cluster_with_graph(num_nodes):
    """
    This is a workaround to account for the fact that Spark nodes will conflict over master assignment
    when started all at once.
    """
    if USE_CASS_EXTERNAL:
        set_default_dse_ip()
        return

    # Create the cluster but don't start it.
    use_singledc(start=False, workloads=['graph', 'spark'])
    # Start first node.
    get_node(1).start(wait_for_binary_proto=True)
    # Wait for the binary protocol port to open
    wait_for_node_socket(get_node(1), 120)
    # Wait for spark master to start up
    spark_master_http = ("localhost", 7080)
    common.check_socket_listening(spark_master_http, timeout=60)
    tmp_cluster = Cluster(protocol_version=PROTOCOL_VERSION)

    # Set the dse_leases keyspace replication before starting the remaining nodes.
    try:
        session = tmp_cluster.connect()
        statement = "ALTER KEYSPACE dse_leases WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'dc1': '%d'}" % (
            num_nodes)
        session.execute(statement)
    finally:
        tmp_cluster.shutdown()

    # Start up the remaining nodes.
    for i in range(1, num_nodes + 1):
        if i != 1:
            node = get_node(i)
            node.start(wait_for_binary_proto=True)
            wait_for_node_socket(node, 120)

    # Wait for workers to show up as Alive on master
    wait_for_spark_workers(3, 120)
Example #23
    def test_raise_error_on_control_connection_timeout(self):
        """
        Test for initial control connection timeout

        test_raise_error_on_control_connection_timeout tests that the driver times out after the configured initial
        connection timeout. It first pauses node1, essentially making it unreachable. It then attempts to connect a
        Cluster to node1 with a connect timeout of 1 second, and ensures that a NoHostAvailable is raised whose message
        includes an OperationTimedOut for the 1 second timeout.

        @expected_errors NoHostAvailable When node1 is paused, and a connection attempt is made.
        @since 2.6.0
        @jira_ticket PYTHON-206
        @expected_result NoHostAvailable exception should be raised after 1 second.

        @test_category connection
        """

        get_node(1).pause()
        cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)

        with self.assertRaisesRegexp(NoHostAvailable, r"OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"):
            cluster.connect()

        get_node(1).resume()
Example #24
def wait_for_up(cluster, node):
    tries = 0
    addr = IP_FORMAT % node
    while tries < 100:
        host = cluster.metadata.get_host(addr)
        if host and host.is_up:
            wait_for_node_socket(get_node(node), 60)
            log.debug("Done waiting for node %s to be up", node)
            return
        else:
            log.debug("Host {} is still marked down, waiting".format(addr))
            tries += 1
            time.sleep(1)

    # todo: don't mix string interpolation methods in the same package
    raise RuntimeError("Host {0} is not up after {1} attempts".format(addr, tries))
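wait_for_node_socket is another helper that is only called, never defined, in these examples. A plausible sketch, assuming it simply waits for the node's binary (native protocol) port to start listening, much like the common.check_socket_listening call in Example #22:

import logging

from ccmlib import common

log = logging.getLogger(__name__)

# Hypothetical sketch of wait_for_node_socket: block until the node's binary
# port accepts connections, or log a warning once the timeout expires.
def wait_for_node_socket(node, timeout):
    binary_itf = node.network_interfaces['binary']
    if not common.check_socket_listening(binary_itf, timeout=timeout):
        log.warning("Unable to connect to binary port for node %s", node.name)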
Example #25
    def setUp(self):
        """
        Setup sessions and pause node1
        """

        # self.node1, self.node2, self.node3 = get_cluster().nodes.values()
        self.node1 = get_node(1)
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect()

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node1.pause()
Example #26
def decommission(node):
    try:
        get_node(node).decommission()
    except ToolError as e:
        expected_errs = (('Not enough live nodes to maintain replication '
                          'factor in keyspace system_distributed'),
                         'Perform a forceful decommission to ignore.')
        for err in expected_errs:
            assert_in(err, e.stdout)
        # in this case, we're running against a C* version with CASSANDRA-12510
        # applied and need to decommission with `--force`
        get_node(node).decommission(force=True)
    get_node(node).stop()
Example #27
    def setUp(self):
        """
        Setup sessions and pause node1
        """
        self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(
                    load_balancing_policy=WhiteListRoundRobinPolicy(
                        ['127.0.0.1']))
            })
        self.session = self.cluster.connect()

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node1 = get_node(1)
        self.node1.pause()
Example #28
    def test_blocking_connections(self):
        """
        Verify that reconnection is working as expected when connections are being blocked.
        """
        res = run('which iptables'.split(' '))
        if res.returncode != 0:
            self.skipTest("iptables isn't installed")

        self.create_ks_and_cf()
        self.create_data(self.session)
        self.query_data(self.session)

        node1_ip_address, node1_port = get_node(1).network_interfaces['binary']

        def remove_iptables():
            run((
                'sudo iptables -t filter -D INPUT -p tcp --dport {node1_port} '
                '--destination {node1_ip_address}/32 -j REJECT --reject-with icmp-port-unreachable'
            ).format(node1_ip_address=node1_ip_address,
                     node1_port=node1_port).split(' '))

        self.addCleanup(remove_iptables)

        for i in range(3):
            run((
                'sudo iptables -t filter -A INPUT -p tcp --dport {node1_port} '
                '--destination {node1_ip_address}/32 -j REJECT --reject-with icmp-port-unreachable'
            ).format(node1_ip_address=node1_ip_address,
                     node1_port=node1_port).split(' '))
            time.sleep(5)
            try:
                self.query_data(self.session, verify_in_tracing=False)
            except OperationTimedOut:
                pass
            remove_iptables()
            time.sleep(5)
            self.query_data(self.session, verify_in_tracing=False)

        self.query_data(self.session)
Example #29
    def setUp(self):
        """
        Setup sessions and pause node1
        """

        # self.node1, self.node2, self.node3 = get_cluster().nodes.values()

        node1 = ExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={EXEC_PROFILE_DEFAULT: node1})
        self.session = self.cluster.connect(wait_for_all_pools=True)

        self.control_connection_host_number = 1
        self.node_to_stop = get_node(self.control_connection_host_number)

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node_to_stop.pause()
Example #30
    def setUp(self):
        """
        Setup sessions and pause node1
        """

        # self.node1, self.node2, self.node3 = get_cluster().nodes.values()

        node1 = ExecutionProfile(
            load_balancing_policy=HostFilterPolicy(
                RoundRobinPolicy(), lambda host: host.address == "127.0.0.1"
            )
        )
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, execution_profiles={EXEC_PROFILE_DEFAULT: node1})
        self.session = self.cluster.connect(wait_for_all_pools=True)

        self.control_connection_host_number = 1
        self.node_to_stop = get_node(self.control_connection_host_number)

        ddl = '''
            CREATE TABLE test3rf.timeout (
                k int PRIMARY KEY,
                v int )'''
        self.session.execute(ddl)
        self.node_to_stop.pause()
Example #31
def ring(node):
    get_node(node).nodetool('ring')
Example #32
def ring(node):
    print('From node%s:' % node)
    get_node(node).nodetool('ring')
Example #33
def force_stop(node):
    log.debug("Forcing stop of node %s", node)
    get_node(node).stop(wait=False, gently=False)
    log.debug("Node %s was stopped", node)
Example #34
def stop(node):
    get_node(node).stop()
Example #35
def start(node):
    get_node(node).start()
Example #36
def force_stop(node):
    get_node(node).stop(wait=False, gently=False)
Example #37
def ring(node):
    print("From node%s:" % node)
    get_node(node).nodetool("ring")
Example #38
def decommission(node):
    get_node(node).decommission()
    get_node(node).stop()
Example #39
def force_stop(node):
    get_node(node).stop(wait=False, gently=False)
Example #40
def decommission(node):
    if (DSE_VERSION >= "5.1"):
        get_node(node).decommission(force=True)
    else:
        get_node(node).decommission()
    get_node(node).stop()
Example #41
def ring(node):
    get_node(node).nodetool('ring')