def _test_insert_query_from_node(
        self, session, dc_idx, rf_factors, num_nodes_alive, write_cl, read_cl, serial_cl=None, check_ret=True
    ):
        """
        Test availability for read and write via the session passed in as a parameter.
        """
        cluster = self.cluster

        self.log(
            "Connected to %s for %s/%s/%s"
            % (session.cluster.contact_points, self._name(write_cl), self._name(read_cl), self._name(serial_cl))
        )

        start = 0
        end = 100
        age = 30

        if self._should_succeed(write_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in xrange(start, end):
                self.insert_user(session, n, age, write_cl, serial_cl)
        else:
            assert_unavailable(self.insert_user, session, end, age, write_cl, serial_cl)

        if self._should_succeed(read_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in xrange(start, end):
                self.query_user(session, n, age, read_cl, check_ret)
        else:
            assert_unavailable(self.query_user, session, end, age, read_cl, check_ret)
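    # `_should_succeed` is not shown in this example. A minimal hypothetical
    # sketch of the availability check it presumably performs, assuming
    # `rf_factors` and `num_nodes_alive` are per-datacenter lists and standard
    # consistency-level semantics (the exact set of CLs handled is an assumption):
    def _should_succeed(self, cl, rf_factors, num_nodes_alive, dc_idx):
        from cassandra import ConsistencyLevel
        if cl == ConsistencyLevel.ONE:
            return num_nodes_alive[dc_idx] >= 1
        if cl == ConsistencyLevel.LOCAL_QUORUM:
            return num_nodes_alive[dc_idx] >= rf_factors[dc_idx] // 2 + 1
        if cl == ConsistencyLevel.QUORUM:
            return sum(num_nodes_alive) >= sum(rf_factors) // 2 + 1
        if cl == ConsistencyLevel.EACH_QUORUM:
            return all(alive >= rf // 2 + 1
                       for alive, rf in zip(num_nodes_alive, rf_factors))
        if cl == ConsistencyLevel.ALL:
            return sum(num_nodes_alive) >= sum(rf_factors)
        raise ValueError("unhandled consistency level: %s" % cl)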
    def replica_availability_test(self):
        """
        @jira_ticket CASSANDRA-8640

        Regression test for a bug (CASSANDRA-8640) that required all nodes to
        be available in order to run LWT queries, even if the query could
        complete correctly with quorum nodes available.
        """
        session = self.prepare(nodes=3, rf=3)
        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")

        self.cluster.nodelist()[2].stop()
        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")

        self.cluster.nodelist()[1].stop()
        assert_unavailable(
            session.execute,
            "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS")

        self.cluster.nodelist()[1].start(wait_for_binary_proto=True,
                                         wait_other_notice=True)
        session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS")

        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
        session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS")
    def index_query_test(self):
        """
        Check that a secondary index query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])  # see local_query_test below for an explanation of this flag
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test3 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute("CREATE INDEX ON test3 (col)")

        for i in xrange(500):
            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test3 WHERE col < 50 ALLOW FILTERING")), session)
        node.watch_log_for(r"<SELECT \* FROM ks.test3 WHERE col < 50 (.*)> timed out", from_mark=mark, timeout=30)
    def local_query_test(self):
        """
        Check that a query running on the local coordinator node times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
        # iteration of non system queries, so that these queries take much longer to complete,
        # see ReadCommand.withStateTracking()
        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test1 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in xrange(500):
            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test1")), session)
        node.watch_log_for(r"<SELECT \* FROM ks.test1 (.*)> timed out", from_mark=mark, timeout=30)
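# The numbers above are deliberately mismatched: with a 100 ms pause per
# iteration, scanning the 500 inserted rows takes far longer than the
# 1-second read timeout, so the SELECT must time out. Sanity-checking that
# arithmetic with the values from the test:
read_request_timeout_ms = 1000  # from set_configuration_options above
iteration_delay_ms = 100        # from -Dcassandra.test.read_iteration_delay_ms
rows = 500
assert rows * iteration_delay_ms > read_request_timeout_ms  # 50000 ms >> 1000 ms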
    def local_query_test(self):
        """
        Check that a query running on the local coordinator node times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
        # iteration of non system queries, so that these queries take much longer to complete,
        # see ReadCommand.withStateTracking()
        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                                        "-Dcassandra.test.read_iteration_delay_ms=1500"])
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test1 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in xrange(500):
            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node.mark_log()
        statement = SimpleStatement("SELECT * from test1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)
        node.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def index_query_test(self):
        """
        Check that a secondary index query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                                        "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test3 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute("CREATE INDEX ON test3 (col)")

        for i in xrange(500):
            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node.mark_log()
        statement = session.prepare("SELECT * from test3 WHERE col < ? ALLOW FILTERING")
        statement.consistency_level = ConsistencyLevel.ONE
        statement.retry_policy = FallthroughRetryPolicy()
        assert_unavailable(lambda c: debug(c.execute(statement, [50])), session)
        node.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, jvm_args=["-Djoin_ring=false"])  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in xrange(500):
            session.execute("INSERT INTO test2 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node2.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test2")), session)
        node2.watch_log_for(r"<SELECT \* FROM ks.test2 (.*)> timed out", from_mark=mark, timeout=30)
    def _test_insert_query_from_node(self,
                                     session,
                                     dc_idx,
                                     rf_factors,
                                     num_nodes_alive,
                                     write_cl,
                                     read_cl,
                                     serial_cl=None,
                                     check_ret=True):
        """
        Test availability for read and write via the session passed in as a parameter.
        """
        cluster = self.cluster

        self.log("Connected to %s for %s/%s/%s" %
                 (session.cluster.contact_points, self._name(write_cl),
                  self._name(read_cl), self._name(serial_cl)))

        start = 0
        end = 100
        age = 30

        if self._should_succeed(write_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in xrange(start, end):
                self.insert_user(session, n, age, write_cl, serial_cl)
        else:
            assert_unavailable(self.insert_user, session, end, age, write_cl,
                               serial_cl)

        if self._should_succeed(read_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in xrange(start, end):
                self.query_user(session, n, age, read_cl, check_ret)
        else:
            assert_unavailable(self.query_user, session, end, age, read_cl,
                               check_ret)
    def materialized_view_test(self):
        """
        Check that a materialized view query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                          "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test4 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 "
                         "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)"))

        for i in xrange(50):
            session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node2.mark_log()
        statement = SimpleStatement("SELECT * FROM mv WHERE col = 50", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)
        node2.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def quorum_quorum_test(self):
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor1 = self.patient_cql_connection(node1).cursor()
        self.create_ks(cursor1, 'ks', 3)
        create_c1c2_table(self, cursor1)

        cursor2 = self.patient_cql_connection(node2, 'ks').cursor()

        # insert and get at CL.QUORUM
        for n in xrange(0, 100):
            insert_c1c2(cursor1, n, "QUORUM")
            query_c1c2(cursor2, n, "QUORUM")

        # shut down a node and test again
        node3.stop(wait_other_notice=True)
        for n in xrange(100, 200):
            insert_c1c2(cursor1, n, "QUORUM")
            query_c1c2(cursor2, n, "QUORUM")

        # shut down another node and check we get an Unavailable exception
        node2.stop(wait_other_notice=True)
        assert_unavailable(insert_c1c2, cursor1, 200, "QUORUM")
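# The availability pattern above follows directly from the quorum size: for
# rf=3 a quorum is two replicas, so QUORUM operations keep succeeding with one
# node down and only fail once a second node stops. A quick check, assuming
# the usual quorum definition:
def quorum(rf):
    return rf // 2 + 1

assert quorum(3) == 2          # two replicas must respond
assert 2 >= quorum(3)          # one node down: QUORUM still satisfiable
assert not (1 >= quorum(3))    # two nodes down: expect Unavailable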
    def quorum_quorum_test(self):
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor1 = self.patient_cql_connection(node1).cursor()
        self.create_ks(cursor1, "ks", 3)
        create_c1c2_table(self, cursor1)

        cursor2 = self.patient_cql_connection(node2, "ks").cursor()

        # insert and get at CL.QUORUM
        for n in xrange(0, 100):
            insert_c1c2(cursor1, n, "QUORUM")
            query_c1c2(cursor2, n, "QUORUM")

        # shut down a node and test again
        node3.stop(wait_other_notice=True)
        for n in xrange(100, 200):
            insert_c1c2(cursor1, n, "QUORUM")
            query_c1c2(cursor2, n, "QUORUM")

        # shut down another node and check we get an Unavailable exception
        node2.stop(wait_other_notice=True)
        assert_unavailable(insert_c1c2, cursor1, 200, "QUORUM")
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
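# Of the helpers exercised above, `assert_almost_equal` is the least obvious:
# assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0) passes, which suggests
# `error` is a relative tolerance applied against the largest value. A sketch
# consistent with that behaviour (an assumption, not the canonical
# implementation):
def assert_almost_equal(*args, **kwargs):
    error = kwargs.get('error', 0.16)
    vmax, vmin = max(args), min(args)
    assert vmin > vmax * (1.0 - error) or vmin == vmax, \
        "values not within %.2f%% of the max: %s" % (error * 100, args)

assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)  # 1 > 1.9 * 0.0 holds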
    def quorum_quorum_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM)

        # Stop a node and retest
        self.cluster.nodelist()[2].stop()
        for n in xrange(0, 100):
            insert_c1c2(session, n, ConsistencyLevel.QUORUM)
            query_c1c2(session2, n, ConsistencyLevel.QUORUM)

        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.QUORUM)
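    # `cl_cl_prepare` is a setup helper shared by these CL tests. Judging from
    # the longer variants elsewhere in this collection, it presumably builds
    # the three-node cluster, creates the keyspace and c1c2 table, and runs a
    # baseline insert/query round at the two consistency levels before
    # returning both sessions. A hypothetical sketch:
    def cl_cl_prepare(self, write_cl, read_cl):
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        create_c1c2_table(self, session)

        session2 = self.patient_cql_connection(node2, 'ks')

        # baseline round with all nodes up
        for n in xrange(0, 100):
            insert_c1c2(session, n, write_cl)
            query_c1c2(session2, n, read_cl)

        return session, session2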
    def simple_repair_test(self):
        """
        Test that a materialized view is consistent after a simple repair.
        """

        session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Shutdown node2')
        node2.stop(wait_other_notice=True)

        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        self._replay_batchlogs()

        debug('Verify the data in the MV with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
        for i in xrange(1000):
            statement = SimpleStatement(
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                consistency_level=ConsistencyLevel.ALL
            )

            assert_unavailable(
                session.execute,
                statement
            )

        debug('Start node2, and repair')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node1.repair()

        debug('Verify the data in the MV with CL=ONE. All should be available now.')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ONE
            )
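    # `_replay_batchlogs` is a helper from the materialized-view tests: MV
    # updates are routed through the batchlog, so the test forces a replay
    # before verifying. A plausible sketch using `nodetool replaybatchlog`
    # (assuming that command is available in the version under test):
    def _replay_batchlogs(self):
        for node in self.cluster.nodelist():
            if node.is_running():
                node.nodetool("replaybatchlog")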
    def logged_batch_throws_uae_test(self):
        """ Test that logged batch throws UAE if there aren't enough live nodes """
        cursor = self.prepare(nodes=3)
        [node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:]]
        cursor.consistency_level = 'ONE'
        assert_unavailable(cursor.execute, """
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
            APPLY BATCH
        """)
    def logged_batch_throws_uae_test(self):
        """ Test that logged batch throws UAE if there aren't enough live nodes """
        session = self.prepare(nodes=3)
        [node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:]]
        session.consistency_level = 'ONE'
        assert_unavailable(session.execute, """
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
            APPLY BATCH
        """)
    def all_one_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ALL, ConsistencyLevel.ONE)

        # Stop a node and retest
        self.cluster.nodelist()[2].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)

        # Stop a node and retest
        self.cluster.nodelist()[1].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)
        for n in xrange(0, 100):
            query_c1c2(session2, n, ConsistencyLevel.ONE)
    def ttl_is_respected_on_repair_test(self):
        """ Test that ttl is respected on repair """

        self.prepare()
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 1};
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)

        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        time.sleep(7)
        self.node1.stop()
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 2};
        """)
        self.node1.repair(['ks'])
        ttl_start = time.time()
        ttl_session1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')
        self.node1.stop()

        assert_row_count(session2, 'ttl_table', 1)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]]
        )

        # Check that the TTL on both servers is the same
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session1 = ttl_session1[0][0] - (time.time() - ttl_start)
        assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
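# The TTL comparison at the end needs the elapsed-time adjustment because the
# two SELECT ttl(col1) reads happen at different wall-clock moments;
# subtracting the elapsed seconds rebases the first reading onto the second.
# A worked example with illustrative numbers:
ttl_at_read1 = 990.0  # TTL seen via session1 right after repair
elapsed = 3.2         # seconds spent stopping node1 before the second read
ttl_at_read2 = 986.8  # TTL seen via session2 a moment later

adjusted = ttl_at_read1 - elapsed          # rebase to the second read's clock
assert abs(adjusted - ttl_at_read2) <= 0.005 * ttl_at_read2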
    def replica_availability_test(self):
        # See CASSANDRA-8640
        session = self.prepare(nodes=3, rf=3)
        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")

        self.cluster.nodelist()[2].stop()
        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")

        self.cluster.nodelist()[1].stop()
        assert_unavailable(session.execute, "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS")

        self.cluster.nodelist()[1].start(wait_for_binary_proto=True, wait_other_notice=True)
        session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS")

        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
        session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS")
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailabile message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
    def local_query_test(self):
        """
        Check that a query running on the local coordinator node times out:

        - set a 1-second read timeout
        - start the cluster with read_iteration_delay set to 1.5 seconds
            - (this will cause read queries to take longer than the read timeout)
        - CREATE and INSERT into a table
        - SELECT * from the table using a retry policy that never retries, and assert it times out

        @jira_ticket CASSANDRA-7392
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
        # iteration of non system queries, so that these queries take much longer to complete,
        # see ReadCommand.withStateTracking()
        cluster.populate(1).start(wait_for_binary_proto=True,
                                  jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                            "-Dcassandra.test.read_iteration_delay_ms=1500"])
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test1 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in range(500):
            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node.mark_log()
        statement = SimpleStatement("SELECT * from test1",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)
        node.watch_log_for("operations timed out", from_mark=mark, timeout=60)
    def ttl_is_respected_on_repair_test(self):
        """ Test that ttl is respected on repair """

        self.prepare()
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 1};
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)

        assert_all(self.session1, "SELECT * FROM ttl_table;",
                   [[1, 1, None, None], [2, 2, None, None]])
        time.sleep(7)
        self.node1.stop()
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 2};
        """)
        self.node1.repair(['ks'])
        ttl_start = time.time()
        ttl_session1 = self.session1.execute(
            'SELECT ttl(col1) FROM ttl_table;')
        self.node1.stop()

        assert_row_count(session2, 'ttl_table', 1)
        assert_all(session2, "SELECT * FROM ttl_table;", [[2, 2, None, None]])

        # Check that the TTL on both servers is the same
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session1 = ttl_session1[0][0] - (time.time() - ttl_start)
        assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
    def all_one_test(self):
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor1 = self.patient_cql_connection(node1).cursor()
        self.create_ks(cursor1, 'ks', 3)
        create_c1c2_table(self, cursor1)

        cursor2 = self.patient_cql_connection(node2, 'ks').cursor()

        # insert and get at CL.ONE
        for n in xrange(0, 100):
            insert_c1c2(cursor1, n, "ALL")
            query_c1c2(cursor2, n, "ONE")

        # shut down a node and test again
        node3.stop(wait_other_notice=True)
        assert_unavailable(insert_c1c2, cursor1, 100, "ALL")
    def all_one_test(self):
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor1 = self.patient_cql_connection(node1).cursor()
        self.create_ks(cursor1, "ks", 3)
        create_c1c2_table(self, cursor1)

        cursor2 = self.patient_cql_connection(node2, "ks").cursor()

        # insert and get at CL.ONE
        for n in xrange(0, 100):
            insert_c1c2(cursor1, n, "ALL")
            query_c1c2(cursor2, n, "ONE")

        # shut down a node and test again
        node3.stop(wait_other_notice=True)
        assert_unavailable(insert_c1c2, cursor1, 100, "ALL")
    def all_all_test(self):
        cluster = self.cluster

        cluster.populate(3).start()
        [node1, node2, node3] = cluster.nodelist()

        cursor1 = self.cql_connection(node1).cursor()
        self.create_ks(cursor1, 'ks', 3)
        create_c1c2_table(self, cursor1)

        cursor2 = self.cql_connection(node2, 'ks').cursor()

        # insert and get at CL.ALL
        for n in xrange(0, 100):
            insert_c1c2(cursor1, n, "ALL")
            query_c1c2(cursor2, n, "ALL")

        # shut down one node and check we get an Unavailable exception
        node3.stop(wait_other_notice=True)
        assert_unavailable(insert_c1c2, cursor1, 100, "ALL")
    def replica_availability_test(self):
        """
        @jira_ticket CASSANDRA-8640

        Regression test for a bug (CASSANDRA-8640) that required all nodes to
        be available in order to run LWT queries, even if the query could
        complete correctly with quorum nodes available.
        """
        session = self.prepare(nodes=3, rf=3)
        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
        session.execute("INSERT INTO test (k, v) VALUES (0, 0) IF NOT EXISTS")

        self.cluster.nodelist()[2].stop()
        session.execute("INSERT INTO test (k, v) VALUES (1, 1) IF NOT EXISTS")

        self.cluster.nodelist()[1].stop()
        assert_unavailable(session.execute, "INSERT INTO test (k, v) VALUES (2, 2) IF NOT EXISTS")

        self.cluster.nodelist()[1].start(wait_for_binary_proto=True, wait_other_notice=True)
        session.execute("INSERT INTO test (k, v) VALUES (3, 3) IF NOT EXISTS")

        self.cluster.nodelist()[2].start(wait_for_binary_proto=True)
        session.execute("INSERT INTO test (k, v) VALUES (4, 4) IF NOT EXISTS")
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                          "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int,
                col int,
                val text,
                PRIMARY KEY(id, col)
            );
        """)

        for i in xrange(500):
            for j in xrange(10):
                session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))

        mark = node2.mark_log()

        statement = SimpleStatement("SELECT * from test2", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id IN (1, 10,  20) AND col < 10", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        node2.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out:

        - populate the cluster with 2 nodes
        - set a 1-second read timeout
        - start one node without having it join the ring
        - start the other node with read_iteration_delay set to 1.5 seconds
            - (this will cause read queries to take longer than the read timeout)
        - CREATE a table
        - INSERT 5000 rows on a session on the node that is not a member of the ring
        - run SELECT statements and assert they fail
        # TODO refactor SELECT statements:
        #        - run the statements in a loop to reduce duplication
        #        - watch the log after each query
        #        - assert we raise the right error
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True,
                    jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                              "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int,
                col int,
                val text,
                PRIMARY KEY(id, col)
            );
        """)

        for i, j in itertools.product(range(500), range(10)):
            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))

        mark = node2.mark_log()

        statement = SimpleStatement("SELECT * from test2",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id = 1",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id IN (1, 10,  20) AND col < 10",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        node2.watch_log_for("operations timed out", from_mark=mark, timeout=60)
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out:

        - populate the cluster with 2 nodes
        - set a 1-second read timeout
        - start one node without having it join the ring
        - start the other node with read_iteration_delay set to 1.5 seconds
            - (this will cause read queries to take longer than the read timeout)
        - CREATE a table
        - INSERT 5000 rows on a session on the node that is not a member of the ring
        - run SELECT statements and assert they fail
        # TODO refactor SELECT statements:
        #        - run the statements in a loop to reduce duplication
        #        - watch the log after each query
        #        - assert we raise the right error
        """
        cluster = self.cluster
        cluster.set_configuration_options(
            values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True,
                    join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True,
                    jvm_args=[
                        "-Dcassandra.monitoring_check_interval_ms=50",
                        "-Dcassandra.test.read_iteration_delay_ms=1500"
                    ])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int,
                col int,
                val text,
                PRIMARY KEY(id, col)
            );
        """)

        for i, j in itertools.product(range(500), range(10)):
            session.execute(
                "INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))

        mark = node2.mark_log()

        statement = SimpleStatement("SELECT * from test2",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id = 1",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement(
            "SELECT * from test2 where id IN (1, 10,  20) AND col < 10",
            consistency_level=ConsistencyLevel.ONE,
            retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement(
            "SELECT * from test2 where col > 5 ALLOW FILTERING",
            consistency_level=ConsistencyLevel.ONE,
            retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        node2.watch_log_for("operations timed out", from_mark=mark, timeout=60)
    def all_all_test(self):
        session, session2 = self.cl_cl_prepare(ConsistencyLevel.ALL, ConsistencyLevel.ALL)

        self.cluster.nodelist()[2].stop()
        assert_unavailable(insert_c1c2, session, 100, ConsistencyLevel.ALL)