def _assert_none(self, query):
     """
     Assert query returns nothing.
     @param query Query to run
     """
     decorated_query = self._decorate_query(query)
     assert_none(self.session, decorated_query)
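For reference, the assert_none helper that every example below calls behaves roughly like the sketch that follows. This is an assumption reconstructed from the call sites in this listing (a session, a query string, and an optional cl consistency level), not the verbatim helper from the test framework these examples come from:

from cassandra.query import SimpleStatement


def assert_none(session, query, cl=None):
    # Run the query at the requested consistency level and assert that it returns no rows (sketch).
    statement = SimpleStatement(query, consistency_level=cl)
    rows = list(session.execute(statement))
    assert rows == [], "Expected no rows for '{}', but got {}".format(query, rows)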
Example #2
    def short_read_delete_test(self):
        """ Test short reads ultimately leaving no columns alive [#4000] """
        cluster = self.cluster

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        create_cf(session, 'cf', read_repair=0.0)
        # insert 2 columns in one row
        insert_columns(self, session, 0, 2)

        # Delete the row while first node is dead
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')

        query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        node1.start(wait_other_notice=True)

        # Query first column
        session = self.patient_cql_connection(node1, 'ks')

        assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
    def invalid_entries_removed_from_size_estimates_on_restart_test(self):
        """
        Entries for dropped tables/keyspaces should be cleared from size_estimates on restart.

        @jira_ticket CASSANDRA-14905
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)
        session.execute("USE system;")
        session.execute(
            "INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ( 'system_auth', 'bad_table', '-5', '5', 0, 0);"
        )
        # Invalid keyspace and table
        session.execute(
            "INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ( 'bad_keyspace', 'bad_table', '-5', '5', 0, 0);"
        )
        node.stop()
        node.start()
        session = self.patient_cql_connection(node)
        assert_none(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='system_auth' AND table_name='bad_table'"
        )
        assert_none(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='bad_keyspace'"
        )
Example #4
    def test_ttl_is_replicated(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """
        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL)
        session1.execute(query)
        assert_all(session1,
                   "SELECT * FROM ttl_table;", [[1, 1, None, None]],
                   cl=ConsistencyLevel.ALL)
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        assert abs(ttl_session1[0][0] - ttl_session2[0][0]) <= 1

        time.sleep(7)

        assert_none(session1,
                    "SELECT * FROM ttl_table;",
                    cl=ConsistencyLevel.ALL)
    def short_read_delete_test(self):
        """ Test short reads ultimately leaving no columns alive [#4000] """
        cluster = self.cluster

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        create_cf(session, 'cf', read_repair=0.0)
        # insert 2 columns in one row
        insert_columns(self, session, 0, 2)

        # Delete the row while first node is dead
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')

        query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        node1.start(wait_other_notice=True)

        # Query first column
        session = self.patient_cql_connection(node1, 'ks')

        assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
Example #6
 def test_refresh_size_estimates_clears_invalid_entries(self):
     """
     @jira_ticket CASSANDRA-14905
      nodetool refreshsizeestimates should clear up entries for tables that no longer exist
     """
     cluster = self.cluster
     cluster.populate(1)
     node = cluster.nodelist()[0]
     cluster.start()
     session = self.patient_exclusive_cql_connection(node)
     session.execute("USE system;")
     # Valid keyspace but invalid table
     session.execute(
         "INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('system_auth', 'bad_table', '-5', '5', 0, 0);"
     )
     # Invalid keyspace and table
     session.execute(
         "INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('bad_keyspace', 'bad_table', '-5', '5', 0, 0);"
     )
     node.nodetool('refreshsizeestimates')
     assert_none(
         session,
         "SELECT * FROM size_estimates WHERE keyspace_name='system_auth' AND table_name='bad_table'"
     )
     assert_none(
         session,
         "SELECT * FROM size_estimates WHERE keyspace_name='bad_keyspace'")
    def test_ttl_is_replicated(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """
        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL
        )
        session1.execute(query)
        assert_all(
            session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None]],
            cl=ConsistencyLevel.ALL
        )
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        assert abs(ttl_session1[0][0] - ttl_session2[0][0]) <= 1

        time.sleep(7)

        assert_none(session1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
    def test_assertions(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unavailable("Dummy Unavailabile message.")
        })
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{
            'execute.side_effect':
            InvalidRequest("Dummy InvalidRequest message.")
        })
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unauthorized("Dummy Unauthorized message.")
        })
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i]
                                                  for i in range(0, 10)])
        assert_all(mock_session,
                   "SELECT k, v FROM test", [[i, i] for i in range(0, 10)],
                   ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
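The helpers exercised above come from the same assertion utilities; as a rough sketch inferred from the call sites in this test (not the actual implementation), assert_one and assert_row_count could look like this:

from cassandra.query import SimpleStatement


def assert_one(session, query, expected, cl=None):
    # Execute the query and assert that exactly one row, equal to expected, comes back (sketch).
    statement = SimpleStatement(query, consistency_level=cl)
    rows = [list(row) for row in session.execute(statement)]
    assert rows == [expected], "Expected {}, got {}".format([expected], rows)


def assert_row_count(session, table_name, expected):
    # Assert that the table holds exactly the expected number of rows (sketch).
    rows = list(session.execute("SELECT COUNT(*) FROM {}".format(table_name)))
    assert rows[0][0] == expected, "Expected {} rows in {}, got {}".format(expected, table_name, rows[0][0])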
Example #9
    def _cleanup_when_no_replica(self, with_index=False):
        """
        @jira_ticket CASSANDRA-13526
        Test that nodetool cleanup on a keyspace removes old data when the current node no longer owns a replica, instead of directly returning success.
        """
        self.cluster.populate([1, 1]).start(wait_for_binary_proto=True, wait_other_notice=True)

        node_dc1 = self.cluster.nodelist()[0]
        node_dc2 = self.cluster.nodelist()[1]

        # init schema with rf on both data centers
        replication_factor = {'dc1': 1, 'dc2': 1}
        session = self.patient_exclusive_cql_connection(node_dc1, consistency_level=ConsistencyLevel.ALL)
        session_dc2 = self.patient_exclusive_cql_connection(node_dc2, consistency_level=ConsistencyLevel.LOCAL_ONE)
        create_ks(session, 'ks', replication_factor)
        session.execute('CREATE TABLE ks.cf (id int PRIMARY KEY, value text) with dclocal_read_repair_chance = 0 AND read_repair_chance = 0;', trace=False)
        if with_index:
            session.execute('CREATE INDEX value_by_key on ks.cf(value)', trace=False)

        # populate data
        for i in range(0, 100):
            session.execute(SimpleStatement("INSERT INTO ks.cf(id, value) VALUES({}, 'value');".format(i), consistency_level=ConsistencyLevel.ALL))

        # generate sstable
        self.cluster.flush()

        for node in self.cluster.nodelist():
            self.assertNotEqual(0, len(node.get_sstables('ks', 'cf')))
        if with_index:
            self.assertEqual(len(list(session_dc2.execute("SELECT * FROM ks.cf WHERE value = 'value'"))), 100)

        # alter rf to only dc1
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 1, 'dc2' : 0};")

        # nodetool cleanup on dc2
        node_dc2.nodetool("cleanup ks cf")
        node_dc2.nodetool("compact ks cf")

        # check local data on dc2
        for node in self.cluster.nodelist():
            if node.data_center == 'dc2':
                self.assertEqual(0, len(node.get_sstables('ks', 'cf')))
            else:
                self.assertNotEqual(0, len(node.get_sstables('ks', 'cf')))

        # dc1 data remains
        statement = SimpleStatement("SELECT * FROM ks.cf", consistency_level=ConsistencyLevel.LOCAL_ONE)
        self.assertEqual(len(list(session.execute(statement))), 100)
        if with_index:
            statement = SimpleStatement("SELECT * FROM ks.cf WHERE value = 'value'", consistency_level=ConsistencyLevel.LOCAL_ONE)
            self.assertEqual(len(list(session.execute(statement))), 100)

        # alter rf so only dc2 is replicated; querying dc2 should find no data and no index entries
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : 0, 'dc2' : 1};")
        assert_none(session_dc2, "SELECT * FROM ks.cf")
        if with_index:
            assert_none(session_dc2, "SELECT * FROM ks.cf WHERE value = 'value'")
Example #10
    def short_read_quorum_delete_test(self):
        """
        @jira_ticket CASSANDRA-8933
        """
        cluster = self.cluster
        # Consider 3 nodes A, B, C (RF=3) and the following sequence of operations (all done at QUORUM):

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(
            values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(3).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)

        session.execute(
            "CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0"
        )
        # we write 1 and 2 in a partition: all nodes get it.
        session.execute(
            SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)",
                            consistency_level=ConsistencyLevel.ALL))
        session.execute(
            SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)",
                            consistency_level=ConsistencyLevel.ALL))

        # we delete 1: only A and C get it.
        node2.flush()
        node2.stop(wait_other_notice=True)
        session.execute(
            SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1",
                            consistency_level=ConsistencyLevel.QUORUM))
        node2.start(wait_other_notice=True)

        # we delete 2: only B and C get it.
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')
        session.execute(
            SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2",
                            consistency_level=ConsistencyLevel.QUORUM))
        node1.start(wait_other_notice=True)
        session = self.patient_cql_connection(node1, 'ks')

        # we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
        node3.flush()
        node3.stop(wait_other_notice=True)
        assert_none(session,
                    "SELECT * FROM t WHERE id = 0 LIMIT 1",
                    cl=ConsistencyLevel.QUORUM)
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))'
        )

        # the blob is only here to make the row bigger internally so it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute(
                "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})"
                .format(0, r, r, bigish_blob, r * 2, r * 3))

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute(
                "UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
    def test_upgrade_with_range_tombstone_ae(self):
        """
        A certain range tombstone pattern causes an AssertionError during upgrade.
        This test makes sure it won't happen.

        @jira_ticket CASSANDRA-12203
        """
        session = self._setup_cluster()
        session.execute('CREATE TABLE test (k ascii, c1 ascii, c2 int, c3 int, val text, PRIMARY KEY (k, c1, c2, c3))')
        session.execute("DELETE FROM ks.test WHERE k = 'a' AND c1 = 'a'")
        session.execute("DELETE FROM ks.test WHERE k = 'a' AND c1 = 'a' AND c2 = 1")
        session = self._do_upgrade()
        assert_none(session, "SELECT k FROM test")
Example #13
    def test_upgrade_with_range_tombstone_ae(self):
        """
        A certain range tombstone pattern causes an AssertionError during upgrade.
        This test makes sure it won't happen.

        @jira_ticket CASSANDRA-12203
        """
        session = self._setup_cluster()
        session.execute('CREATE TABLE test (k ascii, c1 ascii, c2 int, c3 int, val text, PRIMARY KEY (k, c1, c2, c3))')
        session.execute("DELETE FROM ks.test WHERE k = 'a' AND c1 = 'a'")
        session.execute("DELETE FROM ks.test WHERE k = 'a' AND c1 = 'a' AND c2 = 1")
        session = self._do_upgrade()
        assert_none(session, "SELECT k FROM test")
    def drop_table_reflected_in_size_estimates_test(self):
        """
        A dropped table should result in its entries being removed from size estimates, both on
        nodes that are up and on nodes that are down at the time of the drop.

        @jira_ticket CASSANDRA-14905
        """
        cluster = self.cluster
        cluster.populate(2).start()
        node1, node2 = cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, 'ks1', 2)
        create_ks(session, 'ks2', 2)
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        node1.nodetool('refreshsizeestimates')
        node2.nodetool('refreshsizeestimates')
        node2.stop()
        session.execute('DROP TABLE ks2.cf1')
        session.execute('DROP KEYSPACE ks1')
        node2.start(wait_for_binary_proto=True)
        session2 = self.patient_exclusive_cql_connection(node2)

        session.cluster.control_connection.wait_for_schema_agreement()

        assert_none(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks1'")
        assert_none(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf1'")
        assert_some(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf2'")
        assert_none(session2, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks1'")
        assert_none(session2, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf1'")
        assert_some(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf2'")
    def test_compaction_strategy_switching(self, strategy):
        """
        Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check against data loss.
        """
        strategies = [
            'LeveledCompactionStrategy', 'SizeTieredCompactionStrategy',
            'DateTieredCompactionStrategy'
        ]

        if strategy in strategies:
            strategies.remove(strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                create_ks(session, 'ks', 1)

                session.execute(
                    "create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'"
                    + strategy + "'};")

                for x in range(0, 100):
                    session.execute('insert into ks.cf (key, val) values (' +
                                    str(x) + ',1)')

                node1.flush()

                for x in range(0, 10):
                    session.execute('delete from cf where key = ' + str(x))

                session.execute(
                    "alter table ks.cf with compaction = {'class':'" + strat +
                    "'};")

                for x in range(11, 100):
                    assert_one(session,
                               "select * from ks.cf where key =" + str(x),
                               [x, 1])

                for x in range(0, 10):
                    assert_none(session,
                                'select * from cf where key = ' + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute("CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))")

        # the blob is only here to make the row bigger internally so it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute(
                "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})".format(
                    0, r, r, bigish_blob, r * 2, r * 3
                )
            )

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute("UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
Example #17
    def test_ignore_failure_policy(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log(r"ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        assert failure, "Cannot find the commitlog failure message in logs"
        assert self.node1.is_running(), "Node1 should still be running"

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            assert not self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored."
            assert self.node1.is_running(), "Node1 should still be running after an ignore error on CL"
        else:
            with pytest.raises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
    def ignore_failure_policy_test(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running")

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            self.assertFalse(self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored.")
            self.assertTrue(self.node1.is_running(), "Node1 should still be running after an ignore error on CL")
        else:
            with self.assertRaises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
 def test_refresh_size_estimates_clears_invalid_entries(self):
     """
     @jira_ticket CASSANDRA-14905
      nodetool refreshsizeestimates should clear up entries for tables that no longer exist
     """
     cluster = self.cluster
     cluster.populate(1)
     node = cluster.nodelist()[0]
     cluster.start()
     session = self.patient_exclusive_cql_connection(node)
     session.execute("USE system;")
     # Valid keyspace but invalid table
     session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('system_auth', 'bad_table', '-5', '5', 0, 0);")
     # Invalid keyspace and table
     session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ('bad_keyspace', 'bad_table', '-5', '5', 0, 0);")
     node.nodetool('refreshsizeestimates')
     assert_none(session, "SELECT * FROM size_estimates WHERE keyspace_name='system_auth' AND table_name='bad_table'")
     assert_none(session, "SELECT * FROM size_estimates WHERE keyspace_name='bad_keyspace'")
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
    def invalid_entries_removed_from_size_estimates_on_restart_test(self):
        """
        Entries for dropped tables/keyspaces should be cleared from size_estimates on restart.

        @jira_ticket CASSANDRA-14905
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)
        session.execute("USE system;")
        session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ( 'system_auth', 'bad_table', '-5', '5', 0, 0);")
        # Invalid keyspace and table
        session.execute("INSERT INTO size_estimates (keyspace_name, table_name, range_start, range_end, mean_partition_size, partitions_count) VALUES ( 'bad_keyspace', 'bad_table', '-5', '5', 0, 0);")
        node.stop()
        node.start()
        session = self.patient_cql_connection(node)
        assert_none(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='system_auth' AND table_name='bad_table'")
        assert_none(session, "SELECT * FROM system.size_estimates WHERE keyspace_name='bad_keyspace'")
Example #22
    def test_udf_overload(self):

        session = self.prepare(nodes=3)

        session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
        session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

        # create overloaded udfs
        session.execute(
            "CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'"
        )
        session.execute(
            "CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'"
        )
        session.execute(
            "CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'"
        )
        session.execute(
            "CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'"
        )

        # ensure that it works with correct specificity
        if self.cluster.version() < LooseVersion('4.1'):
            assert_invalid(session,
                           "SELECT v FROM tab WHERE k = overloaded('foo')")
        else:
            assert_none(session,
                        "SELECT v FROM tab WHERE k = overloaded('foo')")
        assert_none(session,
                    "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
        assert_none(session,
                    "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
        assert_none(session,
                    "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

        # try non-existent functions
        assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
        assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

        # try dropping overloaded - should fail because ambiguous
        assert_invalid(session, "DROP FUNCTION overloaded")
        session.execute("DROP FUNCTION overloaded(varchar)")
        assert_invalid(session,
                       "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
        session.execute("DROP FUNCTION overloaded(text, text)")
        assert_invalid(
            session,
            "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
        session.execute("DROP FUNCTION overloaded(ascii)")
        assert_invalid(session,
                       "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
        # should now work - unambiguous
        session.execute("DROP FUNCTION overloaded")
    def short_read_quorum_delete_test(self):
        """
        @jira_ticket CASSANDRA-8933
        """
        cluster = self.cluster
        # Consider 3 nodes A, B, C (RF=3) and the following sequence of operations (all done at QUORUM):

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(3).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)

        session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0")
        # we write 1 and 2 in a partition: all nodes get it.
        session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)", consistency_level=ConsistencyLevel.ALL))
        session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)", consistency_level=ConsistencyLevel.ALL))

        # we delete 1: only A and C get it.
        node2.flush()
        node2.stop(wait_other_notice=True)
        session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1", consistency_level=ConsistencyLevel.QUORUM))
        node2.start(wait_other_notice=True)

        # we delete 2: only B and C get it.
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')
        session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2", consistency_level=ConsistencyLevel.QUORUM))
        node1.start(wait_other_notice=True)
        session = self.patient_cql_connection(node1, 'ks')

        # we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
        node3.flush()
        node3.stop(wait_other_notice=True)
        assert_none(session, "SELECT * FROM t WHERE id = 0 LIMIT 1", cl=ConsistencyLevel.QUORUM)
    def udf_overload_test(self):

        session = self.prepare(nodes=3)

        session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
        session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

        # create overloaded udfs
        session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")

        # ensure that it works with correct specificity
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

        # try non-existent functions
        assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
        assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

        # try dropping overloaded - should fail because ambiguous
        assert_invalid(session, "DROP FUNCTION overloaded")
        session.execute("DROP FUNCTION overloaded(varchar)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
        session.execute("DROP FUNCTION overloaded(text, text)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
        session.execute("DROP FUNCTION overloaded(ascii)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
        # should now work - unambiguous
        session.execute("DROP FUNCTION overloaded")
    def drop_table_reflected_in_size_estimates_test(self):
        """
        A dropped table should result in its entries being removed from size estimates, both on
        nodes that are up and on nodes that are down at the time of the drop.

        @jira_ticket CASSANDRA-14905
        """
        cluster = self.cluster
        cluster.populate(2).start()
        node1, node2 = cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, 'ks1', 2)
        create_ks(session, 'ks2', 2)
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        node1.nodetool('refreshsizeestimates')
        node2.nodetool('refreshsizeestimates')
        node2.stop()
        session.execute('DROP TABLE ks2.cf1')
        session.execute('DROP KEYSPACE ks1')
        node2.start(wait_for_binary_proto=True)
        session2 = self.patient_exclusive_cql_connection(node2)

        session.cluster.control_connection.wait_for_schema_agreement()

        assert_none(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks1'")
        assert_none(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf1'"
        )
        assert_some(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf2'"
        )
        assert_none(
            session2,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks1'")
        assert_none(
            session2,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf1'"
        )
        assert_some(
            session,
            "SELECT * FROM system.size_estimates WHERE keyspace_name='ks2' AND table_name='cf2'"
        )
Example #26
    def test_compaction_strategy_switching(self, strategy):
        """
        Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check against data loss.
        """
        strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']

        if strategy in strategies:
            strategies.remove(strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                create_ks(session, 'ks', 1)

                session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + strategy + "'};")

                for x in range(0, 100):
                    session.execute('insert into ks.cf (key, val) values (' + str(x) + ',1)')

                node1.flush()

                for x in range(0, 10):
                    session.execute('delete from cf where key = ' + str(x))

                session.execute("alter table ks.cf with compaction = {'class':'" + strat + "'};")

                for x in range(11, 100):
                    assert_one(session, "select * from ks.cf where key =" + str(x), [x, 1])

                for x in range(0, 10):
                    assert_none(session, 'select * from cf where key = ' + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
Example #27
    def compaction_delete_test(self):
        """
        Test that executing a delete properly tombstones a row.
        Insert data, delete a partition of data and check that the requisite rows are tombstoned.
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 1)

        session.execute(
            "create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'"
            + self.strategy + "'} and gc_grace_seconds = 30;")

        for x in range(0, 100):
            session.execute('insert into cf (key, val) values (' + str(x) +
                            ',1)')

        node1.flush()
        for x in range(0, 10):
            session.execute('delete from cf where key = ' + str(x))

        node1.flush()
        for x in range(0, 10):
            assert_none(session, 'select * from cf where key = ' + str(x))

        json_path = tempfile.mkstemp(suffix='.json')
        jname = json_path[1]
        with open(jname, 'w') as f:
            node1.run_sstable2json(f)

        with open(jname, 'r') as g:
            jsoninfo = g.read()

        numfound = jsoninfo.count("markedForDeleteAt")

        self.assertEqual(numfound, 10)
    def compaction_delete_test(self):
        """
        Test that executing a delete properly tombstones a row.
        Insert data, delete a partition of data and check that the requisite rows are tombstoned.
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)

        session.execute("create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'" + self.strategy + "'} and gc_grace_seconds = 30;")

        for x in range(0, 100):
            session.execute('insert into cf (key, val) values (' + str(x) + ',1)')

        node1.flush()
        for x in range(0, 10):
            session.execute('delete from cf where key = ' + str(x))

        node1.flush()
        for x in range(0, 10):
            assert_none(session, 'select * from cf where key = ' + str(x))

        json_path = tempfile.mkstemp(suffix='.json')
        jname = json_path[1]
        with open(jname, 'w') as f:
            node1.run_sstable2json(f)

        with open(jname, 'r') as g:
            jsoninfo = g.read()

        numfound = jsoninfo.count("markedForDeleteAt")

        self.assertEqual(numfound, 10)
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130

        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute(
                "CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(
            1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")

        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)

        # Wipe out data and restart
        cluster.clear()
        cluster.start()

        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)

        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")

        # Add some additional data before loading the SSTable, to check that it will still be accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')

        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)

        # Check that the index is marked as built and the index has been rebuilt
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8",
                   [[0, 1, 8], [0, 2, 8]])
    def test_size_estimates_multidc(self):
        """
        Test that primary ranges are correctly generated on
        system.size_estimates for a multi-dc, multi-ks scenario
        @jira_ticket CASSANDRA-9639
        """
        logger.debug("Creating cluster")
        cluster = self.cluster
        cluster.set_configuration_options(values={'num_tokens': 2})
        cluster.populate([2, 1])
        node1_1, node1_2, node2_1 = cluster.nodelist()

        logger.debug("Setting tokens")
        node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
                                                    '-2506475074448728501,8473270337963525440',
                                                    '-3736333188524231709,8673615181726552074']
        node1_1.set_configuration_options(values={'initial_token': node1_tokens})
        node1_2.set_configuration_options(values={'initial_token': node2_tokens})
        node2_1.set_configuration_options(values={'initial_token': node3_tokens})
        cluster.set_configuration_options(values={'num_tokens': 2})

        logger.debug("Starting cluster")
        cluster.start()

        out, _, _ = node1_1.nodetool('ring')
        logger.debug("Nodetool ring output {}".format(out))

        logger.debug("Creating keyspaces")
        session = self.patient_cql_connection(node1_1)
        create_ks(session, 'ks1', 3)
        create_ks(session, 'ks2', {'dc1': 2})
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        logger.debug("Refreshing size estimates")
        node1_1.nodetool('refreshsizeestimates')
        node1_2.nodetool('refreshsizeestimates')
        node2_1.nodetool('refreshsizeestimates')

        """
        CREATE KEYSPACE ks1 WITH replication =
            {'class': 'SimpleStrategy', 'replication_factor': '3'}
        CREATE KEYSPACE ks2 WITH replication =
            {'class': 'NetworkTopologyStrategy', 'dc1': '2'}  AND durable_writes = true;

        Datacenter: dc1
        ==========
        Address     Token
                    8473270337963525440
        127.0.0.1   -6639341390736545756
        127.0.0.1   -2688160409776496397
        127.0.0.2   -2506475074448728501
        127.0.0.2   8473270337963525440

        Datacenter: dc2
        ==========
        Address     Token
                    8673615181726552074
        127.0.0.3   -3736333188524231709
        127.0.0.3   8673615181726552074
        """

        logger.debug("Checking node1_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8673615181726552074', '-9223372036854775808']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-6639341390736545756', '-3736333188524231709'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8473270337963525440', '8673615181726552074'],
                                                            ['8673615181726552074', '-9223372036854775808']])

        logger.debug("Checking node1_2 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_2)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])

        logger.debug("Checking node2_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node2_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-6639341390736545756', '-3736333188524231709'],
                                                            ['8473270337963525440', '8673615181726552074']])
        assert_none(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks2'")
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130

        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute("CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")

        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)

        # Wipe out data and restart
        cluster.clear()
        cluster.start()

        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)

        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")

        # Add some additional data before loading the SSTable, to check that it will still be accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')

        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)

        # Check that the index is marked as built and the index has been rebuilt
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8", [[0, 1, 8], [0, 2, 8]])
    def test_replicated_system_keyspaces(self):
        cluster = self.cluster
        cluster.populate(1).start()

        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        # ALTER KEYSPACE should work for system_auth, system_distributed, and system_traces
        stmt = """
            ALTER KEYSPACE system_auth
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_distributed
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_traces
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            SELECT replication
            FROM system_schema.keyspaces
            WHERE keyspace_name IN ('system_auth', 'system_distributed', 'system_traces');"""
        replication = {
            'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy',
            'datacenter1': '1'
        }
        assert_all(session, stmt,
                   [[replication], [replication], [replication]])

        # DROP KEYSPACE should fail for system_auth, system_distributed, and system_traces
        assert_exception(session,
                         'DROP KEYSPACE system_auth;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP KEYSPACE system_distributed;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP KEYSPACE system_traces;',
                         expected=Unauthorized)

        # CREATE TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(
            session,
            'CREATE TABLE system_auth.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        assert_exception(
            session,
            'CREATE TABLE system_distributed.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        assert_exception(
            session,
            'CREATE TABLE system_traces.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        # ALTER TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         "ALTER TABLE system_auth.roles WITH comment = '';",
                         expected=Unauthorized)

        assert_exception(
            session,
            "ALTER TABLE system_distributed.repair_history WITH comment = '';",
            expected=Unauthorized)

        assert_exception(
            session,
            "ALTER TABLE system_traces.sessions WITH comment = '';",
            expected=Unauthorized)

        # DROP TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         'DROP TABLE system_auth.roles;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP TABLE system_distributed.repair_history;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP TABLE system_traces.sessions;',
                         expected=Unauthorized)
    def test_replicated_system_keyspaces(self):
        cluster = self.cluster
        cluster.populate(1).start()

        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        # ALTER KEYSPACE should work for system_auth, system_distributed, and system_traces
        stmt = """
            ALTER KEYSPACE system_auth
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_distributed
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_traces
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            SELECT replication
            FROM system_schema.keyspaces
            WHERE keyspace_name IN ('system_auth', 'system_distributed', 'system_traces');"""
        replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '1'}
        assert_all(session, stmt, [[replication], [replication], [replication]])

        # DROP KEYSPACE should fail for system_auth, system_distributed, and system_traces
        assert_exception(session, 'DROP KEYSPACE system_auth;', expected=Unauthorized)
        assert_exception(session, 'DROP KEYSPACE system_distributed;', expected=Unauthorized)
        assert_exception(session, 'DROP KEYSPACE system_traces;', expected=Unauthorized)

        # CREATE TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         'CREATE TABLE system_auth.new_table (id int PRIMARY KEY);',
                         expected=Unauthorized)

        assert_exception(session,
                         'CREATE TABLE system_distributed.new_table (id int PRIMARY KEY);',
                         expected=Unauthorized)

        assert_exception(session,
                         'CREATE TABLE system_traces.new_table (id int PRIMARY KEY);',
                         expected=Unauthorized)

        # ALTER TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         "ALTER TABLE system_auth.roles WITH comment = '';",
                         expected=Unauthorized)

        assert_exception(session,
                         "ALTER TABLE system_distributed.repair_history WITH comment = '';",
                         expected=Unauthorized)

        assert_exception(session,
                         "ALTER TABLE system_traces.sessions WITH comment = '';",
                         expected=Unauthorized)

        # DROP TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session, 'DROP TABLE system_auth.roles;', expected=Unauthorized)
        assert_exception(session, 'DROP TABLE system_distributed.repair_history;', expected=Unauthorized)
        assert_exception(session, 'DROP TABLE system_traces.sessions;', expected=Unauthorized)