Example #1
    def query_users(self, session):
        ret = list(session.execute("SELECT * FROM users"))
        ret.extend(list(session.execute("SELECT * FROM users WHERE state='TX'")))
        ret.extend(list(session.execute("SELECT * FROM users WHERE gender='f'")))
        ret.extend(list(session.execute("SELECT * FROM users WHERE birth_year=1978")))
        assert_length_equal(ret, 8)
        return ret
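
Every example on this page exercises the dtest helper assert_length_equal(object_with_length, expected_length). Its implementation is not reproduced here; a minimal sketch of the behaviour the examples rely on (an assumption for illustration, not the real code) could look like this:

# Assumed sketch of the helper used throughout these examples; the real
# implementation lives in the dtest assertions/tools module.
def assert_length_equal(object_with_length, expected_length):
    # Fail with a descriptive message if the collection's length differs from the expected value.
    assert len(object_with_length) == expected_length, \
        "Expected {} to have length {}, but it has length {}".format(
            object_with_length, expected_length, len(object_with_length))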
Example #2
    def simple_increment_test(self):
        """ Simple incrementation test (Created for #3465, that wasn't a bug) """
        cluster = self.cluster

        cluster.populate(3).start()
        nodes = cluster.nodelist()

        session = self.patient_cql_connection(nodes[0])
        self.create_ks(session, 'ks', 3)
        self.create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})

        sessions = [self.patient_cql_connection(node, 'ks') for node in nodes]
        nb_increment = 50
        nb_counter = 10

        for i in xrange(0, nb_increment):
            for c in xrange(0, nb_counter):
                session = sessions[(i + c) % len(nodes)]
                query = SimpleStatement("UPDATE cf SET c = c + 1 WHERE key = 'counter%i'" % c, consistency_level=ConsistencyLevel.QUORUM)
                session.execute(query)

            session = sessions[i % len(nodes)]
            keys = ",".join(["'counter%i'" % c for c in xrange(0, nb_counter)])
            query = SimpleStatement("SELECT key, c FROM cf WHERE key IN (%s)" % keys, consistency_level=ConsistencyLevel.QUORUM)
            res = list(session.execute(query))

            assert_length_equal(res, nb_counter)
            for c in xrange(0, nb_counter):
                self.assertEqual(len(res[c]), 2, "Expecting key and counter for counter {}, got {}".format(c, str(res[c])))
                self.assertEqual(res[c][1], i + 1, "Expecting counter {} = {}, got {}".format(c, i + 1, res[c][1]))
Example #3
    def large_compaction_warning_test(self):
        """
        @jira_ticket CASSANDRA-9643
        Check that we log a warning when the partition size is bigger than compaction_large_partition_warning_threshold_mb
        """
        cluster = self.cluster
        cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1})
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node] = cluster.nodelist()

        session = self.patient_cql_connection(node)
        self.create_ks(session, 'ks', 1)

        mark = node.mark_log()
        strlen = (1024 * 1024) / 100
        session.execute("CREATE TABLE large(userid text PRIMARY KEY, properties map<int, text>) with compression = {}")
        for i in range(200):  # ensures partition size larger than compaction_large_partition_warning_threshold_mb
            session.execute("UPDATE ks.large SET properties[%i] = '%s' WHERE userid = 'user'" % (i, get_random_word(strlen)))

        ret = list(session.execute("SELECT properties from ks.large where userid = 'user'"))
        assert_length_equal(ret, 1)
        self.assertEqual(200, len(ret[0][0].keys()))

        node.flush()

        node.nodetool('compact ks large')
        verb = 'Writing' if self.cluster.version() > '2.2' else 'Compacting'
        sizematcher = r'\d+ bytes' if LooseVersion(self.cluster.version()) < LooseVersion('3.6') else r'\d+\.\d{3}(K|M|G)iB'
        node.watch_log_for(r'{} large partition ks/large:user \({}\)'.format(verb, sizematcher), from_mark=mark, timeout=180)

        ret = list(session.execute("SELECT properties from ks.large where userid = 'user'"))
        assert_length_equal(ret, 1)
        self.assertEqual(200, len(ret[0][0].keys()))
Example #4
    def launch_nodetool_cmd(self, cmd):
        """
        Launch a nodetool command and check that the result is empty (no error)
        """
        node1 = self.cluster.nodelist()[0]
        response = node1.nodetool(cmd, capture_output=True)[0]
        if not common.is_win():  # nodetool always prints output on Windows
            assert_length_equal(response, 0)  # nodetool does not print anything unless there is an error
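
A test would call this helper with a nodetool subcommand that is silent on success, for example (hypothetical usage, not part of the original example):

# Hypothetical usage inside a test: 'flush' is a real nodetool subcommand that
# prints nothing on success, so the empty-output check above passes.
self.launch_nodetool_cmd('flush')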
Example #5
def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missing=False, must_be_missing=False):
    query = SimpleStatement('SELECT c1, c2 FROM cf WHERE key=\'k%d\'' % key, consistency_level=consistency)
    rows = list(session.execute(query))
    if not tolerate_missing:
        assertions.assert_length_equal(rows, 1)
        res = rows[0]
        assert_true(len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res)
    if must_be_missing:
        assertions.assert_length_equal(rows, 0)
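
A caller would typically invoke this helper once per key after writing the known values 'value1' and 'value2' to columns c1 and c2, for example (hypothetical usage, not part of the original example):

# Hypothetical usage: check keys k0..k9, tolerating missing rows because the
# read consistency level here is weaker than the one used for the writes.
for key in xrange(10):
    query_c1c2(session, key, ConsistencyLevel.ONE, tolerate_missing=True)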
Example #6
def _validate_row(cluster, res):
    assertions.assert_length_equal(res, 100)
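    # Row i is expected to hold value{i*4} when i is divisible by 5, value{i*2} when i is
    # even, and value{i} otherwise, presumably matching the overwrite pattern used by the
    # helper that wrote the rows.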
    for i in xrange(0, 100):
        if i % 5 == 0:
            assert_equal(res[i][2], 'value{}'.format(i * 4), 'for {}, expecting value{}, got {}'.format(i, i * 4, res[i][2]))
        elif i % 2 == 0:
            assert_equal(res[i][2], 'value{}'.format(i * 2), 'for {}, expecting value{}, got {}'.format(i, i * 2, res[i][2]))
        else:
            assert_equal(res[i][2], 'value{}'.format(i), 'for {}, expecting value{}, got {}'.format(i, i, res[i][2]))
Example #7
def range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):
    keys = 100

    _put_with_overwrite(cluster, session, keys, cl)

    paged_results = session.execute('SELECT * FROM cf LIMIT 10000000')
    rows = [result for result in paged_results]

    assertions.assert_length_equal(rows, keys * 100)
    for k in xrange(0, keys):
        res = rows[:100]
        del rows[:100]
        _validate_row(cluster, res)
Example #8
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
Example #9
    def ghosts_test(self):
        """ Check range ghost are correctly removed by the system """
        cluster = self.cluster
        cluster.populate(1).start()
        [node1] = cluster.nodelist()

        time.sleep(.5)
        session = self.cql_connection(node1)
        self.create_ks(session, 'ks', 1)
        self.create_cf(session, 'cf', gc_grace=0, columns={'c': 'text'})

        rows = 1000

        for i in xrange(0, rows):
            session.execute("UPDATE cf SET c = 'value' WHERE key = 'k%i'" % i)

        res = list(session.execute("SELECT * FROM cf LIMIT 10000"))
        assert_length_equal(res, rows)

        node1.flush()

        for i in xrange(0, rows / 2):
            session.execute("DELETE FROM cf WHERE key = 'k%i'" % i)

        res = list(session.execute("SELECT * FROM cf LIMIT 10000"))
        # no ghosts in 1.2+
        assert_length_equal(res, rows / 2)

        node1.flush()
        time.sleep(1)  # make sure tombstones are collected
        node1.compact()

        res = list(session.execute("SELECT * FROM cf LIMIT 10000"))
        assert_length_equal(res, rows / 2)
Example #10
    def test_column_index_stress(self):
        """Write a large number of columns to a single row and set
        'column_index_size_in_kb' to a sufficiently low value to force
        the creation of a column index. The test will then randomly
        read columns from that row and ensure that all data is
        returned. See CASSANDRA-5225.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        (node1,) = cluster.nodelist()
        cluster.set_configuration_options(values={'column_index_size_in_kb': 1})  # reduce this value to force column index creation
        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'wide_rows', 1)

        create_table_query = 'CREATE TABLE test_table (row varchar, name varchar, value int, PRIMARY KEY (row, name));'
        session.execute(create_table_query)

        # Now insert 100,000 columns to row 'row0'
        insert_column_query = "UPDATE test_table SET value = {value} WHERE row = '{row}' AND name = '{name}';"
        for i in range(100000):
            row = 'row0'
            name = 'val' + str(i)
            session.execute(insert_column_query.format(value=i, row=row, name=name))

        # now randomly fetch columns: 1 to 3 at a time
        for i in range(10000):
            select_column_query = "SELECT value FROM test_table WHERE row='row0' AND name in ('{name1}', '{name2}', '{name3}');"
            values2fetch = [str(random.randint(0, 99999)) for i in range(3)]
            # values2fetch holds random values, so they are not necessarily unique.
            # Rather than complicating the query template to deduplicate them, the
            # assertion below expects one row per unique value in values2fetch.
            expected_rows = len(set(values2fetch))
            rows = list(session.execute(select_column_query.format(name1="val" + values2fetch[0],
                                                                   name2="val" + values2fetch[1],
                                                                   name3="val" + values2fetch[2])))
            assert_length_equal(rows, expected_rows)
Example #11
    def short_read_test(self):
        """
        @jira_ticket CASSANDRA-9460
        """
        cluster = self.cluster

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(3).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        self.create_cf(session, 'cf', read_repair=0.0)

        normal_key = 'normal'
        reversed_key = 'reversed'

        # Repeat this test 10 times to make it easier to spot a null pointer exception caused by a race, see CASSANDRA-9460
        for k in xrange(10):
            # insert 9 columns in two rows
            insert_columns(self, session, normal_key, 9)
            insert_columns(self, session, reversed_key, 9)

            # Delete the first 3 columns (and the last 3 columns, for the reversed version) with a different node dead each time
            for node, column_number_to_delete in zip(range(1, 4), range(3)):
                self.stop_node(node)
                self.delete(node, normal_key, column_number_to_delete)
                self.delete(node, reversed_key, 8 - column_number_to_delete)
                self.restart_node(node)

            # Query the first 3 columns in normal order
            session = self.patient_cql_connection(node1, 'ks')
            query = SimpleStatement(
                'SELECT c, v FROM cf WHERE key=\'k{}\' LIMIT 3'.format(normal_key),
                consistency_level=ConsistencyLevel.QUORUM)
            rows = list(session.execute(query))
            res = rows
            assert_length_equal(res, 3)

            # values 0, 1 and 2 have been deleted
            for i in xrange(1, 4):
                self.assertEqual('value{}'.format(i + 2), res[i - 1][1])

            # Query the first 3 columns in reverse order
            session = self.patient_cql_connection(node1, 'ks')
            query = SimpleStatement(
                'SELECT c, v FROM cf WHERE key=\'k{}\' ORDER BY c DESC LIMIT 3'.format(reversed_key),
                consistency_level=ConsistencyLevel.QUORUM)
            rows = list(session.execute(query))
            res = rows
            assert_length_equal(res, 3)

            # values 6, 7 and 8 have been deleted
            for i in xrange(0, 3):
                self.assertEqual('value{}'.format(5 - i), res[i][1])

            session.execute('TRUNCATE cf')
Example #12
    def query_users(self, session):
        ret = list(session.execute("SELECT * FROM users"))
        assert_length_equal(ret, 5)
        return ret
Example #13
def query_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
    query = SimpleStatement('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\'' % (key, offset, columns_count + offset - 1), consistency_level=consistency)
    res = list(session.execute(query))
    assertions.assert_length_equal(res, columns_count)
    for i in xrange(0, columns_count):
        assert_equal(res[i][1], 'value{}'.format(i + offset))
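
As with query_c1c2 above, this helper is meant to be called after columns c000000, c000001, ... of key 'kX' have been written with the matching 'valueN' values. A hypothetical call (not part of the original example) reading 10 columns starting at offset 5 would be:

# Hypothetical usage: expect columns c000005..c000014 of key 'k0' to hold value5..value14.
query_columns(tester, session, 0, 10, consistency=ConsistencyLevel.ALL, offset=5)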