Example No. 1
    def test_collection_map_ttl(self):
        """
        Test that ttl has a granularity of elements using a map collection.
        """
        self.prepare(default_time_to_live=6)

        self.session1.execute("ALTER TABLE ttl_table ADD mymap map<int, int>;""")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, mymap) VALUES (%d, %d, %s);
        """ % (1, 1, '{1:1,2:2,3:3,4:4,5:5}'))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 2 SET mymap[1] = 42, mymap[5] = 42 WHERE key=1;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, OrderedDict([(1, 42), (2, 2), (3, 3), (4, 4), (5, 42)])]]
        )
        self.smart_sleep(start, 4)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, OrderedDict([(2, 2), (3, 3), (4, 4)])]]
        )
        self.smart_sleep(start, 8)
        assert_row_count(self.session1, 'ttl_table', 0)
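
All of these examples lean on the cassandra-dtest assertion helpers (tools/assertions.py), which the snippets themselves never show. Below is a minimal sketch of assert_all and assert_row_count under simplified assumptions: the real assert_all normalizes row types and uses a hashed-dict comparison for ignore_order, and the real assert_row_count also accepts an optional where clause.

    from cassandra.query import SimpleStatement

    def assert_all(session, query, expected, cl=None, ignore_order=False, timeout=None):
        # Run the query at the requested consistency level and compare the
        # full result set against the expected list of rows.
        statement = SimpleStatement(query, consistency_level=cl)
        res = session.execute(statement) if timeout is None else session.execute(statement, timeout=timeout)
        rows = [list(row) for row in res]
        if ignore_order:
            # sort by repr to sidestep None-vs-int comparison errors
            rows, expected = sorted(rows, key=repr), sorted(expected, key=repr)
        assert rows == expected, "Expected {} from {}, but got {}".format(expected, query, rows)

    def assert_row_count(session, table_name, expected):
        # Compare SELECT count(*) against the expected number of rows.
        count = session.execute("SELECT count(*) FROM {};".format(table_name))[0][0]
        assert count == expected, "Expected {} rows in {}, got {}".format(expected, table_name, count)
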
Example No. 2
    def check_permissions(self, node, upgraded):
        # use an exclusive connection to ensure we only talk to the specified node
        klaus = self.patient_exclusive_cql_connection(node, user='******', password='******', timeout=20)
        # klaus is a superuser, so should be able to list all permissions
        # the output of LIST PERMISSIONS changes slightly with #7653 adding
        # a new role column to results, so we need to tailor our check
        # based on whether the node has been upgraded or not
        if not upgraded:
            assert_all(klaus,
                       'LIST ALL PERMISSIONS',
                       [['michael', '<table ks.cf1>', 'MODIFY'],
                        ['michael', '<table ks.cf2>', 'SELECT']],
                       timeout=60)
        else:
            assert_all(klaus,
                       'LIST ALL PERMISSIONS',
                       [['michael', 'michael', '<table ks.cf1>', 'MODIFY'],
                        ['michael', 'michael', '<table ks.cf2>', 'SELECT']],
                       timeout=60)

        klaus.cluster.shutdown()

        michael = self.patient_exclusive_cql_connection(node, user='******', password='******')
        michael.execute('INSERT INTO ks.cf1 (id, val) VALUES (0,0)')
        michael.execute('SELECT * FROM ks.cf2')
        assert_invalid(michael,
                       'SELECT * FROM ks.cf1',
                       'User michael has no SELECT permission on <table ks.cf1> or any of its parents',
                       Unauthorized)
        michael.cluster.shutdown()
Example No. 3
    def test_ttl_is_replicated(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """
        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL
        )
        session1.execute(query)
        assert_all(
            session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None]],
            cl=ConsistencyLevel.ALL
        )
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        assert abs(ttl_session1[0][0] - ttl_session2[0][0]) <= 1

        time.sleep(7)

        assert_none(session1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
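
The TTL examples all start with self.prepare(...), which is defined elsewhere in the suite. A plausible reconstruction follows, assuming a two-node ccm cluster and the ttl_table schema implied by the assertions (key, col1, col2, col3); create_ks is the keyspace helper from the dtest tools.

    def prepare(self, default_time_to_live=None):
        # Start a fresh two-node cluster and create the schema the
        # TTL assertions expect.
        self.cluster.populate(2).start()
        self.node1, self.node2 = self.cluster.nodelist()
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        create_ks(self.session1, 'ks', 2)
        query = """
            CREATE TABLE ttl_table (
                key int primary key,
                col1 int,
                col2 int,
                col3 int,
            )
        """
        if default_time_to_live is not None:
            query += " WITH default_time_to_live = {}".format(default_time_to_live)
        self.session1.execute(query)
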
Example No. 4
    def test_transient_full_merge_read(self):
        """ When reading, transient replica should serve a missing read """
        for node in self.nodes:
            self.assert_has_no_sstables(node)

        tm = lambda n: self.table_metrics(n)
        self.insert_row(1, 1, 1)
        # Stop writes to the other full node
        self.node2.byteman_submit(['./byteman/stop_writes.btm'])
        self.insert_row(1, 2, 2)

        self.assert_local_rows(self.node1,
                               [[1, 1, 1],
                                [1, 2, 2]])
        self.assert_local_rows(self.node2,
                               [[1, 1, 1]])
        self.assert_local_rows(self.node3,
                               [[1, 1, 1],
                                [1, 2, 2]])
        self.assert_local_rows(self.node4,
                               [[1, 2, 2]])
        self.assert_local_rows(self.node5,
                               [[1, 2, 2]])
        # Stop reads from the node that will hold the second row
        self.node1.stop()

        # Whether we're reading from the full node or from the transient node, we should get consistent results
        for node in [self.node2, self.node3, self.node4, self.node5]:
            assert_all(self.exclusive_cql_connection(node),
                       "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                       [[1, 1, 1],
                        [1, 2, 2]],
                       cl=ConsistencyLevel.QUORUM)
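
The transient replication examples use a handful of helpers (insert_row, delete_row, assert_local_rows) defined elsewhere in the test class. A rough sketch under assumed names and schema (pk, ck, value); NODELOCAL is the custom node-local consistency level this suite defines (it also appears in check_expected further down).

    from cassandra import ConsistencyLevel
    from cassandra.query import SimpleStatement

    def insert_row(self, pk, ck, value):
        # Write one row at QUORUM through the coordinator node.
        session = self.exclusive_cql_connection(self.node1)
        session.execute(SimpleStatement(
            "INSERT INTO %s.%s (pk, ck, value) VALUES (%d, %d, %d)"
            % (self.keyspace, self.table, pk, ck, value),
            consistency_level=ConsistencyLevel.QUORUM))

    def delete_row(self, pk, ck):
        # Delete one row at QUORUM through the coordinator node.
        session = self.exclusive_cql_connection(self.node1)
        session.execute(SimpleStatement(
            "DELETE FROM %s.%s WHERE pk = %d AND ck = %d"
            % (self.keyspace, self.table, pk, ck),
            consistency_level=ConsistencyLevel.QUORUM))

    def assert_local_rows(self, node, rows):
        # Assert exactly which rows this node stores locally, without
        # consulting the other replicas.
        assert_all(self.exclusive_cql_connection(node),
                   "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                   rows,
                   cl=NODELOCAL,
                   ignore_order=True)
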
Example No. 5
    def drop_column_queries_test(self):
        session = self.prepare()

        session.execute("USE ks")
        session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
        session.execute("CREATE INDEX ON cf(c2)")

        # insert some data.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")

        # drop and readd c1.
        session.execute("ALTER TABLE cf DROP c1")
        session.execute("ALTER TABLE cf ADD c1 int")

        # add another row.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

        # test that old (pre-drop) c1 values aren't returned and new ones are.
        assert_all(session, "SELECT c1 FROM cf", [[None], [None], [None], [4]], ignore_order=True)

        assert_all(session, "SELECT * FROM cf", [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]], ignore_order=True)

        assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])

        assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
Example No. 6
    def test_upgrade_with_range_tombstones(self):
        """
        Checks sstable including range tombstone can be read after upgrade.

        @jira_ticket CASSANDRA-10360
        """
        ROWS = 100

        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k int, t1 int, t2 int, PRIMARY KEY (k, t1, t2))')

        for n in range(ROWS):
            session.execute("INSERT INTO t(k, t1, t2) VALUES (0, 0, {})".format(n))

        session.execute("DELETE FROM t WHERE k=0 AND t1=0")

        for n in range(0, ROWS, 2):
            session.execute("INSERT INTO t(k, t1, t2) VALUES (0, 0, {})".format(n))

        session = self._do_upgrade()

        assert_all(session, "SELECT * FROM t WHERE k = 0", [[0, 0, n] for n in range(0, ROWS, 2)])

        self.cluster.compact()
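
The upgrade examples depend on _setup_cluster and _do_upgrade, which are not shown on this page. A rough sketch of the flow, assuming a single-node ccm cluster that starts on a legacy install and is upgraded in place via set_node_to_current_version:

    def _setup_cluster(self):
        # Start a one-node cluster on the legacy version and create the
        # test keyspace.
        self.cluster.populate(1).start()
        node1 = self.cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        return session

    def _do_upgrade(self):
        # Flush the legacy sstables, swap the node to the current version,
        # restart, and reconnect.
        node1 = self.cluster.nodelist()[0]
        node1.flush()
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)
        session = self.patient_cql_connection(node1)
        session.execute("USE ks")
        return session
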
Example No. 7
    def test_collection_set_ttl(self):
        """
        Test that ttl has a granularity of elements using a set collection.
        """
        self.prepare(default_time_to_live=10)

        self.session1.execute("ALTER TABLE ttl_table ADD myset set<int>;""")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, myset) VALUES (%d, %d, %s);
        """ % (1, 1, '{1,2,3,4,5}'))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 3 SET myset = myset + {42} WHERE key=1;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, sortedset([1, 2, 3, 4, 5, 42])]]
        )
        self.smart_sleep(start, 5)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None, sortedset([1, 2, 3, 4, 5])]]
        )
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 0)
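
The TTL tests anchor their timing to the insert time with smart_sleep(start, n) instead of sleeping a fixed amount after each statement. A minimal sketch of the helper:

    import time

    def smart_sleep(self, start_time, time_to_wait):
        # Sleep only for whatever remains of `time_to_wait` seconds since
        # `start_time`, so earlier statements don't shift the deadline.
        now = time.time()
        real_time_to_wait = time_to_wait - (now - start_time)
        if real_time_to_wait > 0:
            time.sleep(real_time_to_wait)
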
Example No. 8
    def test_recover_negative_expiration_date_sstables_with_scrub(self):
        """
        @jira_ticket CASSANDRA-14092
        Check that row with negative overflowed ttl is recovered by offline scrub
        """
        cluster = self.cluster
        if self.cluster.version() >= '4':
            cluster.set_configuration_options(values={'corrupted_tombstone_strategy': 'disabled'})
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node] = cluster.nodelist()

        session = self.patient_cql_connection(node)
        create_ks(session, 'ks', 1)
        session.execute("DROP TABLE IF EXISTS ttl_table;")
        query = """
            CREATE TABLE ttl_table (
                key int primary key,
                col1 int,
                col2 int,
                col3 int,
            )
        """
        session.execute(query)

        version = '2.1' if self.cluster.version() < LooseVersion('3.0') else \
                  ('3.0' if self.cluster.version() < LooseVersion('3.11') else '3.11')

        corrupt_sstable_dir = os.path.join('sstables', 'ttl_test', version)
        table_dir = self.get_table_paths('ttl_table')[0]
        logger.debug("Copying sstables from {} into {}", corrupt_sstable_dir, table_dir)
        dir_util.copy_tree(corrupt_sstable_dir, table_dir)

        logger.debug("Load corrupted sstable")
        node.nodetool('refresh ks ttl_table')
        node.watch_log_for('Loading new SSTables', timeout=10)

        logger.debug("Check that there are no rows present")
        assert_row_count(session, 'ttl_table', 0)

        logger.debug("Shutting down node")
        self.cluster.stop()

        logger.debug("Will run offline scrub on sstable")
        scrubbed_sstables = self.launch_standalone_scrub('ks', 'ttl_table',
                                                         reinsert_overflowed_ttl=True,
                                                         no_validate=True)

        logger.debug("Executed offline scrub on {}", str(scrubbed_sstables))

        logger.debug("Starting node again")
        self.cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node)
        session.execute("USE ks;")

        logger.debug("Check that row was recovered")
        assert_all(session, "SELECT * FROM ttl_table;", [[1, 1, None, None]])
Example No. 9
 def logged_batch_accepts_regular_mutations_test(self):
     """ Test that logged batch accepts regular mutations """
     session = self.prepare()
     session.execute("""
         BEGIN BATCH
         INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
         INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
         APPLY BATCH
     """)
     assert_all(session, "SELECT * FROM users", [[1, u'Will', u'Turner'], [0, u'Jack', u'Sparrow']])
Example No. 10
 def unlogged_batch_accepts_regular_mutations_test(self):
     """ Test that unlogged batch accepts regular mutations """
     session = self.prepare()
     session.execute("""
         BEGIN UNLOGGED BATCH
         INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
         INSERT INTO users (id, firstname, lastname) VALUES (2, 'Elizabeth', 'Swann')
         APPLY BATCH
     """)
     assert_all(session, "SELECT * FROM users", [[0, u'Jack', u'Sparrow'], [2, u'Elizabeth', u'Swann']])
Example No. 11
    def test_reloadlocalschema(self):
        """
        @jira_ticket CASSANDRA-13954

        Test that `nodetool reloadlocalschema` works as intended
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        remove_perf_disable_shared_mem(node)  # for jmx
        cluster.start()

        session = self.patient_cql_connection(node)

        query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
        session.execute(query)

        query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
        session.execute(query)

        ss = make_mbean('db', type='StorageService')

        schema_version = ''

        # get initial schema version
        with JolokiaAgent(node) as jmx:
            schema_version = jmx.read_attribute(ss, 'SchemaVersion')

        # manually add a regular column 'val' to test.test
        query = """
            INSERT INTO system_schema.columns
                (keyspace_name, table_name, column_name, clustering_order,
                 column_name_bytes, kind, position, type)
            VALUES
                ('test', 'test', 'val', 'none',
                 0x76616c, 'regular', -1, 'int');"""
        session.execute(query)

        # validate that schema version wasn't automatically updated
        with JolokiaAgent(node) as jmx:
            self.assertEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))

        # make sure the new column wasn't automagically picked up
        assert_invalid(session, 'INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')

        # force the node to reload schema from disk
        node.nodetool('reloadlocalschema')

        # validate that schema version changed
        with JolokiaAgent(node) as jmx:
            self.assertNotEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))

        # try an insert with the new column again and validate it succeeds this time
        session.execute('INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')
        assert_all(session, 'SELECT pk, ck, val FROM test.test;', [[0, 1, 2]])
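
make_mbean and JolokiaAgent come from the dtest JMX utilities (tools/jmxutils.py). make_mbean only assembles an MBean object name; a minimal sketch consistent with the call above, which would yield 'org.apache.cassandra.db:type=StorageService':

    def make_mbean(package, type, **kwargs):
        # Build a JMX object name under the org.apache.cassandra domain.
        name = 'org.apache.cassandra.{}:type={}'.format(package, type)
        for key, value in kwargs.items():
            name += ',{}={}'.format(key, value)
        return name
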
Example No. 12
 def counter_batch_accepts_counter_mutations_test(self):
     """ Test that counter batch accepts counter mutations """
     session = self.prepare()
     session.execute("""
         BEGIN COUNTER BATCH
         UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'
         UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://bar.com'
         UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'
         APPLY BATCH
     """)
     assert_all(session, "SELECT total FROM clicks", [[1], [1], [1]])
Example No. 13
 def check_expected(self, sessions, expected, node=[i for i in range(0,1000)], cleanup=False):
     """Check that each node has the expected values present"""
     for idx, session, expect, node in zip(range(0, 1000), sessions, expected, node):
         print("Checking idx " + str(idx))
         print(str([row for row in session.execute(self.select_statement())]))
         if cleanup:
             node.nodetool('cleanup')
         assert_all(session,
                    self.select(),
                    expect,
                    cl=NODELOCAL)
Example No. 14
 def batch_uses_proper_timestamp_test(self):
     """ Test that each statement will be executed with provided BATCH timestamp """
     session = self.prepare()
     session.execute("""
         BEGIN BATCH USING TIMESTAMP 1111111111111111
         INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
         INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
         APPLY BATCH
     """)
     query = "SELECT id, writetime(firstname), writetime(lastname) FROM users"
     assert_all(session, query, [[1, 1111111111111111, 1111111111111111], [0, 1111111111111111, 1111111111111111]])
Example No. 15
    def test_update_single_column_ttl(self):
        """ Test that specifying a TTL on a single column works """
        self.prepare()

        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        start = time.time()
        self.session1.execute("UPDATE ttl_table USING TTL 3 set col1=42 where key=%s;" % (1,))
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
        self.smart_sleep(start, 5)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])
Example No. 16
    def test_remove_column_ttl(self):
        """
        Test that removing a column ttl works.
        """
        self.prepare()

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d) USING TTL 2;
        """ % (1, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table set col1=42 where key=%s;" % (1,))
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
Example No. 17
    def upgrade_with_statics(self, rows):
        """
        Validates we can read legacy sstables with static columns.
        """
        PARTITIONS = 1
        ROWS = rows
        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k int, s1 int static, s2 int static, t int, v1 int, v2 int, PRIMARY KEY (k, t))')

        for n in range(PARTITIONS):
            for r in range(ROWS):
                session.execute("INSERT INTO t(k, s1, s2, t, v1, v2) VALUES ({}, {}, {}, {}, {}, {})".format(n, r, r + 1, r, r, r + 1))

        session = self._do_upgrade()

        for n in range(PARTITIONS):
            assert_all(session,
                       "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
            assert_all(session,
                       "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS - 1, -1, -1)])

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_all(session,
                       "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
            assert_all(session,
                       "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS - 1, -1, -1)])
Example No. 18
    def update_multiple_columns_ttl_test(self):
        """ Test that specifying a TTL on multiple columns works """

        self.prepare()

        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        start = time.time()
        self.session1.execute("""
            UPDATE ttl_table USING TTL 2 set col1=42, col2=42, col3=42 where key=%s;
        """ % (1,))
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 42, 42]])
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, None, None, None]])
Example No. 19
    def test_custom_speculate(self):
        """ If write can't succeed on full replica, it's written to the transient node instead """
        session = self.exclusive_cql_connection(self.node1)
        session.execute("ALTER TABLE %s.%s WITH speculative_retry = '99.99PERCENTILE';" % (self.keyspace, self.table))
        self.insert_row(1, 1, 1)
        # Stop writes to the other full node
        self.node2.byteman_submit(['./byteman/stop_writes.btm'])
        self.insert_row(1, 2, 2)

        for node in self.nodes:
            assert_all(self.exclusive_cql_connection(node),
                       "SELECT * FROM %s.%s WHERE pk = 1" % (self.keyspace, self.table),
                       [[1, 1, 1],
                        [1, 2, 2]],
                       cl=ConsistencyLevel.QUORUM)
Example No. 20
    def logged_batch_doesnt_throw_uae_test(self):
        """ Test that logged batch DOES NOT throw UAE if there are at least 2 live nodes """
        session = self.prepare(nodes=3)
        self.cluster.nodelist()[-1].stop(wait_other_notice=True)
        query = SimpleStatement("""
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
            APPLY BATCH
        """, consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        self.cluster.nodelist()[-1].start(wait_for_binary_proto=True, wait_other_notice=True)
        assert_all(session, "SELECT * FROM users", [[1, u'Will', u'Turner'], [0, u'Jack', u'Sparrow']],
                   cl=ConsistencyLevel.ALL)
Example No. 21
    def ttl_is_respected_on_repair_test(self):
        """ Test that ttl is respected on repair """

        self.prepare()
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 1};
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)

        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        time.sleep(7)
        self.node1.stop()
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 2};
        """)
        self.node1.repair(['ks'])
        ttl_start = time.time()
        ttl_session1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')
        self.node1.stop()

        assert_row_count(session2, 'ttl_table', 1)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]]
        )

        # Check that the TTLs on both servers are the same
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session1 = ttl_session1[0][0] - (time.time() - ttl_start)
        assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
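
The repair test compares the surviving TTLs with assert_almost_equal and probes the downed replica with assert_unavailable. Minimal sketches, with the tolerance semantics inferred from the assertions_test example further down (error is a relative tolerance across all arguments):

    import pytest
    from cassandra import OperationTimedOut, Unavailable

    def assert_almost_equal(*args, **kwargs):
        # All values must lie within `error` (a fraction) of the maximum.
        error = kwargs.get('error', 0.16)
        vmax, vmin = max(args), min(args)
        assert vmin > vmax * (1.0 - error) or vmin == vmax, \
            "values not within {:.2f}% of the max: {}".format(error * 100, args)

    def assert_unavailable(fn, *args):
        # The call must fail because replicas are down.
        with pytest.raises((Unavailable, OperationTimedOut)):
            fn(*args)
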
Example No. 22
    def _run_test(self, session):
        # Make sure the system_auth table will get replicated to the node that we're going to replace

        session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'} ;")
        session.execute("CREATE TABLE test.test (a text PRIMARY KEY, b text, c text);")

        for i in range(1, 6):
            session.execute("INSERT INTO test.test (a, b, c) VALUES ('{}', '{}', '{}');".format(i, i + 1, i + 2))

        assert_all(session,
                   "SELECT * FROM test.test",
                   [[str(i), str(i + 1), str(i + 2)] for i in range(1, 6)], ignore_order=True)

        assert_all(session,
                   "SELECT a,c FROM test.test",
                   [[str(i), str(i + 2)] for i in range(1, 6)], ignore_order=True)
Example No. 23
    def test_set_ttl_to_zero_to_default_ttl(self):
        """
        Test that we can remove the default ttl by setting the ttl explicitly to zero.
        CASSANDRA-11207
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(1, 1, 1, 1))
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(2, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key={};".format(1))
        self.session1.execute("UPDATE ttl_table using ttl 3 set col1=42 where key={};".format(2))
        self.smart_sleep(start, 5)

        # The second row has fully expired; using ttl 0 overrides the
        # default_time_to_live, so col1 of the first row persists
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
Example No. 24
    def test_set_ttl_to_zero_to_default_ttl(self):
        """
        Test that we can remove the default ttl by setting the ttl explicitly to zero.
        CASSANDRA-11207
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(1, 1, 1, 1))
        self.session1.execute("INSERT INTO ttl_table (key, col1, col2, col3) VALUES ({}, {}, {}, {});".format(2, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key={};".format(1))
        self.session1.execute("UPDATE ttl_table using ttl 3 set col1=42 where key={};".format(2))
        self.smart_sleep(start, 5)

        # The second row has fully expired; using ttl 0 overrides the
        # default_time_to_live, so col1 of the first row persists
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
Example No. 25
    def _verify_data(self, initial_data, table='keyspace1.standard1', cl=ConsistencyLevel.ONE, limit=10000,
                     restart_nodes=False):
        assert len(initial_data) > 0, "Initial data must not be empty"

        # query should work again
        logger.debug("Stopping old nodes")
        for node in self.cluster.nodelist():
            if node.is_running() and node != self.replacement_node:
                logger.debug("Stopping {}".format(node.name))
                node.stop(gently=False, wait_other_notice=True)

        logger.debug("Verifying {} on {} with CL={} and LIMIT={}".format(table, self.replacement_node.address(), cl, limit))
        session = self.patient_exclusive_cql_connection(self.replacement_node)
        assert_all(session, 'select * from {} LIMIT {}'.format(table, limit),
                   expected=initial_data,
                   cl=cl)
Example No. 26
    def test_update_multiple_columns_ttl(self):
        """ Test that specifying a TTL on multiple columns works """
        self.prepare()

        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        start = time.time()
        self.session1.execute("""
            UPDATE ttl_table USING TTL 2 set col1=42, col2=42, col3=42 where key=%s;
        """ % (1, ))
        assert_all(self.session1, "SELECT * FROM ttl_table;",
                   [[1, 42, 42, 42]])
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;",
                   [[1, None, None, None]])
Example No. 27
    def test_14766(self):
        """
        @jira_ticket CASSANDRA-14766

        A reproduction / regression test to illustrate CASSANDRA-14766: when
        reading a legacy 2.1 sstable with SSTableReversedIterator, it's possible
        to skip and not return the last Unfiltered in the last indexed block.

        It would lead to a missing row, if that Unfiltered was a row, or potentially
        resurrected data, if it's a tombstone.
        """
        cluster = self.cluster

        # set column_index_size_in_kb to 1 for a small reproduction sequence
        cluster.set_configuration_options(values={'column_index_size_in_kb': 1})

        # start with 2.1.20 to generate a legacy sstable
        cluster.set_install_dir(version='2.1.20')

        cluster.populate(1).start(wait_other_notice=True)
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)

        query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 1};"
        session.execute(query)

        query = 'CREATE TABLE test.test (pk int, ck int, value text, PRIMARY KEY (pk, ck));'
        session.execute(query)

        # insert 4 rows to fill 2 index blocks and flush the 2.1 sstable
        stmt = session.prepare('INSERT INTO test.test (pk, ck, value) VALUES (0, ?, ?);')
        for i in range(0, 4):
            session.execute(stmt, [i, '0' * 512])
        cluster.flush()

        # stop, upgrade to current version (3.0 or 3.11), start up
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)
        session = self.patient_cql_connection(node1)

        # make sure all 4 rows are there when reading backwards
        # prior to the fix, this would return 3 rows (ck = 2, 1, 0), skipping ck = 3
        assert_all(session,
                   "SELECT ck FROM test.test WHERE pk = 0 ORDER BY ck DESC;",
                   [[3], [2], [1], [0]],
                   cl=ConsistencyLevel.ONE)
Example No. 28
    def test_ttl_is_respected_on_repair(self):
        """ Test that ttl is respected on repair """
        self.prepare()
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 1};
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)

        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        time.sleep(7)
        self.node1.stop()
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_unavailable(session2.execute, "SELECT * FROM ttl_table;")
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.session1.execute("""
            ALTER KEYSPACE ks WITH REPLICATION =
            {'class' : 'SimpleStrategy', 'replication_factor' : 2};
        """)
        self.node1.repair(['ks'])
        ttl_start = time.time()
        ttl_session1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')
        self.node1.stop()

        assert_row_count(session2, 'ttl_table', 1)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]]
        )

        # Check that the TTLs on both servers are the same
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session1 = ttl_session1[0][0] - (time.time() - ttl_start)
        assert_almost_equal(ttl_session1, ttl_session2[0][0], error=0.005)
Example No. 29
    def update_column_ttl_with_default_ttl_test2(self):
        """
        Test that specifying a column ttl works when a default ttl is set.
        This test specifies a higher column ttl than the default ttl.
        """

        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table USING TTL 6 set col1=42 where key=%s;" % (1,))
        self.smart_sleep(start, 4)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
        self.smart_sleep(start, 8)
        assert_row_count(self.session1, 'ttl_table', 0)
Example No. 30
    def test_logged_batch_doesnt_throw_uae(self):
        """ Test that logged batch DOES NOT throw UAE if there are at least 2 live nodes """
        session = self.prepare(nodes=3)
        self.cluster.nodelist()[-1].stop(wait_other_notice=True)
        query = SimpleStatement("""
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
            APPLY BATCH
        """,
                                consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        self.cluster.nodelist()[-1].start(wait_for_binary_proto=True)
        assert_all(session,
                   "SELECT * FROM users",
                   [[1, 'Will', 'Turner'], [0, 'Jack', 'Sparrow']],
                   cl=ConsistencyLevel.ALL)
Example No. 31
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
Example No. 32
    def test_collection_list_ttl(self):
        """
        Test that ttl has a granularity of elements using a list collection.
        """
        self.prepare(default_time_to_live=10)

        self.session1.execute("ALTER TABLE ttl_table ADD mylist list<int>;""")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, mylist) VALUES (%d, %d, %s);
        """ % (1, 1, [1, 2, 3, 4, 5]))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 5 SET mylist[0] = 42, mylist[4] = 42 WHERE key=1;
        """)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [42, 2, 3, 4, 42]]])
        self.smart_sleep(start, 7)
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [2, 3, 4]]])
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 0)
Example No. 33
    def test_remove_column_ttl_with_default_ttl(self):
        """
        Test that we cannot remove a column ttl when a default ttl is set.
        """
        self.prepare(default_time_to_live=2)

        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (1, 1, 1, 1))
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
        """ % (2, 1, 1, 1))
        self.session1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key=%s;" % (1,))
        self.session1.execute("UPDATE ttl_table using ttl 8 set col1=42 where key=%s;" % (2,))
        self.smart_sleep(start, 5)
        # The first row should be deleted, using ttl 0 should fallback to default_time_to_live
        assert_all(self.session1, "SELECT * FROM ttl_table;", [[2, 42, None, None]])
        self.smart_sleep(start, 10)
        assert_row_count(self.session1, 'ttl_table', 0)
Example No. 34
    def _test_transient_full_merge_read_with_delete(self, coordinator):
        """ When reading, transient replica should serve a missing read """
        for node in self.nodes:
            self.assert_has_no_sstables(node)

        tm = lambda n: self.table_metrics(n)
        self.insert_row(1, 1, 1)
        self.insert_row(1, 2, 2)
        # Stop writes to the other full node
        self.node2.byteman_submit(['./byteman/stop_writes.btm'])
        self.delete_row(1, 2)

        self.assert_local_rows(self.node3, [])
        # Stop reads from the node that will hold the second row
        self.node1.stop()

        assert_all(self.exclusive_cql_connection(coordinator),
                   "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                   [[1, 1, 1]],
                   cl=ConsistencyLevel.QUORUM)
Example No. 35
    def test_transient_full_merge_read(self):
        """ When reading, transient replica should serve a missing read """
        for node in self.nodes:
            self.assert_has_no_sstables(node)

        tm = lambda n: self.table_metrics(n)
        self.insert_row(1, 1, 1)
        # Stop writes to the other full node
        self.node2.byteman_submit(['./byteman/stop_writes.btm'])
        self.insert_row(1, 2, 2)

        # Stop reads from the node that will hold the second row
        self.node1.stop()

        # Whether we're reading from the full node or from the transient node, we should get consistent results
        for node in [self.node2, self.node3]:
            assert_all(self.exclusive_cql_connection(node),
                       "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                       [[1, 1, 1], [1, 2, 2]],
                       cl=ConsistencyLevel.QUORUM)
Example No. 36
    def _test_transient_full_merge_read_with_delete(self, coordinator):
        """ When reading, transient replica should serve a missing read """
        for node in self.nodes:
            self.assert_has_no_sstables(node)

        tm = lambda n: self.table_metrics(n)
        self.insert_row(1, 1, 1)
        self.insert_row(1, 2, 2)
        # Stop writes to the other full node
        self.node2.byteman_submit(['./byteman/stop_writes.btm'])
        self.delete_row(1, 2)

        self.assert_local_rows(self.node3,
                               [])
        # Stop reads from the node that will hold the second row
        self.node1.stop()

        assert_all(self.exclusive_cql_connection(coordinator),
                   "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                   [[1, 1, 1]],
                   cl=ConsistencyLevel.QUORUM)
Example No. 37
    def _verify_data(self,
                     initial_data,
                     table='keyspace1.standard1',
                     cl=ConsistencyLevel.ONE,
                     limit=10000,
                     restart_nodes=False):
        assert len(initial_data) > 0, "Initial data must not be empty"

        # query should work again
        logger.debug("Stopping old nodes")
        for node in self.cluster.nodelist():
            if node.is_running() and node != self.replacement_node:
                logger.debug("Stopping {}".format(node.name))
                node.stop(gently=False, wait_other_notice=True)

        logger.debug("Verifying {} on {} with CL={} and LIMIT={}".format(
            table, self.replacement_node.address(), cl, limit))
        session = self.patient_exclusive_cql_connection(self.replacement_node)
        assert_all(session,
                   'select * from {} LIMIT {}'.format(table, limit),
                   expected=initial_data,
                   cl=cl)
Example No. 38
    def _run_test(self, session):
        # Make sure the system_auth table will get replicated to the node that we're going to replace

        session.execute(
            "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'} ;"
        )
        session.execute(
            "CREATE TABLE test.test (a text PRIMARY KEY, b text, c text);")

        for i in range(1, 6):
            session.execute(
                "INSERT INTO test.test (a, b, c) VALUES ('{}', '{}', '{}');".
                format(i, i + 1, i + 2))

        assert_all(session,
                   "SELECT * FROM test.test",
                   [[str(i), str(i + 1), str(i + 2)] for i in range(1, 6)],
                   ignore_order=True)

        assert_all(session,
                   "SELECT a,c FROM test.test",
                   [[str(i), str(i + 2)] for i in range(1, 6)],
                   ignore_order=True)
Example No. 39
    def test_collection_set_ttl(self):
        """
        Test that ttl has a granularity of elements using a set collection.
        """
        self.prepare(default_time_to_live=10)

        self.session1.execute("ALTER TABLE ttl_table ADD myset set<int>;" "")
        start = time.time()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1, myset) VALUES (%d, %d, %s);
        """ % (1, 1, '{1,2,3,4,5}'))
        self.session1.execute("""
            UPDATE ttl_table USING TTL 3 SET myset = myset + {42} WHERE key=1;
        """)
        assert_all(self.session1, "SELECT * FROM ttl_table;",
                   [[1, 1, None, None,
                     sortedset([1, 2, 3, 4, 5, 42])]])
        self.smart_sleep(start, 5)
        assert_all(
            self.session1, "SELECT * FROM ttl_table;",
            [[1, 1, None, None, sortedset([1, 2, 3, 4, 5])]])
        self.smart_sleep(start, 12)
        assert_row_count(self.session1, 'ttl_table', 0)
Example No. 40
    def drop_column_queries_test(self):
        session = self.prepare()

        session.execute("USE ks")
        session.execute(
            "CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
        session.execute("CREATE INDEX ON cf(c2)")

        # insert some data.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")

        # drop and readd c1.
        session.execute("ALTER TABLE cf DROP c1")
        session.execute("ALTER TABLE cf ADD c1 int")

        # add another row.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

        # test that old (pre-drop) c1 values aren't returned and new ones are.
        assert_all(session,
                   "SELECT c1 FROM cf", [[None], [None], [None], [4]],
                   ignore_order=True)

        assert_all(session,
                   "SELECT * FROM cf",
                   [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]],
                   ignore_order=True)

        assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])

        assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
Example No. 41
    def upgrade_with_statics_test(self):
        """
        Validates we can read legacy sstables with static columns.
        """
        PARTITIONS = 1
        ROWS = 10

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, s1 int static, s2 int static, t int, v1 int, v2 int, PRIMARY KEY (k, t))'
        )

        for n in range(PARTITIONS):
            for r in range(ROWS):
                session.execute(
                    "INSERT INTO t(k, s1, s2, t, v1, v2) VALUES ({}, {}, {}, {}, {}, {})"
                    .format(n, r, r + 1, r, r, r + 1))

        session = self._do_upgrade()

        for n in range(PARTITIONS):
            assert_all(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                [[n, v, ROWS - 1, ROWS, v, v + 1]
                 for v in range(ROWS - 1, -1, -1)])

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_all(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, ROWS - 1, ROWS, v, v + 1] for v in range(ROWS)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                [[n, v, ROWS - 1, ROWS, v, v + 1]
                 for v in range(ROWS - 1, -1, -1)])
Example No. 42
    def upgrade_with_index_test(self):
        """
        Checks a simple index can still be read after upgrade.
        """
        PARTITIONS = 2
        ROWS = 4

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, t int, v1 int, v2 int, PRIMARY KEY (k, t))'
        )

        session.execute('CREATE INDEX ON t(v1)')

        for p in range(PARTITIONS):
            for r in range(ROWS):
                session.execute(
                    "INSERT INTO t(k, t, v1, v2) VALUES ({}, {}, {}, {})".
                    format(p, r, r % 2, r * 2))

        self.cluster.flush()

        assert_all(session,
                   "SELECT * FROM t WHERE v1 = 0",
                   [[p, r, 0, r * 2] for p in range(PARTITIONS)
                    for r in range(ROWS) if r % 2 == 0],
                   ignore_order=True)

        session = self._do_upgrade()

        assert_all(session,
                   "SELECT * FROM t WHERE v1 = 0",
                   [[p, r, 0, r * 2] for p in range(PARTITIONS)
                    for r in range(ROWS) if r % 2 == 0],
                   ignore_order=True)

        self.cluster.compact()

        assert_all(session,
                   "SELECT * FROM t WHERE v1 = 0",
                   [[p, r, 0, r * 2] for p in range(PARTITIONS)
                    for r in range(ROWS) if r % 2 == 0],
                   ignore_order=True)
Example No. 43
    def test_ttl_is_respected_on_delayed_replication(self):
        """ Test that ttl is respected on delayed replication """
        self.prepare()
        self.node2.stop()
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
        """)
        self.session1.execute("""
            INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
        """)
        assert_all(
            self.session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None], [2, 2, None, None]]
        )
        time.sleep(7)
        self.node1.stop()
        self.node2.start(wait_for_binary_proto=True)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session2.execute("USE ks;")
        assert_row_count(session2, 'ttl_table', 0)  # should be 0: node2 was down during the inserts, so it has no replica of the data yet
        self.node1.start(wait_for_binary_proto=True)
        self.session1 = self.patient_exclusive_cql_connection(self.node1)
        self.session1.execute("USE ks;")
        self.node1.cleanup()

        assert_all(session2, "SELECT count(*) FROM ttl_table", [[1]], cl=ConsistencyLevel.ALL)
        assert_all(
            session2,
            "SELECT * FROM ttl_table;",
            [[2, 2, None, None]],
            cl=ConsistencyLevel.ALL
        )

        # Check that the TTLs on both servers are the same
        ttl_1 = self.session1.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]
        ttl_2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')[0][0]

        logger.debug("ttl_1 is {}:".format(ttl_1))
        logger.debug("ttl_2 is {}:".format(ttl_2))
        assert abs(ttl_1 - ttl_2) <= 1
Example No. 44
    def drop_static_column_and_restart_test(self):
        """
        Dropping a static column caused an sstable corrupt exception after restarting, here
        we test that we can drop a static column and restart safely.

        @jira_ticket CASSANDRA-12582
        """
        session = self.prepare()

        session.execute("USE ks")
        session.execute(
            "CREATE TABLE ts (id1 int, id2 int, id3 int static, val text, PRIMARY KEY (id1, id2))"
        )

        session.execute(
            "INSERT INTO ts (id1, id2, id3, val) VALUES (1, 1, 0, 'v1')")
        session.execute(
            "INSERT INTO ts (id1, id2, id3, val) VALUES (1, 2, 0, 'v2')")
        session.execute(
            "INSERT INTO ts (id1, id2, id3, val) VALUES (2, 1, 1, 'v3')")

        self.cluster.nodelist()[0].nodetool('flush ks ts')
        assert_all(session, "SELECT * FROM ts",
                   [[1, 1, 0, 'v1'], [1, 2, 0, 'v2'], [2, 1, 1, 'v3']])

        session.execute("alter table ts drop id3")
        assert_all(session, "SELECT * FROM ts",
                   [[1, 1, 'v1'], [1, 2, 'v2'], [2, 1, 'v3']])

        self.cluster.stop()
        self.cluster.start()

        session = self.patient_cql_connection(self.cluster.nodelist()[0])

        session.execute("USE ks")
        assert_all(session, "SELECT * FROM ts",
                   [[1, 1, 'v1'], [1, 2, 'v2'], [2, 1, 'v3']])
Example No. 45
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130

        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute(
                "CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(
            1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")

        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)

        # Wipe out data and restart
        cluster.clear()
        cluster.start()

        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)

        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")

        # Add some additional data before loading the SSTable, to check that it will be still accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')

        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)

        # Check that the index is marked as built and the index has been rebuilt
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8",
                   [[0, 1, 8], [0, 2, 8]])
Example No. 46
    def test_size_estimates_multidc(self):
        """
        Test that primary ranges are correctly generated on
        system.size_estimates for a multi-dc, multi-ks scenario
        @jira_ticket CASSANDRA-9639
        """
        logger.debug("Creating cluster")
        cluster = self.cluster
        cluster.set_configuration_options(values={'num_tokens': 2})
        cluster.populate([2, 1])
        node1_1, node1_2, node2_1 = cluster.nodelist()

        logger.debug("Setting tokens")
        node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
                                                    '-2506475074448728501,8473270337963525440',
                                                    '-3736333188524231709,8673615181726552074']
        node1_1.set_configuration_options(values={'initial_token': node1_tokens})
        node1_2.set_configuration_options(values={'initial_token': node2_tokens})
        node2_1.set_configuration_options(values={'initial_token': node3_tokens})
        cluster.set_configuration_options(values={'num_tokens': 2})

        logger.debug("Starting cluster")
        cluster.start()

        out, _, _ = node1_1.nodetool('ring')
        logger.debug("Nodetool ring output {}".format(out))

        logger.debug("Creating keyspaces")
        session = self.patient_cql_connection(node1_1)
        create_ks(session, 'ks1', 3)
        create_ks(session, 'ks2', {'dc1': 2})
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        logger.debug("Refreshing size estimates")
        node1_1.nodetool('refreshsizeestimates')
        node1_2.nodetool('refreshsizeestimates')
        node2_1.nodetool('refreshsizeestimates')

        """
        CREATE KEYSPACE ks1 WITH replication =
            {'class': 'SimpleStrategy', 'replication_factor': '3'}
        CREATE KEYSPACE ks2 WITH replication =
            {'class': 'NetworkTopologyStrategy', 'dc1': '2'}  AND durable_writes = true;

        Datacenter: dc1
        ==========
        Address     Token
                    8473270337963525440
        127.0.0.1   -6639341390736545756
        127.0.0.1   -2688160409776496397
        127.0.0.2   -2506475074448728501
        127.0.0.2   8473270337963525440

        Datacenter: dc2
        ==========
        Address     Token
                    8673615181726552074
        127.0.0.3   -3736333188524231709
        127.0.0.3   8673615181726552074
        """

        logger.debug("Checking node1_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8673615181726552074', '-9223372036854775808']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-6639341390736545756', '-3736333188524231709'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8473270337963525440', '8673615181726552074'],
                                                            ['8673615181726552074', '-9223372036854775808']])

        logger.debug("Checking node1_2 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_2)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])

        logger.debug("Checking node2_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node2_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-6639341390736545756', '-3736333188524231709'],
                                                            ['8473270337963525440', '8673615181726552074']])
        assert_none(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks2'")

    def test_replicated_system_keyspaces(self):
        cluster = self.cluster
        cluster.populate(1).start()

        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        # ALTER KEYSPACE should work for system_auth, system_distributed, and system_traces
        stmt = """
            ALTER KEYSPACE system_auth
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_distributed
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            ALTER KEYSPACE system_traces
            WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1' : '1'};"""
        assert_none(session, stmt)

        stmt = """
            SELECT replication
            FROM system_schema.keyspaces
            WHERE keyspace_name IN ('system_auth', 'system_distributed', 'system_traces');"""
        replication = {
            'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy',
            'datacenter1': '1'
        }
        assert_all(session, stmt,
                   [[replication], [replication], [replication]])

        # DROP KEYSPACE should fail for system_auth, system_distributed, and system_traces
        assert_exception(session,
                         'DROP KEYSPACE system_auth;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP KEYSPACE system_distributed;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP KEYSPACE system_traces;',
                         expected=Unauthorized)

        # CREATE TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(
            session,
            'CREATE TABLE system_auth.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        assert_exception(
            session,
            'CREATE TABLE system_distributed.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        assert_exception(
            session,
            'CREATE TABLE system_traces.new_table (id int PRIMARY KEY);',
            expected=Unauthorized)

        # ALTER TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         "ALTER TABLE system_auth.roles WITH comment = '';",
                         expected=Unauthorized)

        assert_exception(
            session,
            "ALTER TABLE system_distributed.repair_history WITH comment = '';",
            expected=Unauthorized)

        assert_exception(
            session,
            "ALTER TABLE system_traces.sessions WITH comment = '';",
            expected=Unauthorized)

        # DROP TABLE should fail in system_auth, system_distributed, and system_traces
        assert_exception(session,
                         'DROP TABLE system_auth.roles;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP TABLE system_distributed.repair_history;',
                         expected=Unauthorized)
        assert_exception(session,
                         'DROP TABLE system_traces.sessions;',
                         expected=Unauthorized)
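
The Unauthorized checks above rely on the dtest assert_exception helper. A
minimal sketch of how such a helper can work (the signature and behaviour of
the real tool may differ):

    def assert_exception(session, query, matching=None, expected=Exception):
        # Execute the statement and require that it raises `expected`;
        # optionally match `matching` against the error text.
        try:
            session.execute(query)
        except expected as e:
            if matching is not None:
                assert matching in repr(e)
        else:
            raise AssertionError("{} did not raise {}".format(query, expected))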
Exemplo n.º 48
0
    def test_bootstrap_and_cleanup(self):
        """Test bootstrapping a new node across a mix of repaired and unrepaired data"""
        main_session = self.patient_cql_connection(self.node1)
        nodes = [self.node1, self.node2, self.node3]

        for i in range(0, 40, 2):
            self.insert_row(i, i, i, main_session)

        sessions = [
            self.exclusive_cql_connection(node)
            for node in [self.node1, self.node2, self.node3]
        ]

        expected = [
            gen_expected(range(0, 11, 2), range(22, 40, 2)),
            gen_expected(range(0, 22, 2), range(32, 40, 2)),
            gen_expected(range(12, 31, 2))
        ]
        self.check_expected(sessions, expected)

        # Make sure at least a little data is repaired; this shouldn't move data anywhere
        repair_nodes(nodes)

        self.check_expected(sessions, expected)

        # Ensure that there is at least some transient data around; that way, if it's missing after bootstrap,
        # we know we failed to get it from the transient replica, losing the range entirely
        nodes[1].stop(wait_other_notice=True)

        for i in range(1, 40, 2):
            self.insert_row(i, i, i, main_session)

        nodes[1].start(wait_for_binary_proto=True)

        sessions = [
            self.exclusive_cql_connection(node)
            for node in [self.node1, self.node2, self.node3]
        ]

        expected = [
            gen_expected(range(0, 11), range(11, 20, 2), range(21, 40)),
            gen_expected(range(0, 21, 2), range(32, 40, 2)),
            gen_expected(range(1, 11, 2), range(11, 31), range(31, 40, 2))
        ]

        # Every node should have some of its fully replicated data, and nodes one and two should have some transient data
        self.check_expected(sessions, expected)

        node4 = new_node(self.cluster, bootstrap=True, token='00040')
        patch_start(node4)
        nodes.append(node4)
        node4.start(wait_for_binary_proto=True)

        expected.append(gen_expected(range(11, 20, 2), range(21, 40)))
        sessions.append(self.exclusive_cql_connection(node4))

        # Because repair was never run and the nodes had transient data, node4 will have data for transient ranges (node1, 11-20)
        assert_all(sessions[3], self.select(), expected[3], cl=NODELOCAL)

        # Node1 no longer transiently replicates 11-20, so cleanup will clean it up
        # Node1 also now transiently replicates 21-30 and half the values in that range were repaired
        expected[0] = gen_expected(range(0, 11), range(21, 30, 2),
                                   range(31, 40))
        # Node2 is still missing data since it was down during some insertions; it also lost some of its range (31-40)
        expected[1] = gen_expected(range(0, 21, 2))
        expected[2] = gen_expected(range(1, 11, 2), range(11, 31))

        # Cleanup should only have an impact if a node lost a range entirely, or started to transiently replicate it
        # and the data was repaired
        self.check_expected(sessions, expected, nodes, cleanup=True)

        repair_nodes(nodes)

        expected = [
            gen_expected(range(0, 11), range(31, 40)),
            gen_expected(range(0, 21)),
            gen_expected(range(11, 31)),
            gen_expected(range(21, 40))
        ]

        self.check_expected(sessions, expected, nodes, cleanup=True)

        # Every value should be replicated exactly 2 times
        self.check_replication(sessions, exactly=2)
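
gen_expected and check_replication are helpers not shown in this excerpt.
Given that rows are written with insert_row(i, i, i), gen_expected plausibly
flattens the given ranges into [k, k, k] rows; a sketch under that assumption
(not the actual dtest helper):

    def gen_expected(*ranges):
        # Flatten non-overlapping, ascending ranges into [k, k, k] rows,
        # mirroring insert_row(i, i, i) above.
        return [[i, i, i] for r in ranges for i in r]

check_replication presumably counts, per row, how many nodes return it at
NODELOCAL consistency and asserts that the count equals `exactly`.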

    def upgrade_with_clustered_table(self, compact_storage=False):
        PARTITIONS = 2
        ROWS = 1000

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, t int, v int, PRIMARY KEY (k, t))' +
            (' WITH COMPACT STORAGE' if compact_storage else ''))

        for n in range(PARTITIONS):
            for r in range(ROWS):
                session.execute(
                    "INSERT INTO t(k, t, v) VALUES ({n}, {r}, {r})".format(
                        n=n, r=r))

        session = self._do_upgrade()

        for n in range(PARTITIONS):
            assert_all(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, v] for v in range(ROWS)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                [[n, v, v] for v in range(ROWS - 1, -1, -1)])

            # Querying a "large" slice
            # Use floor division: range() and the interpolated CQL bounds need ints in Python 3
            start = ROWS // 10
            end = ROWS - 1 - (ROWS // 10)
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".
                format(n=n, start=start,
                       end=end), [[n, v, v] for v in range(start, end)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC"
                .format(n=n, start=start, end=end),
                [[n, v, v] for v in range(end - 1, start - 1, -1)])

            # Querying a "small" slice
            start = ROWS // 2
            end = ROWS // 2 + 5
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".
                format(n=n, start=start,
                       end=end), [[n, v, v] for v in range(start, end)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC"
                .format(n=n, start=start, end=end),
                [[n, v, v] for v in range(end - 1, start - 1, -1)])

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_all(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [[n, v, v] for v in range(ROWS)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {} ORDER BY t DESC".format(n),
                [[n, v, v] for v in range(ROWS - 1, -1, -1)])

            # Querying a "large" slice
            start = ROWS // 10
            end = ROWS - 1 - (ROWS // 10)
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".
                format(n=n, start=start,
                       end=end), [[n, v, v] for v in range(start, end)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC"
                .format(n=n, start=start, end=end),
                [[n, v, v] for v in range(end - 1, start - 1, -1)])

            # Querying a "small" slice
            start = ROWS // 2
            end = ROWS // 2 + 5
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end}".
                format(n=n, start=start,
                       end=end), [[n, v, v] for v in range(start, end)])
            assert_all(
                session,
                "SELECT * FROM t WHERE k = {n} AND t >= {start} AND t < {end} ORDER BY t DESC"
                .format(n=n, start=start, end=end),
                [[n, v, v] for v in range(end - 1, start - 1, -1)])
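
One detail worth calling out: the slice bounds above must be integers, which
is why they use floor division. In Python 3, ROWS / 10 is a float, so both
range(start, end) and the interpolated CQL literal would break:

    ROWS = 1000
    start = ROWS // 10                    # 100: an int, safe for range() and CQL
    assert isinstance(ROWS / 10, float)   # '/' yields 100.0, which range() rejects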
    def _validate_entries(connection):
        logger.debug("Expecting entries %s", expected_entries)
        assert_all(connection, "SELECT key, c2 FROM ti WHERE c1='v1'",
                   [[key, c2] for [key, _, c2] in expected_entries],
                   ignore_order=True, cl=ConsistencyLevel.QUORUM)
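
From the destructuring above, expected_entries is assumed to hold
three-element [key, c1, c2] rows; hypothetical values for illustration:

    expected_entries = [['k1', 'v1', 10], ['k2', 'v1', 20]]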
Exemplo n.º 51
0
    def assert_local_rows(self, node, rows, ignore_order=False):
        assert_all(self.exclusive_cql_connection(node),
                   "SELECT * FROM %s.%s" % (self.keyspace, self.table),
                   rows,
                   cl=NODELOCAL,
                   ignore_order=ignore_order)
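
A hypothetical call site (node and rows assumed for illustration):

    self.assert_local_rows(self.node1,
                           [[k, k] for k in range(10)],
                           ignore_order=True)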
Exemplo n.º 52
0
    def verify_with_cql(self, session):
        session.execute("USE supcols")
        expected = [[name, 'attr', 'name', name] for name in
                    ['Grace', 'Claire', 'Dave', 'Frank', 'Ed', 'Bob', 'Alice']]
        assert_all(session, "SELECT * FROM cols", expected)
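
The four-column expected rows match the usual supercolumn-to-CQL mapping of
key/column1/column2/value; a schema along these lines is assumed (not shown in
the original):

    session.execute("""
        CREATE TABLE supcols.cols (
            key text, column1 text, column2 text, value text,
            PRIMARY KEY (key, column1, column2))""")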