Example #1
    def short_read_quorum_delete_test(self):
        """Test CASSANDRA-8933"""
        cluster = self.cluster
        # Consider 3 nodes A, B, C (RF=3), and the following sequence of operations (all done at QUORUM):

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False}, batch_commitlog=True)

        cluster.populate(3).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)

        session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0")
        # we write 1 and 2 in a partition: all nodes get it.
        session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)", consistency_level=ConsistencyLevel.ALL))
        session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)", consistency_level=ConsistencyLevel.ALL))

        # we delete 1: only A and C get it.
        node2.flush()
        node2.stop(wait_other_notice=True)
        session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1", consistency_level=ConsistencyLevel.QUORUM))
        node2.start(wait_other_notice=True)

        # we delete 2: only B and C get it.
        node1.flush()
        node1.stop(wait_other_notice=True)
        session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2", consistency_level=ConsistencyLevel.QUORUM))
        node1.start(wait_other_notice=True)

        # we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
        node3.stop()
        assert_none(session, "SELECT * FROM t WHERE id = 0 LIMIT 1", cl=ConsistencyLevel.QUORUM)
Example #2
    def ttl_is_replicated_test(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """

        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL
        )
        session1.execute(query)
        assert_all(
            session1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None]],
            cl=ConsistencyLevel.ALL
        )
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        self.assertLessEqual(abs(ttl_session1[0][0] - ttl_session2[0][0]), 1)

        time.sleep(7)

        assert_none(session1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
Example #3
    def short_read_delete_test(self):
        """ Test short reads ultimately leaving no columns alive [#4000] """
        cluster = self.cluster

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        self.create_cf(session, 'cf', read_repair=0.0)
        # insert 2 columns in one row
        insert_columns(self, session, 0, 2)

        # Delete the row while first node is dead
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')

        query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        node1.start(wait_other_notice=True)

        # Query first column
        session = self.patient_cql_connection(node1, 'ks')

        assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
Example #4
    def ttl_is_replicated_test(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """

        self.prepare(default_time_to_live=5)
        session1 = self.patient_exclusive_cql_connection(self.node1)
        session2 = self.patient_exclusive_cql_connection(self.node2)
        session1.execute("USE ks;")
        session2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL)
        session1.execute(query)
        assert_all(session1,
                   "SELECT * FROM ttl_table;", [[1, 1, None, None]],
                   cl=ConsistencyLevel.ALL)
        ttl_session1 = session1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_session2 = session2.execute('SELECT ttl(col1) FROM ttl_table;')

        # since the two queries are not executed simultaneously, the remaining
        # TTLs can differ by one second
        self.assertLessEqual(abs(ttl_session1[0][0] - ttl_session2[0][0]), 1)

        time.sleep(7)

        assert_none(session1,
                    "SELECT * FROM ttl_table;",
                    cl=ConsistencyLevel.ALL)
Example #5
    def statements_test(self):
        """
        Smoke test SELECT and UPDATE statements:

        - create a table
        - insert 20 rows into the table
        - run SELECT COUNT queries and assert they return the correct values
            - bare and with IN and equality conditions
        - run SELECT * queries with = conditions
        - run UPDATE queries
        - SELECT * and assert the UPDATEd values are there
        - DELETE with a = condition
        - SELECT the deleted values and make sure nothing is returned
        # TODO run SELECTs to make sure each statement works
        """
        session = self.prepare()

        session.execute(
            "CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )"
        )

        for i in range(0, 10):
            session.execute(
                "INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})"
                .format(i=i))
            session.execute(
                "INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})"
                .format(i=i))

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'",
                   [10])

        assert_one(session,
                   "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')",
                   [20])

        assert_one(
            session,
            "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0",
            [2])

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'",
                   [['ev1', i, i, i] for i in range(0, 10)])

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'",
                   [['ev2', i, i, i] for i in range(0, 10)])

        for i in range(0, 10):
            session.execute(
                "UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}"
                .format(i=i))

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'",
                   [['ev1', i, 0, 0] for i in range(0, 10)])

        session.execute("DELETE FROM test7 WHERE kind = 'ev1'")
        assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'")

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'",
                   [0])
Example #6
    def short_read_delete_test(self):
        """ Test short reads ultimately leaving no columns alive [#4000] """
        cluster = self.cluster

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)

        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)
        self.create_cf(session, 'cf', read_repair=0.0)
        # insert 2 columns in one row
        insert_columns(self, session, 0, 2)

        # Delete the row while first node is dead
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')

        query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
        session.execute(query)

        node1.start(wait_other_notice=True)

        # Query first column
        session = self.patient_cql_connection(node1, 'ks')

        assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
Example #7
    def conditional_updates_on_static_columns_with_non_existing_values_test(
            self):
        session = self.prepare(3)

        table_name = "conditional_updates_on_static_columns_with_ne"
        session.execute("""
            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
        """.format(table_name))

        self._validate_non_existing_or_null_values(table_name, session)

        assert_one(
            session,
            "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(
                table_name), [False])

        assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))

        for operator in [">", "<", ">=", "<=", "="]:
            assert_one(
                session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(
                    table_name, operator), [False])

            assert_none(session,
                        "SELECT * FROM {} WHERE a = 5".format(table_name))
Example #8
    def ttl_is_replicated_test(self):
        """
        Test that the ttl setting is replicated properly on all nodes
        """

        self.prepare(default_time_to_live=5)
        cursor1 = self.patient_exclusive_cql_connection(self.node1)
        cursor2 = self.patient_exclusive_cql_connection(self.node2)
        cursor1.execute("USE ks;")
        cursor2.execute("USE ks;")
        query = SimpleStatement(
            "INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
            consistency_level=ConsistencyLevel.ALL
        )
        cursor1.execute(query)
        assert_all(
            cursor1,
            "SELECT * FROM ttl_table;",
            [[1, 1, None, None]],
            cl=ConsistencyLevel.ALL
        )
        ttl_cursor1 = cursor1.execute('SELECT ttl(col1) FROM ttl_table;')
        ttl_cursor2 = cursor2.execute('SELECT ttl(col1) FROM ttl_table;')
        assert_almost_equal(ttl_cursor1[0][0], ttl_cursor2[0][0], error=0.05)

        time.sleep(7)

        assert_none(cursor1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
Example #9
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(
            **
            {'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unavailable("Dummy Unavailabile message.")
        })
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{
            'execute.side_effect':
            InvalidRequest("Dummy InvalidRequest message.")
        })
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unauthorized("Dummy Unauthorized message.")
        })
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i]
                                                  for i in range(0, 10)])
        assert_all(mock_session,
                   "SELECT k, v FROM test", [[i, i] for i in range(0, 10)],
                   ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
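The Mock(**{'execute.side_effect': ...}) construction used above is the standard mock idiom for making an attribute raise when it is called; it is not specific to these tests. A small self-contained illustration (DummyError and the query string are placeholders, not part of the dtest suite):

    from unittest.mock import Mock  # on Python 2: from mock import Mock

    class DummyError(Exception):
        pass

    # 'execute.side_effect' makes mock_session.execute raise DummyError when called
    mock_session = Mock(**{'execute.side_effect': DummyError("boom")})
    try:
        mock_session.execute("SELECT now() FROM system.local")
    except DummyError as exc:
        print("execute raised: {}".format(exc))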
Example #10
    def conditional_deletes_on_static_columns_with_null_values_batch_test(self):
        session = self.prepare(3)

        table_name = "conditional_deletes_on_static_with_null_batch"
        session.execute("""
            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
        """.format(table_name))

        assert_one(session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (2, 2, 2, 2);
                 DELETE s1 FROM {table_name} WHERE a = 2 IF s2 = null;
             APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name), [2, 2, None, None, 2])

        for operator in [">", "<", ">=", "<=", "="]:
            assert_one(session, """
                BEGIN BATCH
                    INSERT INTO {table_name} (a, b, s1, v) values (3, 3, 3, 3);
                    DELETE s1 FROM {table_name} WHERE a = 3 IF s2 {operator} 5;
                APPLY BATCH""".format(table_name=table_name, operator=operator), [False])

            assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))

        assert_one(session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (6, 6, 6, 6);
                 DELETE s1 FROM {table_name} WHERE a = 6 IF s2 IN (1,2,3);
             APPLY BATCH""".format(table_name=table_name), [False])

        assert_none(session, "SELECT * FROM {} WHERE a = 6".format(table_name))

        assert_one(session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (4, 4, 4, 4);
                 DELETE s1 FROM {table_name} WHERE a = 4 IF s2 = null;
             APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name), [4, 4, None, None, 4])

        assert_one(session, """
            BEGIN BATCH
                INSERT INTO {table_name} (a, b, s1, v) VALUES (5, 5, 5, 5);
                DELETE s1 FROM {table_name} WHERE a = 5 IF s1 IN (1,2,null);
            APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name), [5, 5, None, None, 5])

        assert_one(session, """
            BEGIN BATCH
                INSERT INTO {table_name} (a, b, s1, v) values (7, 7, 7, 7);
                DELETE s1 FROM {table_name} WHERE a = 7 IF s2 != 7;
            APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 7".format(table_name), [7, 7, None, None, 7])
Example #11
    def interrupt_build_process_test(self):
        """Test that an interupted MV build process is resumed as it should"""

        session = self.prepare(options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

        debug("Inserting initial data")
        for i in xrange(10000):
            session.execute(
                "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
            )

        debug("Create a MV")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        debug("Stop the cluster. Interrupt the MV build process.")
        self.cluster.stop()

        debug("Restart the cluster")
        self.cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        session.execute("USE ks")

        debug("MV shouldn't be built yet.")
        assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")

        debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
        start = time.time()
        while True:
            try:
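                # assertNotEqual raises AssertionError once the view holds all
                # 10000 rows; the except branch below treats that as the signal
                # that the MV build has finished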
                result = session.execute("SELECT count(*) FROM t_by_v;")
                self.assertNotEqual(result[0].count, 10000)
            except AssertionError:
                debug("MV build process is finished")
                break

            elapsed = (time.time() - start) / 60
            if elapsed > 2:
                break

            time.sleep(5)

        debug("Verify all data")
        result = session.execute("SELECT count(*) FROM t_by_v;")
        self.assertEqual(result[0].count, 10000)
        for i in xrange(10000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )
Example #12
    def short_read_quorum_delete_test(self):
        """
        @jira_ticket CASSANDRA-8933
        """
        cluster = self.cluster
        # Consider 3 nodes A, B, C (RF=3), and the following sequence of operations (all done at QUORUM):

        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test
        cluster.set_configuration_options(
            values={'hinted_handoff_enabled': False}, batch_commitlog=True)

        cluster.populate(3).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 3)

        session.execute(
            "CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0"
        )
        # we write 1 and 2 in a partition: all nodes get it.
        session.execute(
            SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)",
                            consistency_level=ConsistencyLevel.ALL))
        session.execute(
            SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)",
                            consistency_level=ConsistencyLevel.ALL))

        # we delete 1: only A and C get it.
        node2.flush()
        node2.stop(wait_other_notice=True)
        session.execute(
            SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1",
                            consistency_level=ConsistencyLevel.QUORUM))
        node2.start(wait_other_notice=True)

        # we delete 2: only B and C get it.
        node1.flush()
        node1.stop(wait_other_notice=True)
        session = self.patient_cql_connection(node2, 'ks')
        session.execute(
            SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2",
                            consistency_level=ConsistencyLevel.QUORUM))
        node1.start(wait_other_notice=True)
        session = self.patient_cql_connection(node1, 'ks')

        # we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
        node3.flush()
        node3.stop(wait_other_notice=True)
        assert_none(session,
                    "SELECT * FROM t WHERE id = 0 LIMIT 1",
                    cl=ConsistencyLevel.QUORUM)
Example #13
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))'
        )

        # the blob is only here to make the row bigger internally so that it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute(
                "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})"
                .format(0, r, r, bigish_blob, r * 2, r * 3))

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute(
                "UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
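            # expected: r % 4 == 1 -> set deleted (v3 is None); other odd r -> row intact; even r -> whole row deleted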
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
Example #14
    def table_test(self):
        """
        Smoke test that basic table operations work:

        - create 2 tables, one with and one without COMPACT STORAGE
        - ALTER the table without COMPACT STORAGE, adding a column

        For each of those tables:

        - insert 10 values
        - SELECT * and assert the values are there
        - TRUNCATE the table
        - SELECT * and assert there are no values
        - DROP the table
        - SELECT * and assert the statement raises an InvalidRequest
        # TODO run SELECTs to make sure each statement works
        """
        session = self.prepare()

        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')

        session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
        self.assertIn('test1', ks_meta.tables)
        session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
        self.assertIn('test2', ks_meta.tables)

        t1_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test1')

        session.execute("ALTER TABLE test1 ADD v2 int")
        self.assertIn('v2', t1_meta.columns)

        for i in range(0, 10):
            session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
            session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=i))

        assert_all(session, "SELECT * FROM test1", [[i, i, i] for i in range(0, 10)], ignore_order=True)

        assert_all(session, "SELECT * FROM test2", [[i, i, i] for i in range(0, 10)], ignore_order=True)

        session.execute("TRUNCATE test1")
        session.execute("TRUNCATE test2")

        assert_none(session, "SELECT * FROM test1")

        assert_none(session, "SELECT * FROM test2")

        session.execute("DROP TABLE test1")
        self.assertNotIn('test1', ks_meta.tables)
        session.execute("DROP TABLE test2")
        self.assertNotIn('test2', ks_meta.tables)
Example #15
    def compaction_strategy_switching_test(self):
        """Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check for data loss.
        """
        strategies = [
            'LeveledCompactionStrategy', 'SizeTieredCompactionStrategy',
            'DateTieredCompactionStrategy'
        ]

        if self.strategy in strategies:
            strategies.remove(self.strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                self.create_ks(session, 'ks', 1)

                session.execute(
                    "create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'"
                    + self.strategy + "'};")

                for x in range(0, 100):
                    session.execute('insert into ks.cf (key, val) values (' +
                                    str(x) + ',1)')

                node1.flush()

                for x in range(0, 10):
                    session.execute('delete from cf where key = ' + str(x))

                session.execute(
                    "alter table ks.cf with compaction = {'class':'" + strat +
                    "'};")

                for x in range(11, 100):
                    assert_one(session,
                               "select * from ks.cf where key =" + str(x),
                               [x, 1])

                for x in range(0, 10):
                    assert_none(session,
                                'select * from cf where key = ' + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
Example #16
    def ignore_failure_policy_test(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running")

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            self.assertFalse(self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored.")
            self.assertTrue(self.node1.is_running(), "Node1 should still be running after an ignore error on CL")
        else:
            with self.assertRaises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
Example #17
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))')

        # the blob is only here to make the row bigger internally so that it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute("INSERT INTO t(k, t, v1, v2, v3) VALUES (%d, %d, %d, %s, {%d, %d})" % (0, r, r, bigish_blob, r * 2, r * 3))

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t=%d" % (r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute("UPDATE t SET v3={} WHERE k=0 AND t=%d" % (r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
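            # expected: r % 4 == 1 -> set deleted (v3 is None); other odd r -> row intact; even r -> whole row deleted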
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t=%d%s" % (r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t=%d%s" % (r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
Example #18
    def ignore_failure_policy_test(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={'commit_failure_policy': 'ignore'})

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log(
            "ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        self.assertTrue(failure,
                        "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(),
                        "Node1 should still be running")

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            self.assertFalse(
                self.node1.grep_log("terminating thread"),
                "thread was terminated but CL error should have been ignored.")
            self.assertTrue(
                self.node1.is_running(),
                "Node1 should still be running after an ignore error on CL")
        else:
            with self.assertRaises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD
                                     | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(self.session1, "SELECT * FROM test where key=3;", [3, 3])

        time.sleep(2)
        assert_one(self.session1, "SELECT * FROM test where key=2;", [2, 2])
Example #19
    def conditional_updates_on_static_columns_with_non_existing_values_test(self):
        session = self.prepare(3)

        table_name = "conditional_updates_on_static_columns_with_ne"
        session.execute("""
            CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
        """.format(table_name))

        self._validate_non_existing_or_null_values(table_name, session)

        assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False])

        assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))

        for operator in [">", "<", ">=", "<=", "="]:
            assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False])

            assert_none(session, "SELECT * FROM {} WHERE a = 5".format(table_name))
Example #20
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
Example #21
    def statements_test(self):
        """
        Smoke test SELECT and UPDATE statements:

        - create a table
        - insert 20 rows into the table
        - run SELECT COUNT queries and assert they return the correct values
            - bare and with IN and equality conditions
        - run SELECT * queries with = conditions
        - run UPDATE queries
        - SELECT * and assert the UPDATEd values are there
        - DELETE with a = condition
        - SELECT the deleted values and make sure nothing is returned
        # TODO run SELECTs to make sure each statement works
        """
        session = self.prepare()

        session.execute("CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )")

        for i in range(0, 10):
            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})".format(i=i))
            session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})".format(i=i))

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [10])

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')", [20])

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0", [2])

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, i, i] for i in range(0, 10)])

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'", [['ev2', i, i, i] for i in range(0, 10)])

        for i in range(0, 10):
            session.execute("UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}".format(i=i))

        assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, 0, 0] for i in range(0, 10)])

        session.execute("DELETE FROM test7 WHERE kind = 'ev1'")
        assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'")

        assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [0])
Example #22
    def compaction_strategy_switching_test(self):
        """Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check for data loss.
        """
        strategies = ["LeveledCompactionStrategy", "SizeTieredCompactionStrategy", "DateTieredCompactionStrategy"]

        if self.strategy in strategies:
            strategies.remove(self.strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                self.create_ks(session, "ks", 1)

                session.execute(
                    "create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'"
                    + self.strategy
                    + "'};"
                )

                for x in range(0, 100):
                    session.execute("insert into ks.cf (key, val) values (" + str(x) + ",1)")

                node1.flush()

                for x in range(0, 10):
                    session.execute("delete from cf where key = " + str(x))

                session.execute("alter table ks.cf with compaction = {'class':'" + strat + "'};")

                for x in range(11, 100):
                    assert_one(session, "select * from ks.cf where key =" + str(x), [x, 1])

                for x in range(0, 10):
                    assert_none(session, "select * from cf where key = " + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
Example #23
    def udf_overload_test(self):

        session = self.prepare(nodes=3)

        session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
        session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

        # create overloaded udfs
        session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")

        # ensure overload resolution works with the correct specificity
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

        # try non-existent functions
        assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
        assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

        # try dropping overloaded - should fail because ambiguous
        assert_invalid(session, "DROP FUNCTION overloaded")
        session.execute("DROP FUNCTION overloaded(varchar)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
        session.execute("DROP FUNCTION overloaded(text, text)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
        session.execute("DROP FUNCTION overloaded(ascii)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
        # should now work - unambiguous
        session.execute("DROP FUNCTION overloaded")
Example #24
    def compaction_delete_test(self):
        """
        Test that executing a delete properly tombstones a row.
        Insert data, delete a partition of data and check that the requisite rows are tombstoned.
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, "ks", 1)

        session.execute(
            "create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'"
            + self.strategy
            + "'} and gc_grace_seconds = 30;"
        )

        for x in range(0, 100):
            session.execute("insert into cf (key, val) values (" + str(x) + ",1)")

        node1.flush()
        for x in range(0, 10):
            session.execute("delete from cf where key = " + str(x))

        node1.flush()
        for x in range(0, 10):
            assert_none(session, "select * from cf where key = " + str(x))

        json_path = tempfile.mkstemp(suffix=".json")
        jname = json_path[1]
        with open(jname, "w") as f:
            node1.run_sstable2json(f)

        with open(jname, "r") as g:
            jsoninfo = g.read()

        numfound = jsoninfo.count("markedForDeleteAt")

        self.assertEqual(numfound, 10)
Example #25
    def udf_overload_test(self):

        session = self.prepare(nodes=3)

        session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
        session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

        # create overloaded udfs
        session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
        session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")

        # ensure overload resolution works with the correct specificity
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
        assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

        # try non-existent functions
        assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
        assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

        # try dropping overloaded - should fail because ambiguous
        assert_invalid(session, "DROP FUNCTION overloaded")
        session.execute("DROP FUNCTION overloaded(varchar)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
        session.execute("DROP FUNCTION overloaded(text, text)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
        session.execute("DROP FUNCTION overloaded(ascii)")
        assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
        # should now work - unambiguous
        session.execute("DROP FUNCTION overloaded")
Example #26
    def compaction_delete_test(self):
        """
        Test that executing a delete properly tombstones a row.
        Insert data, delete a partition of data and check that the requisite rows are tombstoned.
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 1)

        session.execute(
            "create table ks.cf (key int PRIMARY KEY, val int) with compaction = {'class':'"
            + self.strategy + "'} and gc_grace_seconds = 30;")

        for x in range(0, 100):
            session.execute('insert into cf (key, val) values (' + str(x) +
                            ',1)')

        node1.flush()
        for x in range(0, 10):
            session.execute('delete from cf where key = ' + str(x))

        node1.flush()
        for x in range(0, 10):
            assert_none(session, 'select * from cf where key = ' + str(x))

        json_path = tempfile.mkstemp(suffix='.json')
        jname = json_path[1]
        with open(jname, 'w') as f:
            node1.run_sstable2json(f)

        with open(jname, 'r') as g:
            jsoninfo = g.read()

        numfound = jsoninfo.count("markedForDeleteAt")

        self.assertEqual(numfound, 10)
Example #27
    def ignore_failure_policy_test(self):
        """ Test the ignore commitlog failure policy """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running")

        with self.assertRaises((OperationTimedOut, WriteTimeout)):
            self.session1.execute("""
              INSERT INTO test (key, col1) VALUES (2, 2);
            """)
        # Should not exist
        assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
Example #28
    def base_replica_repair_test(self):
        """
        Test that a materialized view is consistent after the repair of the base replica.
        """

        self.prepare(rf=3)
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Write initial data')
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        self._replay_batchlogs()

        debug('Verify the data in the MV with CL=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )

        debug('Shutdown node1')
        node1.stop(wait_other_notice=True)
        debug('Delete node1 data')
        node1.clear(clear_all=True)
        debug('Restarting node1')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)

        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')

        debug('Verify that there is no data on node1')
        for i in xrange(1000):
            assert_none(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i)
            )

        debug('Restarting node2 and node3')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        # Just repair the base replica
        node1.nodetool("repair ks t")

        debug('Verify data with cl=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
Example #29
    def conditional_deletes_on_static_columns_with_null_values_batch_test(
            self):
        session = self.prepare(3)

        table_name = "conditional_deletes_on_static_with_null_batch"
        session.execute("""
            CREATE TABLE {} (a int, b int, s1 int static, s2 int static, v int, PRIMARY KEY (a, b))
        """.format(table_name))

        assert_one(
            session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (2, 2, 2, 2);
                 DELETE s1 FROM {table_name} WHERE a = 2 IF s2 = null;
             APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 2".format(table_name),
                   [2, 2, None, None, 2])

        for operator in [">", "<", ">=", "<=", "="]:
            assert_one(
                session, """
                BEGIN BATCH
                    INSERT INTO {table_name} (a, b, s1, v) values (3, 3, 3, 3);
                    DELETE s1 FROM {table_name} WHERE a = 3 IF s2 {operator} 5;
                APPLY BATCH""".format(table_name=table_name,
                                      operator=operator), [False])

            assert_none(session,
                        "SELECT * FROM {} WHERE a = 3".format(table_name))

        assert_one(
            session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (6, 6, 6, 6);
                 DELETE s1 FROM {table_name} WHERE a = 6 IF s2 IN (1,2,3);
             APPLY BATCH""".format(table_name=table_name), [False])

        assert_none(session, "SELECT * FROM {} WHERE a = 6".format(table_name))

        assert_one(
            session, """
             BEGIN BATCH
                 INSERT INTO {table_name} (a, b, s1, v) values (4, 4, 4, 4);
                 DELETE s1 FROM {table_name} WHERE a = 4 IF s2 = null;
             APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 4".format(table_name),
                   [4, 4, None, None, 4])

        assert_one(
            session, """
            BEGIN BATCH
                INSERT INTO {table_name} (a, b, s1, v) VALUES (5, 5, 5, 5);
                DELETE s1 FROM {table_name} WHERE a = 5 IF s1 IN (1,2,null);
            APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 5".format(table_name),
                   [5, 5, None, None, 5])

        assert_one(
            session, """
            BEGIN BATCH
                INSERT INTO {table_name} (a, b, s1, v) values (7, 7, 7, 7);
                DELETE s1 FROM {table_name} WHERE a = 7 IF s2 != 7;
            APPLY BATCH""".format(table_name=table_name), [True])

        assert_one(session, "SELECT * FROM {} WHERE a = 7".format(table_name),
                   [7, 7, None, None, 7])
Example #30
    def table_test(self):
        """
        Smoke test that basic table operations work:

        - create 2 tables, one with and one without COMPACT STORAGE
        - ALTER the table without COMPACT STORAGE, adding a column

        For each of those tables:

        - insert 10 values
        - SELECT * and assert the values are there
        - TRUNCATE the table
        - SELECT * and assert there are no values
        - DROP the table
        - SELECT * and assert the statement raises an InvalidRequest
        # TODO run SELECTs to make sure each statement works
        """
        session = self.prepare()

        ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster,
                                                  ks_name='ks')

        session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
        self.assertIn('test1', ks_meta.tables)
        session.execute(
            "CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE"
        )
        self.assertIn('test2', ks_meta.tables)

        t1_meta = UpdatingTableMetadataWrapper(session.cluster,
                                               ks_name='ks',
                                               table_name='test1')

        session.execute("ALTER TABLE test1 ADD v2 int")
        self.assertIn('v2', t1_meta.columns)

        for i in range(0, 10):
            session.execute(
                "INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(
                    i=i))
            session.execute(
                "INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(
                    i=i))

        assert_all(session,
                   "SELECT * FROM test1", [[i, i, i] for i in range(0, 10)],
                   ignore_order=True)

        assert_all(session,
                   "SELECT * FROM test2", [[i, i, i] for i in range(0, 10)],
                   ignore_order=True)

        session.execute("TRUNCATE test1")
        session.execute("TRUNCATE test2")

        assert_none(session, "SELECT * FROM test1")

        assert_none(session, "SELECT * FROM test2")

        session.execute("DROP TABLE test1")
        self.assertNotIn('test1', ks_meta.tables)
        session.execute("DROP TABLE test2")
        self.assertNotIn('test2', ks_meta.tables)
Example #31
    def complex_repair_test(self):
        """
        Test that a materialized view is consistent after a more complex repair.
        """

        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()

        # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
        session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                        "WITH gc_grace_seconds = 5")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Shutdown node2 and node3')
        node2.stop()
        node3.stop(wait_other_notice=True)

        debug('Write initial data to node1 (will be replicated to node4 and node5)')
        for i in xrange(1000):
            session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

        debug('Verify the data in the MV on node1 with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()

        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        session2 = self.patient_cql_connection(node2)

        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        for i in xrange(1000):
            assert_none(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
            )

        debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
        for i in xrange(1000):
            # we write i*2 as value, instead of i
            session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))

        debug('Verify the new data in the MV on node2 with CL=ONE')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0]
            )

        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)

        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)

        debug('Read data from MV at QUORUM (old data should be returned)')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )

        debug('Run global repair on node1')
        node1.repair()

        debug('Read data from MV at quorum (new data should be returned after repair)')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
Example #32
    def really_complex_repair_test(self):
        """
        Test that a materialized view is consistent after a more complex repair.
        """

        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()

        # we create the base table with a small gc_grace_seconds so the batchlog will expire quickly
        session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                        "WITH gc_grace_seconds = 1")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                         "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))

        session.cluster.control_connection.wait_for_schema_agreement()

        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)

        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])

        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])

        session.shutdown()

        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()

        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        session2 = self.patient_cql_connection(node2)
        session2.execute('USE ks')

        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")

        debug('Write new data in node2 that overlap those in node1')
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])

        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])

        debug("Composite delete of everything")
        session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
        session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")

        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")

        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)

        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)

        # at this point the data isn't repaired yet, so a QUORUM read still returns
        # the stale rows; only after the repair below should this query return nothing
        # (see the final assert_none)
        assert_all(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
            cl=ConsistencyLevel.QUORUM
        )

        debug('Run global repair on node1')
        node1.repair()

        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)