def stop_commit_failure_policy_test(self):
        """
        Test the stop_commit commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'stop_commit'
        })

        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (2, 2);
        """)

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread")
        debug(failure)
        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running")

        # Cannot write anymore after the failure
        debug('attempting to insert to node with failing commitlog; should fail')
        with self.assertRaises((OperationTimedOut, WriteTimeout)):
            self.session1.execute("""
              INSERT INTO test (key, col1) VALUES (2, 2);
            """)

        # Should be able to read
        debug('attempting to read from node with failing commitlog; should succeed')
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
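The failure-policy tests in this listing call a `prepare` helper and use `self.session1`/`self.node1`, none of which are shown here. A minimal sketch of what such a helper might look like, assuming a single-node ccm cluster and the `test (key, col1)` table that the queries reference; the details are assumptions, not the actual dtest code:

    def prepare(self, configuration=None):
        """Start a one-node cluster with the given commitlog options and create the test table (illustrative sketch)."""
        if configuration:
            # Assumption: the passed options (e.g. commit_failure_policy) are plain cassandra.yaml settings.
            self.cluster.set_configuration_options(values=configuration)
        self.cluster.populate(1).start()
        self.node1 = self.cluster.nodelist()[0]
        self.session1 = self.patient_cql_connection(self.node1)
        create_ks(self.session1, 'ks', 1)
        self.session1.execute("CREATE TABLE test (key int PRIMARY KEY, col1 int);")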
    def test_read_old_sstables_after_upgrade(self):
        """ from 2.1 the location of sstables changed (CASSANDRA-5202), but existing sstables continue
        to be read from the old location. Verify that this works for index sstables as well as regular
        data column families (CASSANDRA-9116)
        """
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="2.0.12")
        if "memtable_allocation_type" in cluster._config_options:
            del cluster._config_options["memtable_allocation_type"]
        cluster.populate(1).start()

        [node1] = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'index_upgrade', 1)
        session.execute("CREATE TABLE index_upgrade.table1 (k int PRIMARY KEY, v int)")
        session.execute("CREATE INDEX ON index_upgrade.table1(v)")
        session.execute("INSERT INTO index_upgrade.table1 (k,v) VALUES (0,0)")

        query = "SELECT * FROM index_upgrade.table1 WHERE v=0"
        assert_one(session, query, [0, 0])

        # Upgrade to the 2.1.x version
        node1.drain()
        node1.watch_log_for("DRAINED")
        node1.stop(wait_other_notice=False)
        debug("Upgrading to current version")
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)

        [node1] = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        debug(cluster.cassandra_version())
        assert_one(session, query, [0, 0])
Example #3
    def drop_column_and_restart_test(self):
        """
        Simply insert data in a table, drop a column involved in the insert, and restart the node afterwards.
        This checks that the dropped_columns system table is properly flushed on the alter; if it is not,
        the restart fails as in CASSANDRA-11050.

        @jira_ticket CASSANDRA-11050
        """
        session = self.prepare()

        session.execute("USE ks")
        session.execute("CREATE TABLE t (k int PRIMARY KEY, c1 int, c2 int)")

        session.execute("INSERT INTO t (k, c1, c2) VALUES (0, 0, 0)")
        session.execute("ALTER TABLE t DROP c2")

        assert_one(session, "SELECT * FROM t", [0, 0])

        self.cluster.stop()
        self.cluster.start()

        session = self.patient_cql_connection(self.cluster.nodelist()[0])

        session.execute("USE ks")
        assert_one(session, "SELECT * FROM t", [0, 0])
    def stop_commit_failure_policy_test(self):
        """
        Test the stop_commit commitlog failure policy
        """
        self.prepare(configuration={'commit_failure_policy': 'stop_commit'})

        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (2, 2);
        """)

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log(
            "Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread"
        )
        debug(failure)
        self.assertTrue(failure,
                        "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(),
                        "Node1 should still be running")

        # Cannot write anymore after the failure
        debug(
            'attempting to insert to node with failing commitlog; should fail')
        with self.assertRaises((OperationTimedOut, WriteTimeout)):
            self.session1.execute("""
              INSERT INTO test (key, col1) VALUES (2, 2);
            """)

        # Should be able to read
        debug(
            'attempting to read from node with failing commitlog; should succeed'
        )
        assert_one(self.session1, "SELECT * FROM test where key=2;", [2, 2])
    def udf_with_udt_test(self):
        """
        Test UDFs that operate on non-frozen UDTs.
        @jira_ticket CASSANDRA-7423
        @since 3.6
        """
        session = self.prepare()
        session.execute("create type test (a text, b int);")
        session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

        if LooseVersion(self.cluster.version()) >= LooseVersion('3.6'):
            frozen_vals = (False, True)
        else:
            frozen_vals = (True,)

        for frozen in frozen_vals:
            debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))

            table_name = "tab_frozen" if frozen else "tab"
            column_type = "frozen<test>" if frozen else "test"
            session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))

            session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
            session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
            session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))

            assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])

            assert_invalid(session, "drop type test;")
Example #6
    def test_validate_empty_column_name(self):
        cluster = self.cluster
        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        create_ks(session, 'counter_tests', 1)

        session.execute("""
            CREATE TABLE compact_counter_table (
                pk int,
                ck text,
                value counter,
                PRIMARY KEY (pk, ck))
            WITH COMPACT STORAGE
            """)

        assert_invalid(
            session,
            "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''"
        )
        assert_invalid(
            session,
            "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''"
        )

        session.execute(
            "UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'"
        )
        session.execute(
            "UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'"
        )

        assert_one(session, "SELECT pk, ck, value FROM compact_counter_table",
                   [0, 'ck', 3])
Example #7
    def drop_column_and_restart_test(self):
        """
        Simply insert data in a table, drop a column involved in the insert, and restart the node afterwards.
        This checks that the dropped_columns system table is properly flushed on the alter; if it is not,
        the restart fails as in CASSANDRA-11050.

        @jira_ticket CASSANDRA-11050
        """
        session = self.prepare()

        session.execute("USE ks")
        session.execute("CREATE TABLE t (k int PRIMARY KEY, c1 int, c2 int)")

        session.execute("INSERT INTO t (k, c1, c2) VALUES (0, 0, 0)")
        session.execute("ALTER TABLE t DROP c2")

        assert_one(session, "SELECT * FROM t", [0, 0])

        self.cluster.stop()
        self.cluster.start()

        session = self.patient_cql_connection(self.cluster.nodelist()[0])

        session.execute("USE ks")
        assert_one(session, "SELECT * FROM t", [0, 0])
Example #8
    def upgrade_with_unclustered_table(self, compact_storage=False):
        PARTITIONS = 5

        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)' +
                        (' WITH COMPACT STORAGE' if compact_storage else ''))

        for n in range(PARTITIONS):
            session.execute("INSERT INTO t(k, v1, v2, v3, v4) VALUES ({}, {}, {}, {}, {})".format(n, n + 1, n + 2, n + 3, n + 4))

        is40 = self.dtest_config.cassandra_version_from_build >= MAJOR_VERSION_4
        if compact_storage and is40:
            session.execute("ALTER TABLE t DROP COMPACT STORAGE;")

        session = self._do_upgrade()

        def maybe_add_compact_columns(expected):
            if is40 and compact_storage:
                expected.insert(1, None)
                expected.append(None)
            return expected

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), maybe_add_compact_columns([n, n + 1, n + 2, n + 3, n + 4]))

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), maybe_add_compact_columns([n, n + 1, n + 2, n + 3, n + 4]))
Example #10
    def udf_with_udt_test(self):
        """
        Test UDFs that operate on non-frozen UDTs.
        @jira_ticket CASSANDRA-7423
        @since 3.6
        """
        session = self.prepare()
        session.execute("create type test (a text, b int);")
        session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

        if LooseVersion(self.cluster.version()) >= LooseVersion('3.6'):
            frozen_vals = (False, True)
        else:
            frozen_vals = (True,)

        for frozen in frozen_vals:
            debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))

            table_name = "tab_frozen" if frozen else "tab"
            column_type = "frozen<test>" if frozen else "test"
            session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))

            session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
            session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
            session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))

            assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])

            assert_invalid(session, "drop type test;")
Example #11
    def test_aggregate_udf(self):
        session = self.prepare()
        session.execute("create table nums (key int primary key, val int);")

        for x in range(1, 4):
            session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" %
                            (x, x))
        session.execute(
            "create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'"
        )
        session.execute(
            "create function stri(key int) called on null input returns text language java as 'return key.toString();'"
        )
        session.execute(
            "create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10"
        )

        assert_one(session, "select suma(val) from nums", ["16"])

        session.execute(
            "create function test(a int, b double) called on null input returns int language javascript as 'a + b;'"
        )
        session.execute("create aggregate aggy(double) sfunc test stype int")

        assert_invalid(session,
                       "create aggregate aggtwo(int) sfunc aggy stype int")

        assert_invalid(
            session,
            "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo"
        )
    def test_alter_rf_and_run_read_repair(self):
        """
        @jira_ticket CASSANDRA-10655
        @jira_ticket CASSANDRA-10657

        Test that querying only a subset of the columns in a row doesn't confuse read repair, avoiding
        the problem described in CASSANDRA-10655.
        """

        # session is only used to setup & do schema modification. Actual data queries are done directly on
        # each node, using an exclusive connection and CL.ONE
        session = self.patient_cql_connection(self.cluster.nodelist()[0])
        initial_replica, non_replicas = self.do_initial_setup(session)

        # Execute a query at CL.ALL on one of the nodes which was *not* the initial replica. It should trigger a
        # read repair and propagate the data to all 3 nodes.
        # Note: result of the read repair contains only the selected column (a), not all columns
        logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
        assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=ConsistencyLevel.ALL)

        # The read repair should have repaired the replicas, at least partially (see CASSANDRA-10655)
        # verify by querying each replica in turn.
        value_skipping_disabled = self.cluster.version() < '3.4'
        self.check_data_on_each_replica(expect_fully_repaired=value_skipping_disabled, initial_replica=initial_replica)

        # Now query again at CL.ALL but this time selecting all columns, which should ensure that 'b' also gets repaired
        query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
        logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
        assert_one(read_repair_session, query, [1, 1, 1], cl=ConsistencyLevel.ALL)

        # Check each replica individually again now that we expect the data to be fully repaired
        self.check_data_on_each_replica(expect_fully_repaired=True, initial_replica=initial_replica)
    def upgrade_with_unclustered_table(self, compact_storage=False):
        PARTITIONS = 5

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)'
            + (' WITH COMPACT STORAGE' if compact_storage else ''))

        for n in range(PARTITIONS):
            session.execute(
                "INSERT INTO t(k, v1, v2, v3, v4) VALUES ({}, {}, {}, {}, {})".
                format(n, n + 1, n + 2, n + 3, n + 4))

        session = self._do_upgrade()

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [n, n + 1, n + 2, n + 3, n + 4])

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n),
                       [n, n + 1, n + 2, n + 3, n + 4])
    def test_alter_rf_and_run_read_repair(self):
        """
        @jira_ticket CASSANDRA-10655
        @jira_ticket CASSANDRA-10657

        Test that querying only a subset of the columns in a row doesn't confuse read repair, avoiding
        the problem described in CASSANDRA-10655.
        """

        # session is only used to setup & do schema modification. Actual data queries are done directly on
        # each node, using an exclusive connection and CL.ONE
        session = self.patient_cql_connection(self.cluster.nodelist()[0])
        initial_replica, non_replicas = self.do_initial_setup(session)

        # Execute a query at CL.ALL on one of the nodes which was *not* the initial replica. It should trigger a
        # read repair and propagate the data to all 3 nodes.
        # Note: result of the read repair contains only the selected column (a), not all columns
        logger.debug("Executing 'SELECT a...' on non-initial replica to trigger read repair " + non_replicas[0].name)
        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
        assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=ConsistencyLevel.ALL)

        # The read repair should have repaired the replicas, at least partially (see CASSANDRA-10655)
        # verify by querying each replica in turn.
        value_skipping_disabled = self.cluster.version() < '3.4'
        self.check_data_on_each_replica(expect_fully_repaired=value_skipping_disabled, initial_replica=initial_replica)

        # Now query again at CL.ALL but this time selecting all columns, which should ensure that 'b' also gets repaired
        query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
        logger.debug("Executing 'SELECT *...' on non-initial replica to trigger read repair " + non_replicas[0].name)
        assert_one(read_repair_session, query, [1, 1, 1], cl=ConsistencyLevel.ALL)

        # Check each replica individually again now that we expect the data to be fully repaired
        self.check_data_on_each_replica(expect_fully_repaired=True, initial_replica=initial_replica)
Example #15
    def test_13691(self):
        """
        2.0 -> 2.1 -> 3.0 counters upgrade test
        @jira_ticket CASSANDRA-13691
        """
        cluster = self.cluster
        default_install_dir = cluster.get_install_dir()

        #
        # set up a 2.0 cluster with 3 nodes and set up schema
        #

        cluster.set_install_dir(version='2.0.17')
        cluster.populate(3)
        cluster.start()

        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        session.execute("""
            CREATE KEYSPACE test
                WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
            """)
        session.execute("CREATE TABLE test.test (id int PRIMARY KEY, c counter);")

        #
        # generate some 2.0 counter columns with local shards
        #

        query = "UPDATE test.test SET c = c + 1 WHERE id = ?"
        prepared = session.prepare(query)
        for i in range(0, 1000):
            session.execute(prepared, [i])

        cluster.flush()
        cluster.stop()

        #
        # upgrade cluster to 2.1
        #

        cluster.set_install_dir(version='2.1.17')
        cluster.start()
        cluster.nodetool("upgradesstables")

        #
        # upgrade node3 to current (3.0.x or 3.11.x)
        #

        node3.stop(wait_other_notice=True)
        node3.set_install_dir(install_dir=default_install_dir)
        node3.start(wait_other_notice=True)

        #
        # with a 2.1 coordinator, try to read the table with CL.ALL
        #

        session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
        assert_one(session, "SELECT COUNT(*) FROM test.test", [1000])
Example #16
    def test_13691(self):
        """
        2.0 -> 2.1 -> 3.0 counters upgrade test
        @jira_ticket CASSANDRA-13691
        """
        cluster = self.cluster
        default_install_dir = cluster.get_install_dir()

        #
        # set up a 2.0 cluster with 3 nodes and set up schema
        #

        cluster.set_install_dir(version='2.0.17')
        cluster.populate(3)
        cluster.start()

        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        session.execute("""
            CREATE KEYSPACE test
                WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
            """)
        session.execute("CREATE TABLE test.test (id int PRIMARY KEY, c counter);")

        #
        # generate some 2.0 counter columns with local shards
        #

        query = "UPDATE test.test SET c = c + 1 WHERE id = ?"
        prepared = session.prepare(query)
        for i in range(0, 1000):
            session.execute(prepared, [i])

        cluster.flush()
        cluster.stop()

        #
        # upgrade cluster to 2.1
        #

        cluster.set_install_dir(version='2.1.17')
        cluster.start()
        cluster.nodetool("upgradesstables")

        #
        # upgrade node3 to current (3.0.x or 3.11.x)
        #

        node3.stop(wait_other_notice=True)
        node3.set_install_dir(install_dir=default_install_dir)
        node3.start(wait_other_notice=True)

        #
        # with a 2.1 coordinator, try to read the table with CL.ALL
        #

        session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
        assert_one(session, "SELECT COUNT(*) FROM test.test", [1000])
    def _assert_one(self, query, row):
        """
        Assert query returns one row.
        @param query Query to run
        @param row Expected result row from query
        """
        decorated_query = self._decorate_query(query)
        assert_one(self.session, decorated_query, row)
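A brief usage sketch for the wrapper above; the query and expected row are hypothetical, and `self` stands for whichever test class defines `session` and `_decorate_query`:

        # Hypothetical call site inside a test method of the same class;
        # the table and expected row below are illustrative only.
        self._assert_one("SELECT k, v FROM ks.t WHERE k = 0", [0, 42])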
Example #18
    def test_simultaneous_bootstrap(self):
        """
        Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.

        Start a one node cluster and run a stress write workload.
        Start up a second node, and wait for the first node to detect it has joined the cluster.
        While the second node is bootstrapping, start a third node. This should fail.

        @jira_ticket CASSANDRA-7069
        @jira_ticket CASSANDRA-9484
        """

        bootstrap_error = "Other bootstrapping/leaving/moving nodes detected," \
                          " cannot bootstrap while cassandra.consistent.rangemovement is true"

        cluster = self.cluster
        cluster.set_environment_variable(
            'CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
        cluster.populate(1)
        cluster.start()

        node1, = cluster.nodelist()

        node1.stress([
            'write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)',
            '-rate', 'threads=10'
        ])

        node2 = new_node(cluster)
        node2.start()

        for _ in range(30):  # wait until node2 shows up
            ntout = node1.nodetool('status').stdout
            if re.search(r'UJ\s+' + node2.ip_addr, ntout):
                break
            time.sleep(0.1)

        node3 = new_node(cluster, remote_debug_port='2003')
        try:
            node3.start(wait_other_notice=False, verbose=False)
        except NodeError:
            pass  # node doesn't start as expected

        time.sleep(.5)
        node2.watch_log_for("Starting listening for CQL clients")

        node3.watch_log_for(bootstrap_error)

        session = self.patient_exclusive_cql_connection(node2)

        # Repeat the select count(*) query, to help catch
        # bugs like 9484, where count(*) fails at higher
        # data loads.
        for _ in range(5):
            assert_one(session,
                       "SELECT count(*) from keyspace1.standard1", [500000],
                       cl=ConsistencyLevel.ONE)
Example #19
    def test_rf_gt_nodes_multidc_should_succeed(self):
        """
        Validate that a keyspace with RF > N in a multi-DC setup doesn't break bootstrap
        @jira_ticket CASSANDRA-16296 CASSANDRA-16411
        """
        cluster = self.cluster
        cluster.set_environment_variable(
            'CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
        cluster.populate([1, 1])
        cluster.start()

        node1 = cluster.nodelist()[0]
        node2 = cluster.nodelist()[1]
        session = self.patient_exclusive_cql_connection(node1)
        session.execute(
            "CREATE KEYSPACE k WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : '3'}"
        )

        if cluster.version() >= '4.0':
            warning = 'Your replication factor 3 for keyspace k is higher than the number of nodes 1 for datacenter dc1'
            assert len(node1.grep_log(warning)) == 1
            assert len(node2.grep_log(warning)) == 0

        session.execute(
            "ALTER KEYSPACE k WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'dc1' : '2'}"
        )
        session.execute(
            "CREATE TABLE k.testgtrfmultidc (KEY text PRIMARY KEY)")
        session.execute(
            "INSERT INTO k.testgtrfmultidc (KEY) VALUES ('test_rf_gt_nodes_multidc_should_succeed')"
        )

        if cluster.version() >= '4.0':
            warning = 'Your replication factor 2 for keyspace k is higher than the number of nodes 1 for datacenter dc1'
            assert len(node1.grep_log(warning)) == 1
            assert len(node2.grep_log(warning)) == 0

        marks = map(lambda n: n.mark_log(), cluster.nodelist())
        node3 = Node(name='node3',
                     cluster=cluster,
                     auto_bootstrap=True,
                     thrift_interface=('127.0.0.3', 9160),
                     storage_interface=('127.0.0.3', 7000),
                     jmx_port='7300',
                     remote_debug_port='0',
                     initial_token=None,
                     binary_interface=('127.0.0.3', 9042))
        cluster.add(node3, is_seed=False, data_center="dc1")
        node3.start(wait_for_binary_proto=True)
        if cluster.version() >= '4.0':
            warning = 'is higher than the number of nodes'
            for (node, mark) in zip(cluster.nodelist(), marks):
                assert len(node.grep_log(warning, from_mark=mark)) == 0

        session3 = self.patient_exclusive_cql_connection(node3)
        assert_one(session3, "SELECT * FROM k.testgtrfmultidc",
                   ["test_rf_gt_nodes_multidc_should_succeed"])
    def test_assertions(self):
        # assert_exception_test
        mock_session = Mock(
            **
            {'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unavailable("Dummy Unavailabile message.")
        })
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{
            'execute.side_effect':
            InvalidRequest("Dummy InvalidRequest message.")
        })
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{
            'execute.side_effect':
            Unauthorized("Dummy Unauthorized message.")
        })
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i]
                                                  for i in range(0, 10)])
        assert_all(mock_session,
                   "SELECT k, v FROM test", [[i, i] for i in range(0, 10)],
                   ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
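For context, a minimal sketch of an assert_one-style helper that would satisfy the mocked expectations above. The real dtest helper also accepts a consistency level and produces richer failure output; this approximation only shows the core comparison and is not the project's implementation:

def assert_one_sketch(session, query, expected, cl=None):
    """Run `query` and assert it returns exactly one row equal to `expected` (illustrative only)."""
    # `cl` is accepted for signature parity but ignored to keep the sketch minimal;
    # the real helper wraps the query in a statement carrying the consistency level.
    rows = [list(row) for row in session.execute(query)]
    assert rows == [expected], "Expected {} from '{}', but got {}".format([expected], query, rows)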
Example #21
    def default_aggregate_test(self):
        session = self.prepare()
        session.execute("create table nums (key int primary key, val double);")

        for x in range(1, 10):
            session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

        assert_one(session, "SELECT min(key) FROM nums", [1])
        assert_one(session, "SELECT max(val) FROM nums", [9.0])
        assert_one(session, "SELECT sum(key) FROM nums", [45])
        assert_one(session, "SELECT avg(val) FROM nums", [5.0])
        assert_one(session, "SELECT count(*) FROM nums", [9])
Example #23
    def simultaneous_bootstrap_test(self):
        """
        Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.

        Start a one node cluster and run a stress write workload.
        Start up a second node, and wait for the first node to detect it has joined the cluster.
        While the second node is bootstrapping, start a third node. This should fail.

        @jira_ticket CASSANDRA-7069
        @jira_ticket CASSANDRA-9484
        """

        bootstrap_error = (
            "Other bootstrapping/leaving/moving nodes detected,"
            " cannot bootstrap while cassandra.consistent.rangemovement is true"
        )

        self.ignore_log_patterns.append(bootstrap_error)

        cluster = self.cluster
        cluster.populate(1)
        cluster.start(wait_for_binary_proto=True)

        node1, = cluster.nodelist()

        node1.stress([
            'write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)',
            '-rate', 'threads=10'
        ])

        node2 = new_node(cluster)
        node2.start(wait_other_notice=True)

        node3 = new_node(cluster, remote_debug_port='2003')
        process = node3.start(wait_other_notice=False)
        stdout, stderr = process.communicate()
        self.assertIn(bootstrap_error, stderr, msg=stderr)
        time.sleep(.5)
        self.assertFalse(node3.is_running(),
                         msg="Two nodes bootstrapped simultaneously")

        node2.watch_log_for("Starting listening for CQL clients")

        session = self.patient_exclusive_cql_connection(node2)

        # Repeat the select count(*) query, to help catch
        # bugs like 9484, where count(*) fails at higher
        # data loads.
        for _ in xrange(5):
            assert_one(session,
                       "SELECT count(*) from keyspace1.standard1", [500000],
                       cl=ConsistencyLevel.ONE)
Example #24
    def test_multi_table_batch_for_10554(self):
        """ Test a batch on 2 tables having different columns, restarting the node afterwards, to reproduce CASSANDRA-10554 """
        session = self.prepare()

        # prepare() adds users and clicks but clicks is a counter table, so adding a random other table for this test.
        session.execute("""
            CREATE TABLE dogs (
                dogid int PRIMARY KEY,
                dogname text,
             );
         """)

        session.execute("""
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO dogs (dogid, dogname) VALUES (0, 'Pluto')
            APPLY BATCH
        """)

        assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
        assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])

        # Flush and restart the node as it's how 10554 reproduces
        node1 = self.cluster.nodelist()[0]
        node1.flush()
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1, keyspace='ks')

        assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
        assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])
Example #25
    def drop_column_queries_test(self):
        session = self.prepare()

        session.execute("USE ks")
        session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
        session.execute("CREATE INDEX ON cf(c2)")

        # insert some data.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")

        # drop and readd c1.
        session.execute("ALTER TABLE cf DROP c1")
        session.execute("ALTER TABLE cf ADD c1 int")

        # add another row.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

        # test that old (pre-drop) c1 values aren't returned and new ones are.
        assert_all(session, "SELECT c1 FROM cf", [[None], [None], [None], [4]], ignore_order=True)

        assert_all(session, "SELECT * FROM cf", [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]], ignore_order=True)

        assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])

        assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
Example #26
def assert_bootstrap_state(tester, node, expected_bootstrap_state):
    """
    Assert that a node is in a given bootstrap state
    @param tester The dtest.Tester object to fetch the exclusive connection to the node
    @param node The node to check bootstrap state
    @param expected_bootstrap_state Bootstrap state to expect

    Examples:
    assert_bootstrap_state(self, node3, 'COMPLETED')
    """
    session = tester.patient_exclusive_cql_connection(node)
    assert_one(session,
               "SELECT bootstrapped FROM system.local WHERE key='local'",
               [expected_bootstrap_state])
Example #27
    def multi_table_batch_for_10554_test(self):
        """ Test a batch on 2 tables having different columns, restarting the node afterwards, to reproduce CASSANDRA-10554 """

        session = self.prepare()

        # prepare() adds users and clicks but clicks is a counter table, so adding a random other table for this test.
        session.execute("""
            CREATE TABLE dogs (
                dogid int PRIMARY KEY,
                dogname text,
             );
         """)

        session.execute("""
            BEGIN BATCH
            INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
            INSERT INTO dogs (dogid, dogname) VALUES (0, 'Pluto')
            APPLY BATCH
        """)

        assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
        assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])

        # Flush and restart the node as it's how 10554 reproduces
        node1 = self.cluster.nodelist()[0]
        node1.flush()
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1, keyspace='ks')

        assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
        assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])
    def test_compaction_strategy_switching(self, strategy):
        """
        Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check that no data was lost.
        """
        strategies = [
            'LeveledCompactionStrategy', 'SizeTieredCompactionStrategy',
            'DateTieredCompactionStrategy'
        ]

        if strategy in strategies:
            strategies.remove(strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                create_ks(session, 'ks', 1)

                session.execute(
                    "create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'"
                    + strategy + "'};")

                for x in range(0, 100):
                    session.execute('insert into ks.cf (key, val) values (' +
                                    str(x) + ',1)')

                node1.flush()

                for x in range(0, 10):
                    session.execute('delete from cf where key = ' + str(x))

                session.execute(
                    "alter table ks.cf with compaction = {'class':'" + strat +
                    "'};")

                for x in range(11, 100):
                    assert_one(session,
                               "select * from ks.cf where key =" + str(x),
                               [x, 1])

                for x in range(0, 10):
                    assert_none(session,
                                'select * from cf where key = ' + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
    def ignore_failure_policy_test(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log("ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running")

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            self.assertFalse(self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored.")
            self.assertTrue(self.node1.is_running(), "Node1 should still be running after an ignore error on CL")
        else:
            with self.assertRaises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
Example #30
    def test_ignore_failure_policy(self):
        """
        Test the ignore commitlog failure policy
        """
        self.prepare(configuration={
            'commit_failure_policy': 'ignore'
        })

        self._provoke_commitlog_failure()
        failure = self.node1.grep_log(r"ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
        assert failure, "Cannot find the commitlog failure message in logs"
        assert self.node1.is_running(), "Node1 should still be running"

        # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
        # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
        # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
        # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
        query = "INSERT INTO test (key, col1) VALUES (2, 2);"
        if is_win():
            # We expect this to succeed
            self.session1.execute(query)
            assert not self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored."
            assert self.node1.is_running(), "Node1 should still be running after an ignore error on CL"
        else:
            with pytest.raises((OperationTimedOut, WriteTimeout)):
                self.session1.execute(query)

            # Should not exist
            assert_none(self.session1, "SELECT * FROM test where key=2;")

        # bring back the node commitlogs
        self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

        self.session1.execute("""
          INSERT INTO test (key, col1) VALUES (3, 3);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=3;",
            [3, 3]
        )

        time.sleep(2)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=2;",
            [2, 2]
        )
    def test_compaction(self):
        """
        Test we can major compact after an incremental repair
        * Launch a three node cluster
        * Create a keyspace with RF 3 and a table
        * Stop node3
        * Insert 100 rows
        * Restart node3
        * Issue an incremental repair
        * Insert 50 more rows
        * Perform a major compaction on node3
        * Verify all data is present
        # TODO: I have no idea what this is testing. The assertions do not verify anything meaningful.
        # TODO: Fix all the string formatting
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        session.execute("create table tab(key int PRIMARY KEY, val int);")

        node3.stop()

        for x in range(0, 100):
            session.execute("insert into tab(key,val) values(" + str(x) +
                            ",0)")
        node1.flush()

        node3.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node3.repair()
        else:
            node3.nodetool("repair -par -inc")
        for x in range(0, 150):
            session.execute("insert into tab(key,val) values(" + str(x) +
                            ",1)")

        cluster.flush()

        node3.nodetool('compact')

        for x in range(0, 150):
            assert_one(session, "select val from tab where key =" + str(x),
                       [1])
Example #32
    def test_upgrade_with_range_and_collection_tombstones(self):
        """
        Check that an sstable including a collection tombstone (inserted through adding a collection) can be read after upgrade.

        @jira_ticket CASSANDRA-10743
        """
        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k text, t int, c list<int>, PRIMARY KEY (k, t))')

        session.execute("INSERT INTO t(k, t, c) VALUES ('some_key', 0, %s)" % str([i for i in range(10000)]))

        session = self._do_upgrade()

        self.cluster.compact()

        assert_one(session, "SELECT k FROM t", ['some_key'])
Example #34
    def do_initial_setup(self, session):
        """
        Create a keyspace with rf=1 and a table containing a single row with 2 non-primary key columns.
        Insert 1 row, placing the data on a single initial replica. Then, alter the keyspace to rf=3, but don't
        repair. Tests will execute various reads on the replicas and assert the effects of read repair.
        :param session: Used to perform the schema setup & insert the data
        :return: a tuple containing the node which initially acts as the replica, and a list of the other two nodes
        """
        # Disable speculative retry and [dclocal]read_repair in initial setup.
        session.execute("""CREATE KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"""
                        )

        options = "speculative_retry='NONE'"
        if self.cluster.version() < '4.0':
            options = options + " AND read_repair_chance=0 AND dclocal_read_repair_chance=0"
        session.execute(
            "CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int) WITH "
            + options)

        session.execute(
            "INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")

        # identify the initial replica and trigger a flush to ensure reads come from sstables
        initial_replica, non_replicas = self.identify_initial_placement()
        logger.debug("At RF=1 replica for data is " + initial_replica.name)
        initial_replica.flush()

        # Just some basic validation.
        # At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
        # initial replica when reading at CL ONE
        for n in self.cluster.nodelist():
            logger.debug("Checking " + n.name)
            session = self.patient_exclusive_cql_connection(n)
            assert_one(session,
                       "SELECT * FROM alter_rf_test.t1 WHERE k=1", [1, 1, 1],
                       cl=ConsistencyLevel.ONE)

        # Alter so RF=n but don't repair, calling tests will execute queries to exercise read repair,
        # either at CL.ALL or after setting read_repair_chance to 100%.
        logger.debug("Changing RF from 1 to 3")
        session.execute("""ALTER KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};"""
                        )

        return initial_replica, non_replicas
Example #35
    def test_simultaneous_bootstrap(self):
        """
        Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.

        Start a one node cluster and run a stress write workload.
        Start up a second node, and wait for the first node to detect it has joined the cluster.
        While the second node is bootstrapping, start a third node. This should fail.

        @jira_ticket CASSANDRA-7069
        @jira_ticket CASSANDRA-9484
        """

        bootstrap_error = "Other bootstrapping/leaving/moving nodes detected," \
                          " cannot bootstrap while cassandra.consistent.rangemovement is true"

        cluster = self.cluster
        cluster.populate(1)
        cluster.start(wait_for_binary_proto=True)

        node1, = cluster.nodelist()

        node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)',
                      '-rate', 'threads=10'])

        node2 = new_node(cluster)
        node2.start(wait_other_notice=True)

        node3 = new_node(cluster, remote_debug_port='2003')
        try:
            node3.start(wait_other_notice=False, verbose=False)
        except NodeError:
            pass  # node doesn't start as expected

        time.sleep(.5)
        node2.watch_log_for("Starting listening for CQL clients")

        node3.watch_log_for(bootstrap_error)

        session = self.patient_exclusive_cql_connection(node2)

        # Repeat the select count(*) query, to help catch
        # bugs like 9484, where count(*) fails at higher
        # data loads.
        for _ in range(5):
            assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
    def aggregate_udf_test(self):
        session = self.prepare()
        session.execute("create table nums (key int primary key, val int);")

        for x in range(1, 4):
            session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, x))
        session.execute("create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'")
        session.execute("create function stri(key int) called on null input returns text language java as 'return key.toString();'")
        session.execute("create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10")

        assert_one(session, "select suma(val) from nums", ["16"])

        session.execute("create function test(a int, b double) called on null input returns int language javascript as 'a + b;'")
        session.execute("create aggregate aggy(double) sfunc test stype int")

        assert_invalid(session, "create aggregate aggtwo(int) sfunc aggy stype int")

        assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")
Example #37
    def _provoke_commitlog_failure(self):
        """
        Provoke the commitlog failure
        """
        debug('Provoking commitlog failure')
        # Test things are ok at this point
        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (1, 1);
        """)
        assert_one(self.session1, "SELECT * FROM test where key=1;", [1, 1])

        self._change_commitlog_perms(0)

        # Use stress_process to skip internal error handling in ccm. Grep node logs for specific errors in test method.
        self.node1.stress_process([
            'write', 'n=1M', 'no-warmup', '-col', 'size=FIXED(1000)', '-rate',
            'threads=25'
        ]).communicate()
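`_change_commitlog_perms`, used above and in the failure-policy tests, is also not part of this listing. A minimal sketch of what such a helper might do, assuming a ccm-style node whose `get_path()` directory contains a `commitlogs` folder; the directory name and layout are assumptions rather than the actual dtest code:

    def _change_commitlog_perms(self, mod):
        """Apply permission bits `mod` to the commitlog directory and every segment in it (illustrative sketch)."""
        import os  # normally imported at module level

        commitlog_dir = os.path.join(self.node1.get_path(), 'commitlogs')
        os.chmod(commitlog_dir, mod)
        for segment in os.listdir(commitlog_dir):
            os.chmod(os.path.join(commitlog_dir, segment), mod)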
    def compaction_test(self):
        """
        Test we can major compact after an incremental repair
        * Launch a three node cluster
        * Create a keyspace with RF 3 and a table
        * Stop node3
        * Insert 100 rows
        * Restart node3
        * Issue an incremental repair
        * Insert 50 more rows
        * Perform a major compaction on node3
        * Verify all data is present
        # TODO: I have no idea what this is testing. The assertions do not verify anything meaningful.
        # TODO: Fix all the string formatting
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        session.execute("create table tab(key int PRIMARY KEY, val int);")

        node3.stop()

        for x in range(0, 100):
            session.execute("insert into tab(key,val) values(" + str(x) + ",0)")
        node1.flush()

        node3.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node3.repair()
        else:
            node3.nodetool("repair -par -inc")
        for x in range(0, 150):
            session.execute("insert into tab(key,val) values(" + str(x) + ",1)")

        cluster.flush()

        node3.nodetool('compact')

        for x in range(0, 150):
            assert_one(session, "select val from tab where key =" + str(x), [1])
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute(
            'CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))'
        )

        # the blob is only here to make the row bigger internally so it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute(
                "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})"
                .format(0, r, r, bigish_blob, r * 2, r * 3))

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute(
                "UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(
                r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
Example #40
    def assertions_test(self):
        # assert_exception_test
        mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
        assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

        # assert_unavailable_test
        mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
        assert_unavailable(mock_session.execute)

        # assert_invalid_test
        mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
        assert_invalid(mock_session, "DUMMY QUERY")

        # assert_unauthorized_test
        mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
        assert_unauthorized(mock_session, "DUMMY QUERY", None)

        # assert_one_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1, 1]])
        assert_one(mock_session, "SELECT * FROM test", [1, 1])

        # assert_none_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[])
        assert_none(mock_session, "SELECT * FROM test")

        # assert_all_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
        assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

        # assert_almost_equal_test
        assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

        # assert_row_count_test
        mock_session = Mock()
        mock_session.execute = Mock(return_value=[[1]])
        assert_row_count(mock_session, 'test', 1)

        # assert_length_equal_test
        check = [1, 2, 3, 4]
        assert_length_equal(check, 4)
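    # assertions_test above exercises the dtest assertion helpers. For context, a
    # minimal sketch of what a helper like assert_one might look like, shown purely
    # as an illustration (not the project's actual implementation); it assumes
    # SimpleStatement is imported from cassandra.query:
    @staticmethod
    def _assert_one_sketch(session, query, expected, cl=None):
        statement = SimpleStatement(query, consistency_level=cl)
        rows = [list(row) for row in session.execute(statement)]
        assert rows == [expected], "Expected {}, got {} for query {}".format([expected], rows, query)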
    def _provoke_commitlog_failure(self):
        """
        Provoke the commitlog failure
        """
        debug('Provoking commitlog failure')
        # Test things are ok at this point
        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (1, 1);
        """)
        assert_one(
            self.session1,
            "SELECT * FROM test where key=1;",
            [1, 1]
        )

        self._change_commitlog_perms(0)

        # Use stress_process to skip internal error handling in ccm. Grep node logs for specific errors in test method.
        self.node1.stress_process(['write', 'n=1M', 'no-warmup', '-col', 'size=FIXED(1000)', '-rate', 'threads=25']).communicate()
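    # _provoke_commitlog_failure relies on _change_commitlog_perms(0) to make the
    # commitlog directory unwritable. A minimal sketch of such a helper, assuming
    # the ccm node keeps its segments under <node path>/commitlogs (an assumption
    # for illustration, not the suite's actual implementation):
    def _change_commitlog_perms_sketch(self, mod):
        import os
        path = os.path.join(self.node1.get_path(), 'commitlogs')
        for segment in os.listdir(path):                 # chmod the segments before the directory,
            os.chmod(os.path.join(path, segment), mod)   # since mod=0 would block the listing
        os.chmod(path, mod)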
    def update_and_drop_column_test(self):
        """
        Checks that dropped columns are properly handled in legacy sstables

        @jira_ticket CASSANDRA-11018
        """
        cursor = self._setup_cluster()

        cursor.execute('CREATE TABLE t (k text PRIMARY KEY, a int, b int)')

        cursor.execute("INSERT INTO t(k, a, b) VALUES ('some_key', 0, 0)")

        cursor = self._do_upgrade()

        cursor.execute("ALTER TABLE t DROP b")

        self.cluster.compact()

        assert_one(cursor, "SELECT * FROM t", ['some_key', 0])
    def test_update_and_drop_column(self):
        """
        Checks that dropped columns are properly handled in legacy sstables

        @jira_ticket CASSANDRA-11018
        """
        cursor = self._setup_cluster()

        cursor.execute('CREATE TABLE t (k text PRIMARY KEY, a int, b int)')

        cursor.execute("INSERT INTO t(k, a, b) VALUES ('some_key', 0, 0)")

        cursor = self._do_upgrade()

        cursor.execute("ALTER TABLE t DROP b")

        self.cluster.compact()

        assert_one(cursor, "SELECT * FROM t", ['some_key', 0])
    def upgrade_with_unclustered_table(self, compact_storage=False):
        PARTITIONS = 5

        session = self._setup_cluster()

        session.execute('CREATE TABLE t (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)' +
                        (' WITH COMPACT STORAGE' if compact_storage else ''))

        for n in range(PARTITIONS):
            session.execute("INSERT INTO t(k, v1, v2, v3, v4) VALUES ({}, {}, {}, {}, {})".format(n, n + 1, n + 2, n + 3, n + 4))

        session = self._do_upgrade()

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])

        self.cluster.compact()

        for n in range(PARTITIONS):
            assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])
    def upgrade_with_wide_partition(self, query_modifier=""):
        ROWS = 100

        session = self._setup_cluster()

        session.execute("CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))")

        # the blob is only here to make the row bigger internally so it sometimes spans multiple index blocks
        bigish_blob = "0x"
        for i in range(1000):
            bigish_blob = bigish_blob + "0000"

        for r in range(ROWS):
            session.execute(
                "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})".format(
                    0, r, r, bigish_blob, r * 2, r * 3
                )
            )

        self.cluster.flush()

        # delete every other row
        for r in range(0, ROWS, 2):
            session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r))

        # delete the set from every other remaining row
        for r in range(1, ROWS, 4):
            session.execute("UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r))

        session = self._do_upgrade()

        for r in range(0, ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)

        self.cluster.compact()

        for r in range(ROWS):
            query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format(r, query_modifier)
            if (r - 1) % 4 == 0:
                assert_one(session, query, [r, r, None])
            elif (r + 1) % 2 == 0:
                assert_one(session, query, [r, r, set([r * 2, r * 3])])
            else:
                assert_none(session, query)
Example #46
    def test_snapshot_and_restore_dropping_a_column(self):
        """
        @jira_ticket CASSANDRA-13276

        Can't load snapshots of tables with dropped columns.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node1, = cluster.nodelist()
        session = self.patient_cql_connection(node1)

        # Create schema and insert some data
        create_ks(session, 'ks', 1)
        session.execute("CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
        session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])

        # Drop a column
        session.execute("ALTER TABLE ks.cf DROP b")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])

        # Take a snapshot and drop the table
        snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
        session.execute("DROP TABLE ks.cf")

        # Restore schema and data from snapshot
        self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
        node1.nodetool('refresh ks cf')
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])

        # Clean up
        logger.debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
Example #47
    def test_snapshot_and_restore_dropping_a_column(self):
        """
        @jira_ticket CASSANDRA-13276

        Can't load snapshots of tables with dropped columns.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node1, = cluster.nodelist()
        session = self.patient_cql_connection(node1)

        # Create schema and insert some data
        create_ks(session, 'ks', 1)
        session.execute(
            "CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
        session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])

        # Drop a column
        session.execute("ALTER TABLE ks.cf DROP b")
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])

        # Take a snapshot and drop the table
        snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
        session.execute("DROP TABLE ks.cf")

        # Restore schema and data from snapshot
        self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
        node1.nodetool('refresh ks cf')
        assert_one(session, "SELECT * FROM ks.cf", [1, "a"])

        # Clean up
        logger.debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
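    # The snapshot tests above depend on make_snapshot / restore_snapshot helpers.
    # A rough sketch of the snapshot side, assuming ccm's data*/<ks>/<cf>-<id>/snapshots/<tag>
    # layout (an illustration of the approach, not the suite's actual helper; recent
    # Cassandra versions also write a schema.cql into the snapshot, which a
    # restore_snapshot_schema helper can replay):
    def _make_snapshot_sketch(self, node, ks, cf, tag):
        import glob
        import os
        import shutil
        import tempfile
        node.nodetool('snapshot -t {} -cf {} {}'.format(tag, cf, ks))
        snapshot_dir = tempfile.mkdtemp()
        pattern = os.path.join(node.get_path(), 'data*', ks, '{}-*'.format(cf), 'snapshots', tag)
        for src in glob.glob(pattern):
            for name in os.listdir(src):
                shutil.copy2(os.path.join(src, name), snapshot_dir)
        return snapshot_dir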
Example #48
    def test_compaction_strategy_switching(self, strategy):
        """
        Ensure that switching strategies does not result in problems.
        Insert data, switch strategies, then check against data loss.
        """
        strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']

        if strategy in strategies:
            strategies.remove(strategy)
            cluster = self.cluster
            cluster.populate(1).start(wait_for_binary_proto=True)
            [node1] = cluster.nodelist()

            for strat in strategies:
                session = self.patient_cql_connection(node1)
                create_ks(session, 'ks', 1)

                session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + strategy + "'};")

                for x in range(0, 100):
                    session.execute('insert into ks.cf (key, val) values (' + str(x) + ',1)')

                node1.flush()

                for x in range(0, 10):
                    session.execute('delete from cf where key = ' + str(x))

                session.execute("alter table ks.cf with compaction = {'class':'" + strat + "'};")

                for x in range(11, 100):
                    assert_one(session, "select * from ks.cf where key =" + str(x), [x, 1])

                for x in range(0, 10):
                    assert_none(session, 'select * from cf where key = ' + str(x))

                node1.flush()
                cluster.clear()
                time.sleep(5)
                cluster.start(wait_for_binary_proto=True)
    def do_initial_setup(self, session):
        """
        Create a keyspace with rf=1 and a table containing a single row with 2 non-primary key columns.
        Insert 1 row, placing the data on a single initial replica. Then, alter the keyspace to rf=3, but don't
        repair. Tests will execute various reads on the replicas and assert the effects of read repair.
        :param session: Used to perform the schema setup & insert the data
        :return: a tuple containing the node which initially acts as the replica, and a list of the other two nodes
        """
        # Disable speculative retry and [dclocal]read_repair in initial setup.
        session.execute("""CREATE KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};""")

        options = "speculative_retry='NONE'"
        if self.cluster.version() < '4.0':
            options = options + " AND read_repair_chance=0 AND dclocal_read_repair_chance=0"
        session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int) WITH " + options)

        session.execute("INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")

        # identify the initial replica and trigger a flush to ensure reads come from sstables
        initial_replica, non_replicas = self.identify_initial_placement()
        logger.debug("At RF=1 replica for data is " + initial_replica.name)
        initial_replica.flush()

        # Just some basic validation.
        # At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
        # initial replica when reading at CL ONE
        for n in self.cluster.nodelist():
            logger.debug("Checking " + n.name)
            session = self.patient_exclusive_cql_connection(n)
            assert_one(session, "SELECT * FROM alter_rf_test.t1 WHERE k=1", [1, 1, 1], cl=ConsistencyLevel.ONE)

        # Alter so RF=n but don't repair; calling tests will execute queries to exercise read repair,
        # either at CL.ALL or after setting read_repair_chance to 100%.
        logger.debug("Changing RF from 1 to 3")
        session.execute("""ALTER KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};""")

        return initial_replica, non_replicas
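    # do_initial_setup calls identify_initial_placement() to find which node owns
    # the single row at RF=1. A minimal sketch of one way to do that, using
    # `nodetool getendpoints` and matching the reported address against each node's
    # listen address; it assumes ccm's nodetool() returns a (stdout, stderr, rc)
    # tuple (an illustration, not the suite's actual helper):
    def _identify_initial_placement_sketch(self):
        out, _, _ = self.cluster.nodelist()[0].nodetool('getendpoints alter_rf_test t1 1')
        owner = out.strip().splitlines()[-1].strip()
        initial_replica, non_replicas = None, []
        for node in self.cluster.nodelist():
            if node.address() == owner:
                initial_replica = node
            else:
                non_replicas.append(node)
        return initial_replica, non_replicas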
Example #50
    def validate_empty_column_name_test(self):
        cluster = self.cluster
        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        create_ks(session, 'counter_tests', 1)

        session.execute("""
            CREATE TABLE compact_counter_table (
                pk int,
                ck text,
                value counter,
                PRIMARY KEY (pk, ck))
            WITH COMPACT STORAGE
            """)

        assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
        assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")

        session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
        session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")

        assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])
Example #51
    def test_udf_scripting(self):
        session = self.prepare()
        session.execute("create table nums (key int primary key, val double);")

        for x in range(1, 4):
            session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" %
                            (x, float(x)))

        session.execute(
            "CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'"
        )

        assert_one(session,
                   "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1,
                   [1, 1.0, math.sin(1.0)])
        assert_one(session,
                   "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2,
                   [2, 2.0, math.sin(2.0)])
        assert_one(session,
                   "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3,
                   [3, 3.0, math.sin(3.0)])

        session.execute(
            "create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'"
        )

        assert_invalid(session,
                       "select y_sin(val) from nums where key = 1",
                       expected=FunctionFailure)

        assert_invalid(
            session,
            "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';"
        )

        session.execute(
            "create function plustwo(key int) called on null input returns double language javascript as 'key+2'"
        )

        assert_one(session, "select plustwo(key) from nums where key = 3", [5])
    def udf_scripting_test(self):
        session = self.prepare()
        session.execute("create table nums (key int primary key, val double);")

        for x in range(1, 4):
            session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

        session.execute("CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'")

        assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1, [1, 1.0, math.sin(1.0)])
        assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2, [2, 2.0, math.sin(2.0)])
        assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3, [3, 3.0, math.sin(3.0)])

        session.execute("create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'")

        assert_invalid(session, "select y_sin(val) from nums where key = 1", expected=FunctionFailure)

        assert_invalid(session, "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';")

        session.execute("create function plustwo(key int) called on null input returns double language javascript as 'key+2'")

        assert_one(session, "select plustwo(key) from nums where key = 3", [5])
Example #53
    def drop_column_queries_test(self):
        session = self.prepare()

        session.execute("USE ks")
        session.execute(
            "CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
        session.execute("CREATE INDEX ON cf(c2)")

        # insert some data.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")

        # drop and readd c1.
        session.execute("ALTER TABLE cf DROP c1")
        session.execute("ALTER TABLE cf ADD c1 int")

        # add another row.
        session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

        # test that old (pre-drop) c1 values aren't returned and new ones are.
        assert_all(session,
                   "SELECT c1 FROM cf", [[None], [None], [None], [4]],
                   ignore_order=True)

        assert_all(session,
                   "SELECT * FROM cf",
                   [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]],
                   ignore_order=True)

        assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])

        assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])

        assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130

        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute(
                "CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(
            1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")

        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)

        # Wipe out data and restart
        cluster.clear()
        cluster.start()

        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)

        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")

        # Add some additional data before loading the SSTable, to check that it will still be accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')

        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)

        # Check that the index is marked as built and the index has been rebuilt
        assert_one(
            session,
            """SELECT * FROM system."IndexInfo" WHERE table_name='k'""",
            ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8",
                   [[0, 1, 8], [0, 2, 8]])
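    # sstableloader_with_failing_2i_test relies on copy_sstables/load_sstables
    # helpers. A condensed sketch of the loading side: run the sstableloader binary
    # from the node's install dir against each <ks>/<table> directory saved aside
    # earlier. The copied-data location and the bare environment are assumptions
    # for illustration, not the suite's actual helper:
    def _load_sstables_sketch(self, node, ks, copied_data_dir):
        import os
        import subprocess
        loader = os.path.join(node.get_install_dir(), 'bin', 'sstableloader')
        for table_dir in os.listdir(os.path.join(copied_data_dir, ks)):
            full_dir = os.path.join(copied_data_dir, ks, table_dir)
            if not os.path.isdir(full_dir):
                continue
            # sstableloader takes a .../<keyspace>/<table> directory plus a contact point
            proc = subprocess.Popen([loader, '-d', node.address(), full_dir],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, err = proc.communicate()
            if proc.returncode != 0:
                raise Exception('sstableloader failed: {}'.format(err))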
    def test_migration(self):
        """ Test migration of user functions """
        cluster = self.cluster

        # Uses 3 nodes just to make sure function mutations are correctly serialized
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]
        node2 = cluster.nodelist()[1]
        node3 = cluster.nodelist()[2]
        time.sleep(0.2)

        # The next three sessions use a whitelist policy and therefore don't wait for schema agreement,
        # so we create `schema_wait_session` to block on schema agreement and to run DDL changes
        schema_wait_session = self.patient_cql_connection(node1)
        create_ks(schema_wait_session, 'ks', 1)
        schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

        node1_session = self.patient_exclusive_cql_connection(node1, keyspace='ks')
        node2_session = self.patient_exclusive_cql_connection(node2, keyspace='ks')
        node3_session = self.patient_exclusive_cql_connection(node3, keyspace='ks')

        schema_wait_session.execute("""
            CREATE TABLE udf_kv (
                key    int primary key,
                value  double
            );
        """)
        schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

        node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(1, 1))
        node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(2, 2))
        node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(3, 3))

        schema_wait_session.execute("""
            create or replace function x_sin ( input double ) called on null input
            returns double language java as 'if (input==null) return null;
            return Double.valueOf(Math.sin(input.doubleValue()));'
            """)
        schema_wait_session.execute("""
            create or replace function x_cos ( input double ) called on null input
            returns double language java as 'if (input==null) return null;
            return Double.valueOf(Math.cos(input.doubleValue()));'
            """)
        schema_wait_session.execute("""
            create or replace function x_tan ( input double ) called on null input
            returns double language java as 'if (input==null) return null;
            return Double.valueOf(Math.tan(input.doubleValue()));'
            """)

        schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

        assert_one(node1_session,
                   "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 1,
                   [1, 1.0, 0.8414709848078965, 0.5403023058681398, 1.5574077246549023])

        assert_one(node2_session,
                   "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 2,
                   [2, 2.0, math.sin(2.0), math.cos(2.0), math.tan(2.0)])

        assert_one(node3_session,
                   "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 3,
                   [3, 3.0, math.sin(3.0), math.cos(3.0), math.tan(3.0)])

        session4 = self.patient_cql_connection(node1)

        # check that functions are correctly confined to namespaces
        assert_invalid(session4,
                       "SELECT key, value, sin(value), cos(value), tan(value) FROM ks.udf_kv where key = 4",
                       "Unknown function 'sin'")

        # try giving existing function bad input, should error
        assert_invalid(node1_session,
                       "SELECT key, value, x_sin(key), foo_cos(KEYy), foo_tan(key) FROM ks.udf_kv where key = 1",
                       "Type error: key cannot be passed as argument 0 of function ks.x_sin of type double")

        node2_session.execute("drop function x_sin")
        node3_session.execute("drop function x_cos")
        node1_session.execute("drop function x_tan")

        schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

        assert_invalid(node1_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
        assert_invalid(node2_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
        assert_invalid(node3_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")

        # try creating function returning the wrong type, should error
        assert_invalid(node1_session,
                       "CREATE FUNCTION bad_sin ( input double ) CALLED ON NULL INPUT RETURNS uuid LANGUAGE java AS 'return Math.sin(input);';",
                       "Type mismatch: cannot convert from double to UUID")
    def crc_check_chance_upgrade_test(self):
        """
        Tests behavior of compression property crc_check_chance after upgrade to 3.0,
        when it was promoted to a top-level property

        @jira_ticket CASSANDRA-9839
        """
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="github:apache/cassandra-2.2")
        cluster.populate(2).start()

        node1, node2 = cluster.nodelist()

        # Create table
        session = self.patient_cql_connection(node1)
        session.execute("CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}")
        session.execute("""CREATE TABLE ks.cf1 (id int primary key, val int) WITH compression = {
                          'sstable_compression': 'DeflateCompressor',
                          'chunk_length_kb': 256,
                          'crc_check_chance': 0.6 }
                        """)

        # Insert and query data
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (0, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (1, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (2, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (3, 0)")
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=0", [0, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=1", [1, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=2", [2, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=3", [3, 0])
        session.shutdown()

        self.verify_old_crc_check_chance(node1)
        self.verify_old_crc_check_chance(node2)

        # upgrade node1 to 3.0
        self.upgrade_to_version("cassandra-3.0", node1)

        self.verify_new_crc_check_chance(node1)
        self.verify_old_crc_check_chance(node2)

        # Insert and query data
        session = self.patient_cql_connection(node1)
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (4, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (5, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (6, 0)")
        session.execute("INSERT INTO ks.cf1(id, val) VALUES (7, 0)")
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=0", [0, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=1", [1, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=2", [2, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=3", [3, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=4", [4, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=5", [5, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=6", [6, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=7", [7, 0])
        session.shutdown()

        # upgrade node2 to 3.0
        self.upgrade_to_version("cassandra-3.0", node2)

        self.verify_new_crc_check_chance(node1)
        self.verify_new_crc_check_chance(node2)

        # read data again
        session = self.patient_cql_connection(node1)
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=0", [0, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=1", [1, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=2", [2, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=3", [3, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=4", [4, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=5", [5, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=6", [6, 0])
        assert_one(session, "SELECT * FROM ks.cf1 WHERE id=7", [7, 0])
        session.shutdown()

        debug('Test completed successfully')
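    # crc_check_chance_upgrade_test leans on verify_old_crc_check_chance /
    # verify_new_crc_check_chance. A hedged sketch of what such checks might look
    # like: before 3.0 the value is buried in the compression parameters stored in
    # system.schema_columnfamilies, while from 3.0 it is a top-level column of
    # system_schema.tables (CASSANDRA-9839). An illustration only, not the suite's
    # actual helpers:
    def _verify_crc_check_chance_sketch(self, node, upgraded):
        import json
        session = self.patient_exclusive_cql_connection(node)
        if upgraded:
            assert_one(session,
                       "SELECT crc_check_chance FROM system_schema.tables "
                       "WHERE keyspace_name='ks' AND table_name='cf1'",
                       [0.6])
        else:
            row = list(session.execute(
                "SELECT compression_parameters FROM system.schema_columnfamilies "
                "WHERE keyspace_name='ks' AND columnfamily_name='cf1'"))[0]
            self.assertEqual('0.6', json.loads(row[0])['crc_check_chance'])
        session.shutdown()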
    def multiple_repair_test(self):
        """
        * Launch a three node cluster
        * Create a keyspace with RF 3 and a table
        * Insert 49 rows
        * Stop node3
        * Insert 50 more rows
        * Restart node3
        * Issue an incremental repair on node3
        * Stop node2
        * Insert a final 50 rows
        * Restart node2
        * Issue an incremental repair on node2
        * Replace node3 with a new node
        * Verify data integrity
        # TODO: Several more verifications of data need to be interspersed throughout the test. The final assertion is insufficient.
        @jira_ticket CASSANDRA-10644
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})

        debug("insert data")

        insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
        node1.flush()

        debug("bringing down node 3")
        node3.flush()
        node3.stop(gently=False)

        debug("inserting additional data into node 1 and 2")
        insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
        node1.flush()
        node2.flush()

        debug("restarting and repairing node 3")
        node3.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node3.repair()
        else:
            node3.nodetool("repair -par -inc")

        # wait for stream handlers to be closed on Windows
        # after the session is finished (see CASSANDRA-10644)
        if is_win:
            time.sleep(2)

        debug("stopping node 2")
        node2.stop(gently=False)

        debug("inserting data in nodes 1 and 3")
        insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
        node1.flush()
        node3.flush()

        debug("start and repair node 2")
        node2.start(wait_for_binary_proto=True)

        if cluster.version() >= "2.2":
            node2.repair()
        else:
            node2.nodetool("repair -par -inc")

        debug("replace node and check data integrity")
        node3.stop(gently=False)
        node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
        cluster.add(node5, False)
        node5.start(replace_address='127.0.0.3', wait_other_notice=True)

        assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
    def read_and_validate_data(session):
        for i in range(NUM_KEYS):
            query = "SELECT * FROM standard1 WHERE KEY='{}'".format(i)
            assert_one(session, query, [str(i), 'col', str(i)])
            query = "SELECT * FROM counter1 WHERE KEY='{}'".format(i)
            assert_one(session, query, [str(i), 1])
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130

        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute("CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")

        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)

        # Wipe out data and restart
        cluster.clear()
        cluster.start()

        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)

        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")

        # Add some additional data before loading the SSTable, to check that it will still be accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')

        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])

        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)

        # Check that the index is marked as built and the index has been rebuilt
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8", [[0, 1, 8], [0, 2, 8]])
Example #60
    def alter_rf_and_run_read_repair_test(self):
        """
        @jira_ticket CASSANDRA-10655
        @jira_ticket CASSANDRA-10657

        Test that querying only a subset of all the columns in a row doesn't confuse read-repair to avoid
        the problem described in CASSANDRA-10655.
        """

        session = self.patient_cql_connection(self.cluster.nodelist()[0])
        session.execute("""CREATE KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};""")
        session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int);")
        session.execute("INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")
        cl_one_stmt = SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1",
                                      consistency_level=ConsistencyLevel.ONE)

        # identify the initial replica and trigger a flush to ensure reads come from sstables
        initial_replica, non_replicas = self.identify_initial_placement('alter_rf_test', 't1', 1)
        debug("At RF=1 replica for data is " + initial_replica.name)
        initial_replica.flush()

        # At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
        # initial replica when reading at CL ONE
        for n in self.cluster.nodelist():
            debug("Checking " + n.name)
            session = self.patient_exclusive_cql_connection(n)
            assert_one(session, "SELECT * FROM alter_rf_test.t1 WHERE k=1", [1, 1, 1], cl=ConsistencyLevel.ONE)

        # Alter so RF=n but don't repair, then execute a query which selects only a subset of the columns. Run this at
        # CL ALL on one of the nodes which doesn't currently have the data, triggering a read repair.
        # The expectation is that every replica will have been repaired for that column (but we make no
        # assumptions about the other columns).
        debug("Changing RF from 1 to 3")
        session.execute("""ALTER KEYSPACE alter_rf_test
                           WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};""")
        cl_all_stmt = SimpleStatement("SELECT a FROM alter_rf_test.t1 WHERE k=1",
                                      consistency_level=ConsistencyLevel.ALL)
        debug("Executing SELECT on non-initial replica to trigger read repair " + non_replicas[0].name)
        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
        # result of the CL ALL query contains only the selected column
        assert_one(read_repair_session, "SELECT a FROM alter_rf_test.t1 WHERE k=1", [1], cl=ConsistencyLevel.ALL)

        # Check the results of the read repair by querying each replica again at CL ONE
        debug("Re-running SELECTs at CL ONE to verify read repair")
        for n in self.cluster.nodelist():
            debug("Checking " + n.name)
            session = self.patient_exclusive_cql_connection(n)
            res = rows_to_list(session.execute(cl_one_stmt))
            # Column a must be 1 everywhere, and column b must be either 1 or None everywhere
            self.assertIn(res[0][1:], [[1, 1], [1, None]])

        # Now query at ALL but selecting all columns
        query = "SELECT * FROM alter_rf_test.t1 WHERE k=1"
        debug("Executing SELECT on non-initial replica to trigger read repair " + non_replicas[0].name)
        read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
        assert_one(read_repair_session, query, [1, 1, 1], cl=ConsistencyLevel.ALL)

        # Check that every replica is fully up to date
        debug("Re-running SELECTs at CL ONE to verify read repair")
        for n in self.cluster.nodelist():
            debug("Checking " + n.name)
            session = self.patient_exclusive_cql_connection(n)
            assert_one(session, query, [1, 1, 1], cl=ConsistencyLevel.ONE)