def conditional_updates_on_static_columns_with_non_existing_values_test(self):
    session = self.prepare(3)

    table_name = "conditional_updates_on_static_columns_with_ne"
    session.execute("""
        CREATE TABLE {} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
    """.format(table_name))

    self._validate_non_existing_or_null_values(table_name, session)

    assert_one(session, "UPDATE {} SET s = 30 WHERE a = 3 IF s IN (10,20,30)".format(table_name), [False])
    assert_none(session, "SELECT * FROM {} WHERE a = 3".format(table_name))

    for operator in [">", "<", ">=", "<=", "="]:
        assert_one(session, "UPDATE {} SET s = 50 WHERE a = 5 IF s {} 3".format(table_name, operator), [False])
        assert_none(session, "SELECT * FROM {} WHERE a = 5".format(table_name))
def aggregate_udf_test(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val int);")

    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, x))

    session.execute("create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'")
    session.execute("create function stri(key int) called on null input returns text language java as 'return key.toString();'")
    session.execute("create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10")

    assert_one(session, "select suma(val) from nums", ["16"])

    session.execute("create function test(a int, b double) called on null input returns int language javascript as 'a + b;'")
    session.execute("create aggregate aggy(double) sfunc test stype int")

    assert_invalid(session, "create aggregate aggtwo(int) sfunc aggy stype int")
    assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")
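# Illustrative sketch (not part of the original suite): CQL aggregate semantics
# modelled in plain Python. The state function (sfunc) folds each row into the
# running state, seeded with initcond; the final function (finalfunc) maps the
# final state to the result type. `run_aggregate` is a hypothetical helper name.
from functools import reduce

def run_aggregate(rows, sfunc, initcond, finalfunc=lambda state: state):
    return finalfunc(reduce(sfunc, rows, initcond))

# Mirrors suma over val = 1, 2, 3 with initcond 10 and finalfunc stri,
# which is why the test above expects the text value "16":
assert run_aggregate([1, 2, 3], lambda state, val: state + val, 10, str) == "16"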
def compaction_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 3)
    session.execute("create table tab(key int PRIMARY KEY, val int);")

    node3.stop()

    for x in range(0, 100):
        session.execute("insert into tab(key,val) values(" + str(x) + ",0)")
    node1.flush()

    node3.start(wait_for_binary_proto=True)

    if cluster.version() >= "2.2":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")

    for x in range(0, 150):
        session.execute("insert into tab(key,val) values(" + str(x) + ",1)")

    node1.flush()
    node2.flush()
    node3.flush()

    node3.nodetool('compact')

    for x in range(0, 150):
        assert_one(session, "select val from tab where key =" + str(x), [1])
def index_test(self):
    """
    Smoke test CQL statements related to indexes:

    - CREATE a table
    - CREATE an index on that table
    - INSERT 10 values into the table
    - SELECT from the table over the indexed value and assert the expected values come back
    - drop the index
    - assert SELECTing over the indexed value raises an InvalidRequest
    # TODO run SELECTs to make sure each statement works
    """
    session = self.prepare()

    session.execute("CREATE TABLE test3 (k int PRIMARY KEY, v1 int, v2 int)")
    table_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test3')
    session.execute("CREATE INDEX testidx ON test3 (v1)")
    self.assertIn('testidx', table_meta.indexes)

    for i in range(0, 10):
        session.execute("INSERT INTO test3 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))

    assert_one(session, "SELECT * FROM test3 WHERE v1 = 0", [0, 0, 0])

    session.execute("DROP INDEX testidx")
    self.assertNotIn('testidx', table_meta.indexes)
def rename_test(self):
    session = self.prepare(start_rpc=True)

    node = self.cluster.nodelist()[0]
    host, port = node.network_interfaces['thrift']
    client = get_thrift_client(host, port)
    client.transport.open()

    cfdef = CfDef()
    cfdef.keyspace = 'ks'
    cfdef.name = 'test'
    cfdef.column_type = 'Standard'
    cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
    cfdef.key_validation_class = 'UTF8Type'
    cfdef.default_validation_class = 'UTF8Type'

    client.set_keyspace('ks')
    client.system_add_column_family(cfdef)

    session.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")

    time.sleep(1)

    session.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
    assert_one(session, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
def rename_test(self):
    """
    Check that columns of a thrift-created table can be renamed via CQL:

    - create a table via the thrift interface
    - INSERT a row via CQL
    - ALTER the names of the clustering columns via CQL
    - SELECT from the table and assert the values inserted are there
    """
    session = self.prepare(start_rpc=True)

    node = self.cluster.nodelist()[0]
    host, port = node.network_interfaces['thrift']
    client = get_thrift_client(host, port)
    client.transport.open()

    cfdef = CfDef()
    cfdef.keyspace = 'ks'
    cfdef.name = 'test'
    cfdef.column_type = 'Standard'
    cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
    cfdef.key_validation_class = 'UTF8Type'
    cfdef.default_validation_class = 'UTF8Type'

    client.set_keyspace('ks')
    client.system_add_column_family(cfdef)

    session.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")

    session.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
    assert_one(session, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
def drop_column_and_restart_test(self):
    """
    Simply insert data in a table, drop a column involved in the insert
    and restart the node afterwards.
    This ensures that the dropped_columns system table is properly flushed
    on the alter or the restart fails as in CASSANDRA-11050.

    @jira_ticket CASSANDRA-11050
    """
    session = self.prepare()

    session.execute("USE ks")
    session.execute("CREATE TABLE t (k int PRIMARY KEY, c1 int, c2 int)")
    session.execute("INSERT INTO t (k, c1, c2) VALUES (0, 0, 0)")
    session.execute("ALTER TABLE t DROP c2")

    assert_one(session, "SELECT * FROM t", [0, 0])

    self.cluster.stop()
    self.cluster.start()

    session = self.patient_cql_connection(self.cluster.nodelist()[0])
    session.execute("USE ks")

    assert_one(session, "SELECT * FROM t", [0, 0])
def test_read_old_sstables_after_upgrade(self):
    """
    From 2.1, the location of sstables changed (CASSANDRA-5202), but
    existing sstables continue to be read from the old location. Verify
    that this works for index sstables as well as regular data column
    families (CASSANDRA-9116).
    """
    cluster = self.cluster
    # Forcing cluster version on purpose
    cluster.set_install_dir(version="2.0.12")
    if "memtable_allocation_type" in cluster._config_options:
        del cluster._config_options["memtable_allocation_type"]
    cluster.populate(1).start()

    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, "index_upgrade", 1)
    session.execute("CREATE TABLE index_upgrade.table1 (k int PRIMARY KEY, v int)")
    session.execute("CREATE INDEX ON index_upgrade.table1(v)")
    session.execute("INSERT INTO index_upgrade.table1 (k,v) VALUES (0,0)")
    query = "SELECT * FROM index_upgrade.table1 WHERE v=0"
    assert_one(session, query, [0, 0])

    # Upgrade to the 2.1.x version
    node1.drain()
    node1.watch_log_for("DRAINED")
    node1.stop(wait_other_notice=False)
    debug("Upgrading to current version")
    self.set_node_to_current_version(node1)
    node1.start(wait_other_notice=True)

    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    debug(cluster.cassandra_version())
    assert_one(session, query, [0, 0])
def stop_commit_failure_policy_test(self):
    """
    Test the stop_commit commitlog failure policy
    """
    self.prepare(configuration={'commit_failure_policy': 'stop_commit'})

    self.session1.execute("""
        INSERT INTO test (key, col1) VALUES (2, 2);
    """)

    self._provoke_commitlog_failure()
    failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread")
    debug(failure)
    self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
    self.assertTrue(self.node1.is_running(), "Node1 should still be running")

    # Cannot write anymore after the failure
    debug('attempting to insert to node with failing commitlog; should fail')
    with self.assertRaises((OperationTimedOut, WriteTimeout)):
        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (2, 2);
        """)

    # Should be able to read
    debug('attempting to read from node with failing commitlog; should succeed')
    assert_one(self.session1, "SELECT * FROM test where key=2;", [2, 2])
def validate_empty_column_name_test(self):
    cluster = self.cluster
    cluster.populate(1).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'counter_tests', 1)

    session.execute("""
        CREATE TABLE compact_counter_table (
            pk int,
            ck text,
            value counter,
            PRIMARY KEY (pk, ck))
        WITH COMPACT STORAGE
        """)

    assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
    assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")

    session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
    session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")

    assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])
def upgrade_with_unclustered_table(self, compact_storage=False):
    PARTITIONS = 5

    session = self._setup_cluster()

    session.execute('CREATE TABLE t (k int PRIMARY KEY, v1 int, v2 int, v3 int, v4 int)' +
                    (' WITH COMPACT STORAGE' if compact_storage else ''))

    for n in range(PARTITIONS):
        session.execute("INSERT INTO t(k, v1, v2, v3, v4) VALUES ({}, {}, {}, {}, {})".format(n, n + 1, n + 2, n + 3, n + 4))

    session = self._do_upgrade()

    for n in range(PARTITIONS):
        assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])

    self.cluster.compact()

    for n in range(PARTITIONS):
        assert_one(session, "SELECT * FROM t WHERE k = {}".format(n), [n, n + 1, n + 2, n + 3, n + 4])
def query_all_new_column_test(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
    @expected_result The new column is present in the view.
    """
    session = self.prepare(user_table=True)

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = '******'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = '******'")
    self.assertEqual(len(results), 1)
    self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = '******'",
        ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
    )
def udf_with_udt_test(self):
    """
    Test UDFs that operate on non-frozen UDTs.
    @jira_ticket CASSANDRA-7423
    @since 3.6
    """
    session = self.prepare()
    session.execute("create type test (a text, b int);")
    session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

    if LooseVersion(self.cluster.version()) >= LooseVersion('3.6'):
        frozen_vals = (False, True)
    else:
        frozen_vals = (True,)

    for frozen in frozen_vals:
        debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))

        table_name = "tab_frozen" if frozen else "tab"
        column_type = "frozen<test>" if frozen else "test"
        session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))

        session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
        session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
        session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))

        assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])

    assert_invalid(session, "drop type test;")
def query_new_column_test(self):
    """
    Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
    @expected_result The new column is not present in the view.
    """
    session = self.prepare(user_table=True)

    session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
                     "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = '******'",
        ['TX', 'user1']
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = '******'")
    self.assertEqual(len(results), 1)
    self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = '******'",
        ['TX', 'user1']
    )
def compaction_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 3)
    cursor.execute("create table tab(key int PRIMARY KEY, val int);")

    node3.stop()

    for x in range(0, 100):
        cursor.execute("insert into tab(key,val) values(" + str(x) + ",0)")
    node1.flush()

    node3.start()

    if cluster.version() >= "3.0":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")

    for x in range(0, 150):
        cursor.execute("insert into tab(key,val) values(" + str(x) + ",1)")

    node1.flush()
    node2.flush()
    node3.flush()

    node3.nodetool('compact')

    for x in range(0, 150):
        assert_one(cursor, "select val from tab where key =" + str(x), [1])
def stop_commit_failure_policy_test(self):
    """
    Test the stop_commit commitlog failure policy
    """
    self.prepare(configuration={'commit_failure_policy': 'stop_commit'})

    self.session1.execute("""
        INSERT INTO test (key, col1) VALUES (2, 2);
    """)

    self._provoke_commitlog_failure()
    failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread")
    debug(failure)
    self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
    self.assertTrue(self.node1.is_running(), "Node1 should still be running")

    # Cannot write anymore after the failure
    with self.assertRaises((OperationTimedOut, WriteTimeout)):
        self.session1.execute("""
            INSERT INTO test (key, col1) VALUES (2, 2);
        """)

    # Should be able to read
    assert_one(
        self.session1,
        "SELECT * FROM test where key=2;",
        [2, 2]
    )
def multiple_repair_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 3)
    self.create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})

    debug("insert data")

    insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
    node1.flush()

    debug("bringing down node 3")
    node3.flush()
    node3.stop(gently=False)

    debug("inserting additional data into node 1 and 2")
    insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
    node1.flush()
    node2.flush()

    debug("restarting and repairing node 3")
    node3.start(wait_for_binary_proto=True)

    if cluster.version() >= "2.2":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")

    # wait stream handlers to be closed on windows
    # after session is finished (See CASSANDRA-10644)
    if is_win:
        time.sleep(2)

    debug("stopping node 2")
    node2.stop(gently=False)

    debug("inserting data in nodes 1 and 3")
    insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
    node1.flush()
    node3.flush()

    debug("start and repair node 2")
    node2.start(wait_for_binary_proto=True)

    if cluster.version() >= "2.2":
        node2.repair()
    else:
        node2.nodetool("repair -par -inc")

    debug("replace node and check data integrity")
    node3.stop(gently=False)
    node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
    cluster.add(node5, False)
    node5.start(replace_address='127.0.0.3', wait_other_notice=True)

    assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
def assertions_test(self):
    # assert_exception_test
    mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
    assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

    # assert_unavailable_test
    mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailable message.")})
    assert_unavailable(mock_session.execute)

    # assert_invalid_test
    mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
    assert_invalid(mock_session, "DUMMY QUERY")

    # assert_unauthorized_test
    mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
    assert_unauthorized(mock_session, "DUMMY QUERY", None)

    # assert_one_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1, 1]])
    assert_one(mock_session, "SELECT * FROM test", [1, 1])

    # assert_none_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[])
    assert_none(mock_session, "SELECT * FROM test")

    # assert_all_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
    assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

    # assert_almost_equal_test
    assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

    # assert_row_count_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1]])
    assert_row_count(mock_session, 'test', 1)

    # assert_length_equal_test
    check = [1, 2, 3, 4]
    assert_length_equal(check, 4)
def multiple_repair_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    [node1, node2, node3] = cluster.nodelist()

    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 3)
    self.create_cf(cursor, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})

    debug("insert data")
    for x in range(1, 50):
        insert_c1c2(cursor, x, ConsistencyLevel.ALL)
    node1.flush()

    debug("bringing down node 3")
    node3.flush()
    node3.stop(gently=False)

    debug("inserting additional data into node 1 and 2")
    for y in range(50, 100):
        insert_c1c2(cursor, y, ConsistencyLevel.TWO)
    node1.flush()
    node2.flush()

    debug("restarting and repairing node 3")
    node3.start()

    if cluster.version() >= "3.0":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")

    debug("stopping node 2")
    node2.stop(gently=False)

    debug("inserting data in nodes 1 and 3")
    for z in range(100, 150):
        insert_c1c2(cursor, z, ConsistencyLevel.TWO)
    node1.flush()
    node3.flush()

    debug("start and repair node 2")
    node2.start()

    if cluster.version() >= "3.0":
        node2.repair()
    else:
        node2.nodetool("repair -par -inc")

    debug("replace node and check data integrity")
    node3.stop(gently=False)
    node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
    cluster.add(node5, False)
    node5.start(replace_address='127.0.0.3', wait_other_notice=True)

    assert_one(cursor, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
def interrupt_build_process_test(self):
    """Test that an interrupted MV build process is resumed as it should"""
    session = self.prepare(options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    debug("Inserting initial data")
    for i in xrange(10000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )

    debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("Stop the cluster. Interrupt the MV build process.")
    self.cluster.stop()

    debug("Restart the cluster")
    self.cluster.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node1)
    session.execute("USE ks")

    debug("MV shouldn't be built yet.")
    assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")

    debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
    start = time.time()
    # Poll until the view holds all 10000 rows (assertNotEqual then raises
    # and we break out) or until two minutes have elapsed.
    while True:
        try:
            result = session.execute("SELECT count(*) FROM t_by_v;")
            self.assertNotEqual(result[0].count, 10000)
        except AssertionError:
            debug("MV build process is finished")
            break

        elapsed = (time.time() - start) / 60
        if elapsed > 2:
            break

        time.sleep(5)

    debug("Verify all data")
    result = session.execute("SELECT count(*) FROM t_by_v;")
    self.assertEqual(result[0].count, 10000)
    for i in xrange(10000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )
def simple_repair_test(self):
    """
    Test that a materialized view is consistent after a simple repair.
    """
    session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    debug('Shutdown node2')
    node2.stop(wait_other_notice=True)

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    self._replay_batchlogs()

    debug('Verify the data in the MV with CL=ONE')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
    for i in xrange(1000):
        statement = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        assert_unavailable(
            session.execute,
            statement
        )

    debug('Start node2, and repair')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    node1.repair()

    debug('Verify the data in the MV with CL=ONE. All should be available now.')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ONE
        )
def assert_bootstrap_state(tester, node, expected_bootstrap_state):
    """
    Assert that a node is in a given bootstrap state
    @param tester The dtest.Tester object to fetch the exclusive connection to the node
    @param node The node to check bootstrap state
    @param expected_bootstrap_state Bootstrap state to expect

    Examples:
    assert_bootstrap_state(self, node3, 'COMPLETED')
    """
    session = tester.patient_exclusive_cql_connection(node)
    assert_one(session, "SELECT bootstrapped FROM system.local WHERE key='local'", [expected_bootstrap_state])
def default_aggregate_test(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val double);")

    for x in range(1, 10):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

    assert_one(session, "SELECT min(key) FROM nums", [1])
    assert_one(session, "SELECT max(val) FROM nums", [9.0])
    assert_one(session, "SELECT sum(key) FROM nums", [45])
    assert_one(session, "SELECT avg(val) FROM nums", [5.0])
    assert_one(session, "SELECT count(*) FROM nums", [9])
def simultaneous_bootstrap_test(self):
    """
    Attempt to bootstrap two nodes at once, to assert the second
    bootstrapped node fails, and does not interfere.

    Start a one node cluster and run a stress write workload.
    Start up a second node, and wait for the first node to detect it has
    joined the cluster.
    While the second node is bootstrapping, start a third node. This should
    fail.

    @jira_ticket CASSANDRA-7069
    @jira_ticket CASSANDRA-9484
    """
    bootstrap_error = ("Other bootstrapping/leaving/moving nodes detected,"
                       " cannot bootstrap while cassandra.consistent.rangemovement is true")

    self.ignore_log_patterns.append(bootstrap_error)

    cluster = self.cluster
    cluster.populate(1)
    cluster.start(wait_for_binary_proto=True)

    node1, = cluster.nodelist()

    node1.stress(['write', 'n=500K', '-schema', 'replication(factor=1)',
                  '-rate', 'threads=10'])

    node2 = new_node(cluster)
    node2.start(wait_other_notice=True)

    node3 = new_node(cluster, remote_debug_port='2003')
    process = node3.start(wait_other_notice=False)
    stdout, stderr = process.communicate()
    self.assertIn(bootstrap_error, stderr, msg=stderr)
    time.sleep(.5)
    self.assertFalse(node3.is_running(), msg="Two nodes bootstrapped simultaneously")

    node2.watch_log_for("Starting listening for CQL clients")

    session = self.patient_exclusive_cql_connection(node2)

    # Repeat the select count(*) query, to help catch
    # bugs like 9484, where count(*) fails at higher
    # data loads.
    for _ in xrange(5):
        assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
def multi_table_batch_for_10554_test(self):
    """ Test a batch on 2 tables having different columns, restarting the node afterwards, to reproduce CASSANDRA-10554 """

    session = self.prepare()

    # prepare() adds users and clicks but clicks is a counter table, so adding a random other table for this test.
    session.execute("""
        CREATE TABLE dogs (
            dogid int PRIMARY KEY,
            dogname text,
        );
    """)

    session.execute("""
        BEGIN BATCH

        INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
        INSERT INTO dogs (dogid, dogname) VALUES (0, 'Pluto')

        APPLY BATCH
    """)

    assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
    assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])

    # Flush and restart the node as it's how 10554 reproduces
    node1 = self.cluster.nodelist()[0]
    node1.flush()
    node1.stop()
    node1.start(wait_for_binary_proto=True)

    session = self.patient_cql_connection(node1, keyspace='ks')

    assert_one(session, "SELECT * FROM users", [0, 'Jack', 'Sparrow'])
    assert_one(session, "SELECT * FROM dogs", [0, 'Pluto'])
def statements_test(self):
    """
    Smoke test SELECT and UPDATE statements:

    - create a table
    - insert 20 rows into the table
    - run SELECT COUNT queries and assert they return the correct values
        - bare and with IN and equality conditions
    - run SELECT * queries with = conditions
    - run UPDATE queries
    - SELECT * and assert the UPDATEd values are there
    - DELETE with a = condition
    - SELECT the deleted values and make sure nothing is returned
    # TODO run SELECTs to make sure each statement works
    """
    session = self.prepare()

    session.execute("CREATE TABLE test7 (kind text, time int, v1 int, v2 int, PRIMARY KEY(kind, time) )")

    for i in range(0, 10):
        session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev1', {i}, {i}, {i})".format(i=i))
        session.execute("INSERT INTO test7 (kind, time, v1, v2) VALUES ('ev2', {i}, {i}, {i})".format(i=i))

    assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [10])

    assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2')", [20])

    assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind IN ('ev1', 'ev2') AND time=0", [2])

    assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, i, i] for i in range(0, 10)])

    assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev2'", [['ev2', i, i, i] for i in range(0, 10)])

    for i in range(0, 10):
        session.execute("UPDATE test7 SET v1 = 0, v2 = 0 where kind = 'ev1' AND time={i}".format(i=i))

    assert_all(session, "SELECT * FROM test7 WHERE kind = 'ev1'", [['ev1', i, 0, 0] for i in range(0, 10)])

    session.execute("DELETE FROM test7 WHERE kind = 'ev1'")

    assert_none(session, "SELECT * FROM test7 WHERE kind = 'ev1'")

    assert_one(session, "SELECT COUNT(*) FROM test7 WHERE kind = 'ev1'", [0])
def drop_column_queries_test(self):
    session = self.prepare()

    session.execute("USE ks")
    session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
    session.execute("CREATE INDEX ON cf(c2)")

    # insert some data.
    session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
    session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
    session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")

    # drop and readd c1.
    session.execute("ALTER TABLE cf DROP c1")
    session.execute("ALTER TABLE cf ADD c1 int")

    # add another row.
    session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

    # test that old (pre-drop) c1 values aren't returned and new ones are.
    assert_all(session, "SELECT c1 FROM cf", [[None], [None], [None], [4]], ignore_order=True)

    assert_all(session, "SELECT * FROM cf", [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]], ignore_order=True)

    assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])

    assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])

    assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])

    assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
def conditional_updates_on_static_columns_with_null_values_batch_test(self):
    session = self.prepare(3)

    table_name = "lwt_on_static_columns_with_null_batch"
    session.execute("""
        CREATE TABLE {table_name} (a int, b int, s int static, d text, PRIMARY KEY (a, b))
    """.format(table_name=table_name))

    for i in range(1, 7):
        session.execute("INSERT INTO {table_name} (a, b) VALUES ({i}, {i})".format(table_name=table_name, i=i))

    self._validate_non_existing_or_null_values_batch(table_name, session)

    for operator in [">", "<", ">=", "<=", "="]:
        assert_one(session, """
            BEGIN BATCH
                INSERT INTO {table_name} (a, b, s, d) values (3, 3, 40, 'a')
                UPDATE {table_name} SET s = 30 WHERE a = 3 IF s {operator} 5;
            APPLY BATCH""".format(table_name=table_name, operator=operator), [False])

        assert_one(session, "SELECT * FROM {table_name} WHERE a = 3".format(table_name=table_name), [3, 3, None, None])

    assert_one(session, """
        BEGIN BATCH
            INSERT INTO {table_name} (a, b, s, d) values (6, 6, 70, 'a')
            UPDATE {table_name} SET s = 60 WHERE a = 6 IF s IN (1,2,3)
        APPLY BATCH""".format(table_name=table_name), [False])

    assert_one(session, "SELECT * FROM {table_name} WHERE a = 6".format(table_name=table_name), [6, 6, None, None])
def compaction_strategy_switching_test(self):
    """Ensure that switching strategies does not result in problems.
    Insert data, switch strategies, then check against data loss.
    """
    strategies = ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy', 'DateTieredCompactionStrategy']

    if self.strategy in strategies:
        strategies.remove(self.strategy)
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        for strat in strategies:
            session = self.patient_cql_connection(node1)
            self.create_ks(session, 'ks', 1)

            session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds = 0 and compaction= {'class':'" + self.strategy + "'};")

            for x in range(0, 100):
                session.execute('insert into ks.cf (key, val) values (' + str(x) + ',1)')

            node1.flush()

            for x in range(0, 10):
                session.execute('delete from cf where key = ' + str(x))

            session.execute("alter table ks.cf with compaction = {'class':'" + strat + "'};")

            for x in range(11, 100):
                assert_one(session, "select * from ks.cf where key =" + str(x), [x, 1])

            for x in range(0, 10):
                assert_none(session, 'select * from cf where key = ' + str(x))

            node1.flush()
            cluster.clear()
            time.sleep(5)
            cluster.start(wait_for_binary_proto=True)
def ignore_failure_policy_test(self):
    """
    Test the ignore commitlog failure policy
    """
    self.prepare(configuration={'commit_failure_policy': 'ignore'})

    self._provoke_commitlog_failure()
    failure = self.node1.grep_log(r"ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
    self.assertTrue(failure, "Cannot find the commitlog failure message in logs")
    self.assertTrue(self.node1.is_running(), "Node1 should still be running")

    # on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
    # and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
    # as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
    # we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
    query = "INSERT INTO test (key, col1) VALUES (2, 2);"
    if is_win():
        # We expect this to succeed
        self.session1.execute(query)
        self.assertFalse(self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored.")
        self.assertTrue(self.node1.is_running(), "Node1 should still be running after an ignore error on CL")
    else:
        with self.assertRaises((OperationTimedOut, WriteTimeout)):
            self.session1.execute(query)

        # Should not exist
        assert_none(self.session1, "SELECT * FROM test where key=2;")

    # bring back the node commitlogs
    self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)

    self.session1.execute("""
        INSERT INTO test (key, col1) VALUES (3, 3);
    """)
    assert_one(
        self.session1,
        "SELECT * FROM test where key=3;",
        [3, 3]
    )

    time.sleep(2)
    assert_one(
        self.session1,
        "SELECT * FROM test where key=2;",
        [2, 2]
    )
def compaction_test(self):
    """
    Test we can major compact after an incremental repair
    * Launch a three node cluster
    * Create a keyspace with RF 3 and a table
    * Stop node3
    * Insert 100 rows
    * Restart node3
    * Issue an incremental repair
    * Insert 50 more rows
    * Perform a major compaction on node3
    * Verify all data is present
    # TODO: I have no idea what this is testing. The assertions do not verify anything meaningful.
    # TODO: Fix all the string formatting
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 3)
    session.execute("create table tab(key int PRIMARY KEY, val int);")

    node3.stop()

    for x in range(0, 100):
        session.execute("insert into tab(key,val) values(" + str(x) + ",0)")
    node1.flush()

    node3.start(wait_for_binary_proto=True)

    if cluster.version() >= "2.2":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")

    for x in range(0, 150):
        session.execute("insert into tab(key,val) values(" + str(x) + ",1)")

    cluster.flush()

    node3.nodetool('compact')

    for x in range(0, 150):
        assert_one(session, "select val from tab where key =" + str(x), [1])
def upgrade_with_range_and_collection_tombstones_test(self):
    """
    Check that an sstable containing a collection tombstone (inserted through adding a collection)
    can be read after upgrade.

    @jira_ticket CASSANDRA-10743
    """
    session = self._setup_cluster()

    session.execute('CREATE TABLE t (k text, t int, c list<int>, PRIMARY KEY (k, t))')

    session.execute("INSERT INTO t(k, t, c) VALUES ('some_key', 0, %s)" % str([i for i in range(10000)]))

    session = self._do_upgrade()

    self.cluster.compact()

    assert_one(session, "SELECT k FROM t", ['some_key'])
def _provoke_commitlog_failure(self):
    """
    Provoke the commitlog failure
    """
    # Test things are ok at this point
    self.session1.execute("""
        INSERT INTO test (key, col1) VALUES (1, 1);
    """)
    assert_one(
        self.session1,
        "SELECT * FROM test where key=1;",
        [1, 1]
    )

    self._change_commitlog_perms(0)

    with open(os.devnull, 'w') as devnull:
        self.node1.stress(['write', 'n=1M', '-col', 'size=FIXED(1000)', '-rate', 'threads=25'],
                          stdout=devnull, stderr=subprocess.STDOUT)
def _provoke_commitlog_failure(self):
    """
    Provoke the commitlog failure
    """
    debug('Provoking commitlog failure')
    # Test things are ok at this point
    self.session1.execute("""
        INSERT INTO test (key, col1) VALUES (1, 1);
    """)
    assert_one(self.session1, "SELECT * FROM test where key=1;", [1, 1])

    self._change_commitlog_perms(0)

    # Use stress_process to skip internal error handling in ccm. Grep node logs for specific errors in test method.
    self.node1.stress_process(['write', 'n=1M', 'no-warmup', '-col', 'size=FIXED(1000)', '-rate', 'threads=25']).communicate()
def udf_with_udt_test(self):
    session = self.prepare()
    session.execute("create type test (a text, b int);")

    assert_invalid(session, "create table tab (key int primary key, udt test);")

    session.execute("create table tab (key int primary key, udt frozen<test>);")

    session.execute("insert into tab (key, udt) values (1, {a: 'un', b:1});")
    session.execute("insert into tab (key, udt) values (2, {a: 'deux', b:2});")
    session.execute("insert into tab (key, udt) values (3, {a: 'trois', b:3});")

    session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

    assert_one(session, "select sum(funk(udt)) from tab", [6])

    assert_invalid(session, "drop type test;")
def populate_mv_after_insert_test(self):
    """Test that a view is OK when created with existing data"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))

    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("wait until all batchlogs are replayed")
    self._replay_batchlogs()

    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
def cql3_insert_thrift_test(self):
    """
    Check that we can insert from thrift into a CQL3 table:

    - CREATE a table via CQL
    - insert values via thrift
    - SELECT the inserted values and assert they are there as expected

    @jira_ticket CASSANDRA-4377
    """
    session = self.prepare(start_rpc=True)
    session.execute("""
        CREATE TABLE test (
            k int,
            c int,
            v int,
            PRIMARY KEY (k, c)
        )
    """)
    node = self.cluster.nodelist()[0]
    host, port = node.network_interfaces['thrift']
    client = get_thrift_client(host, port)
    client.transport.open()
    client.set_keyspace('ks')
    key = struct.pack('>i', 2)
    column_name_component = struct.pack('>i', 4)
    # component length + component + EOC + component length + component + EOC
    column_name = '\x00\x04' + column_name_component + '\x00' + '\x00\x01' + 'v' + '\x00'
    value = struct.pack('>i', 8)
    client.batch_mutate(
        {key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}},
        ThriftConsistencyLevel.ONE)

    assert_one(session, "SELECT * FROM test", [2, 4, 8])
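# Illustrative sketch (not part of the original test): how the CompositeType
# column name above is laid out. Each component is encoded as a two-byte
# big-endian length, the component bytes, and a 0x00 end-of-component marker.
# `pack_composite` is a hypothetical helper name.
import struct

def pack_composite(*components):
    name = b''
    for component in components:
        name += struct.pack('>H', len(component)) + component + b'\x00'
    return name

# Produces the same byte layout as the hand-built column_name in the test above:
# pack_composite(struct.pack('>i', 4), b'v')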
def upgrade_with_wide_partition(self, query_modifier=""): ROWS = 100 session = self._setup_cluster() session.execute( 'CREATE TABLE t (k int, t int, v1 int, v2 blob, v3 set<int>, PRIMARY KEY (k, t))' ) # the blob is only here to make the row bigger internally so it sometimes span multiple index blocks bigish_blob = "0x" for i in range(1000): bigish_blob = bigish_blob + "0000" for r in range(ROWS): session.execute( "INSERT INTO t(k, t, v1, v2, v3) VALUES ({}, {}, {}, {}, {{{}, {}}})" .format(0, r, r, bigish_blob, r * 2, r * 3)) self.cluster.flush() # delete every other row for r in range(0, ROWS, 2): session.execute("DELETE FROM t WHERE k=0 AND t={}".format(r)) # delete the set from every other remaining row for r in range(1, ROWS, 4): session.execute( "UPDATE t SET v3={{}} WHERE k=0 AND t={}".format(r)) session = self._do_upgrade() for r in range(0, ROWS): query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format( r, query_modifier) if (r - 1) % 4 == 0: assert_one(session, query, [r, r, None]) elif (r + 1) % 2 == 0: assert_one(session, query, [r, r, set([r * 2, r * 3])]) else: assert_none(session, query) self.cluster.compact() for r in range(ROWS): query = "SELECT t, v1, v3 FROM t WHERE k = 0 AND t={}{}".format( r, query_modifier) if (r - 1) % 4 == 0: assert_one(session, query, [r, r, None]) elif (r + 1) % 2 == 0: assert_one(session, query, [r, r, set([r * 2, r * 3])]) else: assert_none(session, query)
def update_and_drop_column_test(self):
    """
    Checks that dropped columns are properly handled in legacy sstables

    @jira_ticket CASSANDRA-11018
    """
    cursor = self._setup_cluster()

    cursor.execute('CREATE TABLE t (k text PRIMARY KEY, a int, b int)')

    cursor.execute("INSERT INTO t(k, a, b) VALUES ('some_key', 0, 0)")

    cursor = self._do_upgrade()

    cursor.execute("ALTER TABLE t DROP b")

    self.cluster.compact()

    assert_one(cursor, "SELECT * FROM t", ['some_key', 0])
def secondary_index_test(self):
    """Test that we can use secondary indexes with a materialized view"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.execute("CREATE INDEX ON t_by_v (v2)")

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {} AND v2 = 'a'".format(i),
            [i, i, 'a', 3.0]
        )
def udf_scripting_test(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val double);")

    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

    session.execute("CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'")

    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1, [1, 1.0, math.sin(1.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2, [2, 2.0, math.sin(2.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3, [3, 3.0, math.sin(3.0)])

    session.execute("create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'")

    assert_invalid(session, "select y_sin(val) from nums where key = 1", expected=FunctionFailure)

    assert_invalid(session, "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';")

    session.execute("create function plustwo(key int) called on null input returns double language javascript as 'key+2'")

    assert_one(session, "select plustwo(key) from nums where key = 3", [5])