def test_aggregate_udf(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val int);")
    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, x))
    session.execute("create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'")
    session.execute("create function stri(key int) called on null input returns text language java as 'return key.toString();'")
    session.execute("create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10")
    assert_one(session, "select suma(val) from nums", ["16"])

    session.execute("create function test(a int, b double) called on null input returns int language javascript as 'a + b;'")
    session.execute("create aggregate aggy(double) sfunc test stype int")
    assert_invalid(session, "create aggregate aggtwo(int) sfunc aggy stype int")
    assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")

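# Editorial note (hedged sketch): the UDF/UDA examples on this page call self.prepare(), a
# fixture defined elsewhere in the dtest framework and not shown here. Assuming it sets up a
# cluster the same way the hand-rolled tests below do (populate a ccm cluster, connect,
# create a keyspace), a minimal sketch could look like the following; the real helper may
# take different parameters and do additional configuration.
def prepare(self, nodes=1, rf=1):
    # start a ccm-managed cluster with the requested number of nodes
    cluster = self.cluster
    cluster.populate(nodes).start()
    node1 = cluster.nodelist()[0]
    # connect and create a working keyspace, mirroring the create_ks(...) calls used elsewhere
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', rf)
    return session
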
def udf_with_udt_test(self):
    """
    Test UDFs that operate on non-frozen UDTs.
    @jira_ticket CASSANDRA-7423
    @since 3.6
    """
    session = self.prepare()
    session.execute("create type test (a text, b int);")
    session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

    if LooseVersion(self.cluster.version()) >= LooseVersion('3.6'):
        frozen_vals = (False, True)
    else:
        frozen_vals = (True,)

    for frozen in frozen_vals:
        debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))

        table_name = "tab_frozen" if frozen else "tab"
        column_type = "frozen<test>" if frozen else "test"
        session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))

        session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
        session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
        session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))

        assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])

    assert_invalid(session, "drop type test;")

def test_udf_with_udt_keyspace_isolation(self):
    """
    Ensure functions don't allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    @since 2.2
    """
    session = self.prepare()

    session.execute("create type udt (a text, b int);")
    create_ks(session, 'user_ks', 1)

    # ensure we cannot use a udt from another keyspace as function argument
    assert_invalid(
        session,
        "CREATE FUNCTION overloaded(v ks.udt) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

    # ensure we cannot use a udt from another keyspace as return value
    assert_invalid(
        session,
        ("CREATE FUNCTION test(v text) called on null input RETURNS ks.udt "
         "LANGUAGE java AS 'return null;';"),
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

def drop_column_compact_test(self):
    session = self.prepare()

    session.execute("USE ks")
    session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int) WITH COMPACT STORAGE")

    assert_invalid(session, "ALTER TABLE cf DROP c1", "Cannot drop columns from a")

def check_permissions(self, node, upgraded):
    # use an exclusive connection to ensure we only talk to the specified node
    klaus = self.patient_exclusive_cql_connection(node, user='******', password='******', timeout=20)

    # klaus is a superuser, so should be able to list all permissions
    # the output of LIST PERMISSIONS changes slightly with #7653 adding
    # a new role column to results, so we need to tailor our check
    # based on whether the node has been upgraded or not
    if not upgraded:
        assert_all(klaus, 'LIST ALL PERMISSIONS',
                   [['michael', '<table ks.cf1>', 'MODIFY'],
                    ['michael', '<table ks.cf2>', 'SELECT']],
                   timeout=60)
    else:
        assert_all(klaus, 'LIST ALL PERMISSIONS',
                   [['michael', 'michael', '<table ks.cf1>', 'MODIFY'],
                    ['michael', 'michael', '<table ks.cf2>', 'SELECT']],
                   timeout=60)

    klaus.cluster.shutdown()

    michael = self.patient_exclusive_cql_connection(node, user='******', password='******')
    michael.execute('INSERT INTO ks.cf1 (id, val) VALUES (0,0)')
    michael.execute('SELECT * FROM ks.cf2')
    assert_invalid(michael, 'SELECT * FROM ks.cf1',
                   'User michael has no SELECT permission on <table ks.cf1> or any of its parents',
                   Unauthorized)
    michael.cluster.shutdown()

def test_validate_empty_column_name(self):
    cluster = self.cluster
    cluster.populate(1).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    create_ks(session, 'counter_tests', 1)

    session.execute("""
        CREATE TABLE compact_counter_table (
            pk int,
            ck text,
            value counter,
            PRIMARY KEY (pk, ck))
        WITH COMPACT STORAGE
        """)

    assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
    assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")

    session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
    session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")

    assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])

def udf_with_udt_test(self):
    """
    Test UDFs that operate on non-frozen UDTs.
    @jira_ticket CASSANDRA-7423
    @since 3.6
    """
    session = self.prepare()
    session.execute("create type test (a text, b int);")
    session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")

    if self.cluster.version() >= LooseVersion('3.6'):
        frozen_vals = (False, True)
    else:
        frozen_vals = (True,)

    for frozen in frozen_vals:
        debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))

        table_name = "tab_frozen" if frozen else "tab"
        column_type = "frozen<test>" if frozen else "test"
        session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))

        session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
        session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
        session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))

        assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])

    assert_invalid(session, "drop type test;")

def udf_with_udt_keyspace_isolation_test(self):
    """
    Ensure functions don't allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    @since 2.2
    """
    session = self.prepare()

    session.execute("create type udt (a text, b int);")
    create_ks(session, 'user_ks', 1)

    # ensure we cannot use a udt from another keyspace as function argument
    assert_invalid(
        session,
        "CREATE FUNCTION overloaded(v ks.udt) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

    # ensure we cannot use a udt from another keyspace as return value
    assert_invalid(
        session,
        ("CREATE FUNCTION test(v text) called on null input RETURNS ks.udt "
         "LANGUAGE java AS 'return null;';"),
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

def check_permissions(self, node, upgraded):
    # use an exclusive connection to ensure we only talk to the specified node
    klaus = self.patient_exclusive_cql_connection(node, user='******', password='******', timeout=20)

    # klaus is a superuser, so should be able to list all permissions
    # the output of LIST PERMISSIONS changes slightly with #7653 adding
    # a new role column to results, so we need to tailor our check
    # based on whether the node has been upgraded or not
    if not upgraded:
        assert_all(klaus, 'LIST ALL PERMISSIONS',
                   [['michael', '<table ks.cf1>', 'MODIFY'],
                    ['michael', '<table ks.cf2>', 'SELECT']])
    else:
        assert_all(klaus, 'LIST ALL PERMISSIONS',
                   [['michael', 'michael', '<table ks.cf1>', 'MODIFY'],
                    ['michael', 'michael', '<table ks.cf2>', 'SELECT']])

    klaus.cluster.shutdown()

    michael = self.patient_exclusive_cql_connection(node, user='******', password='******')
    michael.execute('INSERT INTO ks.cf1 (id, val) VALUES (0,0)')
    michael.execute('SELECT * FROM ks.cf2')
    assert_invalid(
        michael,
        'SELECT * FROM ks.cf1',
        'User michael has no SELECT permission on <table ks.cf1> or any of its parents',
        Unauthorized)
    michael.cluster.shutdown()

def test_multi_index_filtering_query(self):
    """
    asserts that having multiple indexes that cover all predicates still requires
    ALLOW FILTERING to also be present
    """
    cluster = self.cluster
    cluster.populate(1).start()
    node1, = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': '1'};")
    session.execute("USE ks;")
    session.execute("CREATE TABLE tbl (id uuid primary key, c0 text, c1 text, c2 text);")
    session.execute("CREATE INDEX ix_tbl_c0 ON tbl(c0);")
    session.execute("CREATE INDEX ix_tbl_c1 ON tbl(c1);")
    session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'a', 'b', 'c');")
    session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'a', 'b', 'c');")
    session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'q', 'b', 'c');")
    session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'a', 'e', 'f');")
    session.execute("INSERT INTO tbl (id, c0, c1, c2) values (uuid(), 'a', 'e', 'f');")

    rows = list(session.execute("SELECT * FROM tbl WHERE c0 = 'a';"))
    self.assertEqual(4, len(rows))

    stmt = "SELECT * FROM tbl WHERE c0 = 'a' AND c1 = 'b';"
    assert_invalid(session, stmt,
                   "Cannot execute this query as it might involve data filtering and thus may have "
                   "unpredictable performance. If you want to execute this query despite the "
                   "performance unpredictability, use ALLOW FILTERING")

    rows = list(session.execute("SELECT * FROM tbl WHERE c0 = 'a' AND c1 = 'b' ALLOW FILTERING;"))
    self.assertEqual(2, len(rows))

def test_reloadlocalschema(self):
    """
    @jira_ticket CASSANDRA-13954
    Test that `nodetool reloadlocalschema` works as intended
    """
    cluster = self.cluster
    cluster.populate(1)
    node = cluster.nodelist()[0]
    remove_perf_disable_shared_mem(node)  # for jmx
    cluster.start()

    session = self.patient_cql_connection(node)

    query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)

    query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
    session.execute(query)

    ss = make_mbean('db', type='StorageService')

    schema_version = ''

    # get initial schema version
    with JolokiaAgent(node) as jmx:
        schema_version = jmx.read_attribute(ss, 'SchemaVersion')

    # manually add a regular column 'val' to test.test
    query = """
        INSERT INTO system_schema.columns
            (keyspace_name, table_name, column_name, clustering_order, column_name_bytes, kind, position, type)
        VALUES
            ('test', 'test', 'val', 'none', 0x76616c, 'regular', -1, 'int');"""
    session.execute(query)

    # validate that schema version wasn't automatically updated
    with JolokiaAgent(node) as jmx:
        self.assertEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))

    # make sure the new column wasn't automagically picked up
    assert_invalid(session, 'INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')

    # force the node to reload schema from disk
    node.nodetool('reloadlocalschema')

    # validate that schema version changed
    with JolokiaAgent(node) as jmx:
        self.assertNotEqual(schema_version, jmx.read_attribute(ss, 'SchemaVersion'))

    # try an insert with the new column again and validate it succeeds this time
    session.execute('INSERT INTO test.test (pk, ck, val) VALUES (0, 1, 2);')
    assert_all(session, 'SELECT pk, ck, val FROM test.test;', [[0, 1, 2]])

def only_one_timestamp_is_valid_test(self):
    """ Test that TIMESTAMP must not be used in the statements within the batch. """
    session = self.prepare()
    assert_invalid(session, """
        BEGIN BATCH USING TIMESTAMP 1111111111111111
        INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 2
        INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
        APPLY BATCH
    """, matching="Timestamp must be set either on BATCH or individual statements")

def test_assertions(self):
    # assert_exception_test
    mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
    assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

    # assert_unavailable_test
    mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailabile message.")})
    assert_unavailable(mock_session.execute)

    # assert_invalid_test
    mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
    assert_invalid(mock_session, "DUMMY QUERY")

    # assert_unauthorized_test
    mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
    assert_unauthorized(mock_session, "DUMMY QUERY", None)

    # assert_one_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1, 1]])
    assert_one(mock_session, "SELECT * FROM test", [1, 1])

    # assert_none_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[])
    assert_none(mock_session, "SELECT * FROM test")

    # assert_all_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
    assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

    # assert_almost_equal_test
    assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

    # assert_row_count_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1]])
    assert_row_count(mock_session, 'test', 1)

    # assert_length_equal_test
    check = [1, 2, 3, 4]
    assert_length_equal(check, 4)

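# Editorial note (hedged sketch): test_assertions above drives assert_invalid and the other
# assertion helpers with mocked sessions. The real helpers live in the dtest tools/assertions
# module and are not reproduced on this page; a simplified sketch of the behaviour assert_invalid
# is assumed to have (execute the query, expect InvalidRequest or a caller-supplied exception
# type, optionally match the error message) could look like this. The signature is inferred from
# the calls shown here and may differ from the real implementation.
import re

from cassandra import InvalidRequest


def assert_invalid(session, query, matching=None, expected=InvalidRequest):
    try:
        session.execute(query)
    except expected as e:
        # if a message pattern was supplied, check the server error against it
        if matching is not None:
            assert re.search(matching, str(e)) is not None, "Error message did not match: {}".format(e)
        return
    raise AssertionError("Query was expected to be invalid but succeeded: {}".format(query))
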
def test_nested_type_dropping(self):
    """
    Confirm a user type can't be dropped when being used by another user type.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, "nested_user_type_dropping", 2)
    session.default_consistency_level = ConsistencyLevel.LOCAL_QUORUM

    stmt = """
        USE nested_user_type_dropping
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE simple_type (
            user_number int,
            user_text text
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE another_type (
            somefield frozen<simple_type>
        )
        """
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    assert_invalid(session, stmt,
                   "Cannot drop user type nested_user_type_dropping.simple_type as it is still used by user type another_type")

    # drop the type that's impeding the drop, and then try again
    stmt = """
        DROP TYPE another_type;
        """
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    session.execute(stmt)

    # now let's have a look at the system schema and make sure no user types are defined
    self.assertNoTypes(session)

def unlogged_batch_rejects_counter_mutations_test(self):
    """ Test that unlogged batch rejects counter mutations """
    session = self.prepare()
    err = "Counter and non-counter mutations cannot exist in the same batch"
    assert_invalid(session, """
        BEGIN UNLOGGED BATCH
        INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
        INSERT INTO users (id, firstname, lastname) VALUES (2, 'Elizabeth', 'Swann')
        UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'
        APPLY BATCH
    """, matching=err)

def logged_batch_rejects_counter_mutations_test(self):
    """ Test that logged batch rejects counter mutations """
    session = self.prepare()
    err = "Cannot include a counter statement in a logged batch"
    assert_invalid(session, """
        BEGIN BATCH
        INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
        INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')
        UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'
        APPLY BATCH
    """, matching=err)

def test_nested_type_dropping(self):
    """
    Confirm a user type can't be dropped when being used by another user type.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.LOCAL_QUORUM)
    create_ks(session, 'nested_user_type_dropping', 2)

    stmt = """
        USE nested_user_type_dropping
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE simple_type (
            user_number int,
            user_text text
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE another_type (
            somefield frozen<simple_type>
        )
        """
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    assert_invalid(session, stmt,
                   'Cannot drop user type nested_user_type_dropping.simple_type as it is still used by user type another_type')

    # drop the type that's impeding the drop, and then try again
    stmt = """
        DROP TYPE another_type;
        """
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    session.execute(stmt)

    # now let's have a look at the system schema and make sure no user types are defined
    self.assertNoTypes(session)

def test_type_enforcement(self):
    """
    Confirm error when incorrect data type used for user type
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.cql_connection(node1)
    create_ks(session, "user_type_enforcement", 2)
    session.default_consistency_level = ConsistencyLevel.LOCAL_QUORUM

    stmt = """
        USE user_type_enforcement
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE simple_type (
            user_number int
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TABLE simple_table (
            id uuid PRIMARY KEY,
            number frozen<simple_type>
        )
        """
    session.execute(stmt)

    # Make sure the schema propagates
    time.sleep(2)

    # here we will attempt an insert statement which should fail
    # because the user type is an int, but the insert statement is
    # providing text
    _id = uuid.uuid4()
    stmt = """
        INSERT INTO simple_table (id, number)
        VALUES ({id}, {{user_number: 'uh oh....this is not a number'}});
        """.format(id=_id)
    assert_invalid(session, stmt, "field user_number is not of type int")

    # let's check the rowcount and make sure the data
    # didn't get inserted when the exception asserted above was thrown
    stmt = """
        SELECT * FROM simple_table;
        """
    rows = list(session.execute(stmt))
    self.assertEqual(0, len(rows))

def counter_batch_rejects_regular_mutations_test(self):
    """ Test that counter batch rejects non-counter mutations """
    session = self.prepare()
    err = "Cannot include non-counter statement in a counter batch"
    assert_invalid(session, """
        BEGIN COUNTER BATCH
        UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://foo.com'
        UPDATE clicks SET total = total + 1 WHERE userid = 1 and url = 'http://bar.com'
        UPDATE clicks SET total = total + 1 WHERE userid = 2 and url = 'http://baz.com'
        INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')
        APPLY BATCH
    """, matching=err)

def test_multiple_indexes_on_single_map_column(self):
    """
    verifying functionality of multiple unique secondary indexes on a single column
    @jira_ticket CASSANDRA-7771
    @since 3.0
    """
    cluster = self.cluster
    cluster.populate(1).start()
    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'map_double_index', 1)
    session.execute("""
        CREATE TABLE map_tbl (
            id uuid primary key,
            amap map<text, int>
        )
        """)
    session.execute("CREATE INDEX map_keys ON map_tbl(keys(amap))")
    session.execute("CREATE INDEX map_values ON map_tbl(amap)")
    session.execute("CREATE INDEX map_entries ON map_tbl(entries(amap))")

    # multiple indexes on a single column are allowed but identical duplicate indexes are not
    assert_invalid(session, "CREATE INDEX map_values_2 ON map_tbl(amap)",
                   'Index map_values_2 is a duplicate of existing index map_values')

    session.execute("INSERT INTO map_tbl (id, amap) values (uuid(), {'foo': 1, 'bar': 2});")
    session.execute("INSERT INTO map_tbl (id, amap) values (uuid(), {'faz': 1, 'baz': 2});")

    value_search = list(session.execute("SELECT * FROM map_tbl WHERE amap CONTAINS 1"))
    self.assertEqual(2, len(value_search), "incorrect number of rows when querying on map values")

    key_search = list(session.execute("SELECT * FROM map_tbl WHERE amap CONTAINS KEY 'foo'"))
    self.assertEqual(1, len(key_search), "incorrect number of rows when querying on map keys")

    entries_search = list(session.execute("SELECT * FROM map_tbl WHERE amap['foo'] = 1"))
    self.assertEqual(1, len(entries_search), "incorrect number of rows when querying on map entries")

    session.cluster.refresh_schema_metadata()
    table_meta = session.cluster.metadata.keyspaces["map_double_index"].tables["map_tbl"]
    self.assertEqual(3, len(table_meta.indexes))
    self.assertItemsEqual(['map_keys', 'map_values', 'map_entries'], table_meta.indexes)
    self.assertEqual(3, len(session.cluster.metadata.keyspaces["map_double_index"].indexes))

    self.assertTrue('map_keys' in table_meta.export_as_string())
    self.assertTrue('map_values' in table_meta.export_as_string())
    self.assertTrue('map_entries' in table_meta.export_as_string())

    session.execute("DROP TABLE map_tbl")
    session.cluster.refresh_schema_metadata()
    self.assertEqual(0, len(session.cluster.metadata.keyspaces["map_double_index"].indexes))

def test_type_enforcement(self):
    """
    Confirm error when incorrect data type used for user type
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.cql_connection(node1, consistency_level=ConsistencyLevel.LOCAL_QUORUM)
    create_ks(session, 'user_type_enforcement', 2)

    stmt = """
        USE user_type_enforcement
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE simple_type (
            user_number int
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TABLE simple_table (
            id uuid PRIMARY KEY,
            number frozen<simple_type>
        )
        """
    session.execute(stmt)

    # Make sure the schema propagates
    time.sleep(2)

    # here we will attempt an insert statement which should fail
    # because the user type is an int, but the insert statement is
    # providing text
    _id = uuid.uuid4()
    stmt = """
        INSERT INTO simple_table (id, number)
        VALUES ({id}, {{user_number: 'uh oh....this is not a number'}});
        """.format(id=_id)
    assert_invalid(session, stmt, 'field user_number is not of type int')

    # let's check the rowcount and make sure the data
    # didn't get inserted when the exception asserted above was thrown
    stmt = """
        SELECT * FROM simple_table;
        """
    rows = list(session.execute(stmt))
    self.assertEqual(0, len(rows))

def aggregate_with_udt_keyspace_isolation_test(self):
    """
    Ensure aggregates don't allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    """
    session = self.prepare()
    session.execute("create type udt (a int);")
    create_ks(session, 'user_ks', 1)
    assert_invalid(
        session,
        "create aggregate suma (ks.udt) sfunc plus stype int finalfunc stri initcond 10",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

def test_aggregate_with_udt_keyspace_isolation(self):
    """
    Ensure aggregates don't allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    """
    session = self.prepare()
    session.execute("create type udt (a int);")
    create_ks(session, 'user_ks', 1)
    assert_invalid(
        session,
        "create aggregate suma (ks.udt) sfunc plus stype int finalfunc stri initcond 10",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )

def aggregate_udf_test(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val int);")
    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, x))
    session.execute("create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'")
    session.execute("create function stri(key int) called on null input returns text language java as 'return key.toString();'")
    session.execute("create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10")
    assert_one(session, "select suma(val) from nums", ["16"])

    session.execute("create function test(a int, b double) called on null input returns int language javascript as 'a + b;'")
    session.execute("create aggregate aggy(double) sfunc test stype int")
    assert_invalid(session, "create aggregate aggtwo(int) sfunc aggy stype int")
    assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")

def drop_counter_column_test(self):
    """Test for CASSANDRA-7831"""
    cluster = self.cluster
    cluster.populate(1).start()
    node1, = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'counter_tests', 1)

    session.execute("CREATE TABLE counter_bug (t int, c counter, primary key(t))")
    session.execute("UPDATE counter_bug SET c = c + 1 where t = 1")
    row = list(session.execute("SELECT * from counter_bug"))

    self.assertEqual(rows_to_list(row)[0], [1, 1])
    self.assertEqual(len(row), 1)

    session.execute("ALTER TABLE counter_bug drop c")

    assert_invalid(session, "ALTER TABLE counter_bug add c counter",
                   "Cannot re-add previously dropped counter column c")

def test_no_counters_in_user_types(self):
    # CASSANDRA-7672
    cluster = self.cluster
    cluster.populate(1).start()
    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'user_types', 1)

    stmt = """
        USE user_types
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE t_item (
            sub_one COUNTER
        )
        """
    assert_invalid(session, stmt, 'A user type cannot contain counters')

def assertions_test(self):
    # assert_exception_test
    mock_session = Mock(**{'execute.side_effect': AlreadyExists("Dummy exception message.")})
    assert_exception(mock_session, "DUMMY QUERY", expected=AlreadyExists)

    # assert_unavailable_test
    mock_session = Mock(**{'execute.side_effect': Unavailable("Dummy Unavailabile message.")})
    assert_unavailable(mock_session.execute)

    # assert_invalid_test
    mock_session = Mock(**{'execute.side_effect': InvalidRequest("Dummy InvalidRequest message.")})
    assert_invalid(mock_session, "DUMMY QUERY")

    # assert_unauthorized_test
    mock_session = Mock(**{'execute.side_effect': Unauthorized("Dummy Unauthorized message.")})
    assert_unauthorized(mock_session, "DUMMY QUERY", None)

    # assert_one_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1, 1]])
    assert_one(mock_session, "SELECT * FROM test", [1, 1])

    # assert_none_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[])
    assert_none(mock_session, "SELECT * FROM test")

    # assert_all_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[i, i] for i in range(0, 10)])
    assert_all(mock_session, "SELECT k, v FROM test", [[i, i] for i in range(0, 10)], ignore_order=True)

    # assert_almost_equal_test
    assert_almost_equal(1, 1.1, 1.2, 1.9, error=1.0)

    # assert_row_count_test
    mock_session = Mock()
    mock_session.execute = Mock(return_value=[[1]])
    assert_row_count(mock_session, 'test', 1)

    # assert_length_equal_test
    check = [1, 2, 3, 4]
    assert_length_equal(check, 4)

def drop_counter_column_test(self):
    """Test for CASSANDRA-7831"""
    cluster = self.cluster
    cluster.populate(1).start()
    node1, = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'counter_tests', 1)

    session.execute("CREATE TABLE counter_bug (t int, c counter, primary key(t))")
    session.execute("UPDATE counter_bug SET c = c + 1 where t = 1")
    row = list(session.execute("SELECT * from counter_bug"))

    self.assertEqual(rows_to_list(row)[0], [1, 1])
    self.assertEqual(len(row), 1)

    session.execute("ALTER TABLE counter_bug drop c")

    assert_invalid(session, "ALTER TABLE counter_bug add c counter",
                   "Cannot re-add previously dropped counter column c")

def test_udf_scripting(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val double);")

    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

    session.execute("CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'")

    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1, [1, 1.0, math.sin(1.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2, [2, 2.0, math.sin(2.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3, [3, 3.0, math.sin(3.0)])

    session.execute("create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'")

    assert_invalid(session, "select y_sin(val) from nums where key = 1", expected=FunctionFailure)

    assert_invalid(session, "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';")

    session.execute("create function plustwo(key int) called on null input returns double language javascript as 'key+2'")

    assert_one(session, "select plustwo(key) from nums where key = 3", [5])

def udt_test(self):
    """
    Test (somewhat indirectly) that user queries involving UDTs are properly encoded
    (due to the driver not recognizing UDT syntax)
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    time.sleep(.5)

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 3)

    # create udt and insert correctly (should be successful)
    session.execute('CREATE TYPE address (city text,zip int);')
    session.execute('CREATE TABLE user_profiles (login text PRIMARY KEY, addresses map<text, frozen<address>>);')
    session.execute("INSERT INTO user_profiles(login, addresses) VALUES ('tsmith', { 'home': {city: 'San Fransisco',zip: 94110 }});")

    # note: here address looks like a map, which is what the driver thinks it is. The udt is
    # encoded server side, so we change addresses slightly and check that the encoder
    # recognizes the errors.

    # try adding a field - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('jsmith', { 'home': {street: 'El Camino Real', city: 'San Fransisco', zip: 94110 }});",
                   "Unknown field 'street' in value of user defined type address")

    # try modifying a field name - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {cityname: 'San Fransisco', zip: 94110 }});",
                   "Unknown field 'cityname' in value of user defined type address")

    # try modifying a type within the collection - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {city: 'San Fransisco', zip: '94110' }});",
                   "Invalid map literal for addresses")

def udt_test(self):
    """
    Test (somewhat indirectly) that user queries involving UDTs are properly encoded
    (due to the driver not recognizing UDT syntax)
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    time.sleep(.5)

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 3)

    # create udt and insert correctly (should be successful)
    session.execute('CREATE TYPE address (city text,zip int);')
    session.execute('CREATE TABLE user_profiles (login text PRIMARY KEY, addresses map<text, frozen<address>>);')
    session.execute("INSERT INTO user_profiles(login, addresses) VALUES ('tsmith', { 'home': {city: 'San Fransisco',zip: 94110 }});")

    # note: here address looks like a map, which is what the driver thinks it is. The udt is
    # encoded server side, so we change addresses slightly and check that the encoder
    # recognizes the errors.

    # try adding a field - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('jsmith', { 'home': {street: 'El Camino Real', city: 'San Fransisco', zip: 94110 }});",
                   "Unknown field 'street' in value of user defined type address")

    # try modifying a field name - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {cityname: 'San Fransisco', zip: 94110 }});",
                   "Unknown field 'cityname' in value of user defined type address")

    # try modifying a type within the collection - see if it will be encoded to a udt (should return error)
    assert_invalid(session,
                   "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {city: 'San Fransisco', zip: '94110' }});",
                   "Invalid map literal for addresses")

def test_type_as_part_of_pkey(self):
    """Tests user types as part of a composite pkey"""
    # make sure we can define a table with a user type as part of the pkey
    # and do a basic insert/query of data in that table.
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.LOCAL_QUORUM)
    create_ks(session, 'user_type_pkeys', 2)

    stmt = """
        CREATE TYPE t_person_name (
            first text,
            middle text,
            last text
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TABLE person_likes (
            id uuid,
            name frozen<t_person_name>,
            like text,
            PRIMARY KEY ((id, name))
        )
        """
    session.execute(stmt)

    # Make sure the schema propagates
    time.sleep(2)

    _id = uuid.uuid4()

    stmt = """
        INSERT INTO person_likes (id, name, like)
        VALUES ({id}, {{first:'Nero', middle:'Claudius Caesar Augustus', last:'Germanicus'}}, 'arson');
        """.format(id=_id)
    session.execute(stmt)

    # attempt to query without the user type portion of the pkey and confirm there is an error
    stmt = """
        SELECT id, name.first from person_likes where id={id};
        """.format(id=_id)
    if self.cluster.version() >= '3.10':
        assert_invalid(session, stmt, 'Cannot execute this query as it might involve data filtering')
    elif self.cluster.version() >= '2.2':
        assert_invalid(session, stmt, 'Partition key parts: name must be restricted as other parts are')
    else:
        assert_invalid(session, stmt, 'Partition key part name must be restricted since preceding part is')

    stmt = """
        SELECT id, name.first, like from person_likes where id={id} and name = {{first:'Nero', middle: 'Claudius Caesar Augustus', last: 'Germanicus'}};
        """.format(id=_id)
    rows = session.execute(stmt)

    row_uuid, first_name, like = rows[0]

    assert first_name == 'Nero'
    assert like == 'arson'

def udf_scripting_test(self):
    session = self.prepare()
    session.execute("create table nums (key int primary key, val double);")

    for x in range(1, 4):
        session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))

    session.execute("CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'")

    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1, [1, 1.0, math.sin(1.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2, [2, 2.0, math.sin(2.0)])
    assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3, [3, 3.0, math.sin(3.0)])

    session.execute("create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'")

    assert_invalid(session, "select y_sin(val) from nums where key = 1", expected=FunctionFailure)

    assert_invalid(session, "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';")

    session.execute("create function plustwo(key int) called on null input returns double language javascript as 'key+2'")

    assert_one(session, "select plustwo(key) from nums where key = 3", [5])

def test_user_type_isolation(self):
    """
    Ensure UDT cannot be used from another keyspace
    @jira_ticket CASSANDRA-9409
    @since 2.2
    """
    cluster = self.cluster
    cluster.populate(1).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    create_ks(session, 'user_types', 1)

    # create a user defined type in a keyspace
    session.execute("CREATE TYPE udt (first text, second int, third int)")

    # ensure we cannot use a udt from another keyspace
    create_ks(session, 'user_ks', 1)
    assert_invalid(
        session,
        "CREATE TABLE t (id int PRIMARY KEY, v frozen<user_types.udt>)",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace user_types"
    )

def validate_empty_column_name_test(self):
    cluster = self.cluster
    cluster.populate(1).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    create_ks(session, 'counter_tests', 1)

    session.execute("""
        CREATE TABLE compact_counter_table (
            pk int,
            ck text,
            value counter,
            PRIMARY KEY (pk, ck))
        WITH COMPACT STORAGE
        """)

    assert_invalid(session, "UPDATE compact_counter_table SET value = value + 1 WHERE pk = 0 AND ck = ''")
    assert_invalid(session, "UPDATE compact_counter_table SET value = value - 1 WHERE pk = 0 AND ck = ''")

    session.execute("UPDATE compact_counter_table SET value = value + 5 WHERE pk = 0 AND ck = 'ck'")
    session.execute("UPDATE compact_counter_table SET value = value - 2 WHERE pk = 0 AND ck = 'ck'")

    assert_one(session, "SELECT pk, ck, value FROM compact_counter_table", [0, 'ck', 3])

def udf_overload_test(self):
    session = self.prepare(nodes=3)

    session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
    session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

    # create overloaded udfs
    session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")

    # ensure that works with correct specificity
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

    # try non-existent functions
    assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
    assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

    # try dropping overloaded - should fail because ambiguous
    assert_invalid(session, "DROP FUNCTION overloaded")
    session.execute("DROP FUNCTION overloaded(varchar)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
    session.execute("DROP FUNCTION overloaded(text, text)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
    session.execute("DROP FUNCTION overloaded(ascii)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
    # should now work - unambiguous
    session.execute("DROP FUNCTION overloaded")

def test_migration(self):
    """ Test migration of user functions """
    cluster = self.cluster

    # Uses 3 nodes just to make sure function mutations are correctly serialized
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    node2 = cluster.nodelist()[1]
    node3 = cluster.nodelist()[2]
    time.sleep(0.2)

    # The latter three sessions use a whitelist policy, and then don't wait for schema agreement
    # So we create `schema_wait_session` to use for schema agreement blocking, and DDL changes
    schema_wait_session = self.patient_cql_connection(node1)
    create_ks(schema_wait_session, 'ks', 1)
    schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

    node1_session = self.patient_exclusive_cql_connection(node1, keyspace='ks')
    node2_session = self.patient_exclusive_cql_connection(node2, keyspace='ks')
    node3_session = self.patient_exclusive_cql_connection(node3, keyspace='ks')

    schema_wait_session.execute("""
        CREATE TABLE udf_kv (
            key int primary key,
            value double
        );
        """)
    schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

    node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(1, 1))
    node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(2, 2))
    node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(3, 3))

    schema_wait_session.execute("""
        create or replace function x_sin ( input double ) called on null input returns double
        language java as 'if (input==null) return null; return Double.valueOf(Math.sin(input.doubleValue()));'
        """)
    schema_wait_session.execute("""
        create or replace function x_cos ( input double ) called on null input returns double
        language java as 'if (input==null) return null; return Double.valueOf(Math.cos(input.doubleValue()));'
        """)
    schema_wait_session.execute("""
        create or replace function x_tan ( input double ) called on null input returns double
        language java as 'if (input==null) return null; return Double.valueOf(Math.tan(input.doubleValue()));'
        """)
    schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

    assert_one(node1_session,
               "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 1,
               [1, 1.0, 0.8414709848078965, 0.5403023058681398, 1.5574077246549023])

    assert_one(node2_session,
               "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 2,
               [2, 2.0, math.sin(2.0), math.cos(2.0), math.tan(2.0)])

    assert_one(node3_session,
               "SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 3,
               [3, 3.0, math.sin(3.0), math.cos(3.0), math.tan(3.0)])

    session4 = self.patient_cql_connection(node1)

    # check that functions are correctly confined to namespaces
    assert_invalid(session4,
                   "SELECT key, value, sin(value), cos(value), tan(value) FROM ks.udf_kv where key = 4",
                   "Unknown function 'sin'")

    # try giving existing function bad input, should error
    assert_invalid(node1_session,
                   "SELECT key, value, x_sin(key), foo_cos(KEYy), foo_tan(key) FROM ks.udf_kv where key = 1",
                   "Type error: key cannot be passed as argument 0 of function ks.x_sin of type double")

    node2_session.execute("drop function x_sin")
    node3_session.execute("drop function x_cos")
    node1_session.execute("drop function x_tan")

    schema_wait_session.cluster.control_connection.wait_for_schema_agreement()

    assert_invalid(node1_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
    assert_invalid(node2_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
    assert_invalid(node3_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")

    # try creating function returning the wrong type, should error
    assert_invalid(node1_session,
                   "CREATE FUNCTION bad_sin ( input double ) CALLED ON NULL INPUT RETURNS uuid LANGUAGE java AS 'return Math.sin(input);';",
                   "Type mismatch: cannot convert from double to UUID")

def test_type_dropping(self):
    """
    Tests that a type cannot be dropped when in use, and otherwise can be dropped.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.LOCAL_QUORUM)
    create_ks(session, 'user_type_dropping', 2)

    stmt = """
        USE user_type_dropping
        """
    session.execute(stmt)

    stmt = """
        CREATE TYPE simple_type (
            user_number int
        )
        """
    session.execute(stmt)

    stmt = """
        CREATE TABLE simple_table (
            id uuid PRIMARY KEY,
            number frozen<simple_type>
        )
        """
    session.execute(stmt)

    # Make sure the schema propagates
    time.sleep(2)

    _id = uuid.uuid4()
    stmt = """
        INSERT INTO simple_table (id, number)
        VALUES ({id}, {{user_number: 1}});
        """.format(id=_id)
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    assert_invalid(session, stmt,
                   'Cannot drop user type user_type_dropping.simple_type as it is still used by table user_type_dropping.simple_table')

    # now that we've confirmed that a user type cannot be dropped while in use
    # let's remove the offending table
    # TODO: uncomment below after CASSANDRA-6472 is resolved
    # and add another check to make sure the table/type drops succeed
    stmt = """
        DROP TABLE simple_table;
        """.format(id=_id)
    session.execute(stmt)

    stmt = """
        DROP TYPE simple_type;
        """
    session.execute(stmt)

    # now let's have a look at the system schema and make sure no user types are defined
    self.assertNoTypes(session)

def test_map_indexes(self):
    """
    Checks that secondary indexes on maps work for querying on both keys and values
    """
    cluster = self.cluster
    cluster.populate(1).start()
    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'map_index_search', 1)

    stmt = ("CREATE TABLE map_index_search.users ("
            "user_id uuid PRIMARY KEY,"
            "email text,"
            "uuids map<uuid, uuid>);")
    session.execute(stmt)

    # add index on keys and query again (even though there are no rows in the table yet)
    stmt = "CREATE INDEX user_uuids on map_index_search.users (KEYS(uuids));"
    session.execute(stmt)

    stmt = "SELECT * from map_index_search.users where uuids contains key {some_uuid}".format(some_uuid=uuid.uuid4())
    rows = list(session.execute(stmt))
    self.assertEqual(0, len(rows))

    # add a row which doesn't specify data for the indexed column, and query again
    user1_uuid = uuid.uuid4()
    stmt = ("INSERT INTO map_index_search.users (user_id, email)"
            "values ({user_id}, '*****@*****.**')"
            ).format(user_id=user1_uuid)
    session.execute(stmt)

    stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=uuid.uuid4())
    rows = list(session.execute(stmt))
    self.assertEqual(0, len(rows))

    _id = uuid.uuid4()
    # alter the row to add a single item to the indexed map
    stmt = ("UPDATE map_index_search.users set uuids = {{{id}:{user_id}}} where user_id = {user_id}"
            ).format(id=_id, user_id=user1_uuid)
    session.execute(stmt)

    stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=_id)
    rows = list(session.execute(stmt))
    self.assertEqual(1, len(rows))

    # add a bunch of user records and query them back
    shared_uuid = uuid.uuid4()  # this uuid will be on all records

    log = []

    for i in range(50000):
        user_uuid = uuid.uuid4()
        unshared_uuid1 = uuid.uuid4()
        unshared_uuid2 = uuid.uuid4()

        # give each record a unique email address using the int index, add unique ids for keys and values
        stmt = ("INSERT INTO map_index_search.users (user_id, email, uuids)"
                "values ({user_uuid}, '{prefix}@example.com', {{{u_uuid1}:{u_uuid2}, {s_uuid}:{s_uuid}}})"
                ).format(user_uuid=user_uuid, prefix=i, s_uuid=shared_uuid, u_uuid1=unshared_uuid1, u_uuid2=unshared_uuid2)
        session.execute(stmt)

        log.append(
            {'user_id': user_uuid,
             'email': str(i) + '@example.com',
             'unshared_uuid1': unshared_uuid1,
             'unshared_uuid2': unshared_uuid2}
        )

    # confirm there are now 50k rows with the 'shared' uuid above in the secondary index
    stmt = ("SELECT * from map_index_search.users where uuids contains key {shared_uuid}"
            ).format(shared_uuid=shared_uuid)
    rows = session.execute(stmt)
    result = [row for row in rows]
    self.assertEqual(50000, len(result))

    # shuffle the log in-place, and double-check a slice of records by querying the secondary index on keys
    random.shuffle(log)

    for log_entry in log[:1000]:
        stmt = ("SELECT user_id, email, uuids FROM map_index_search.users where uuids contains key {unshared_uuid1}"
                ).format(unshared_uuid1=log_entry['unshared_uuid1'])
        row = session.execute(stmt)

        result = list(row)
        self.assertEqual(1, len(result))

        db_user_id, db_email, db_uuids = result[0]
        self.assertEqual(db_user_id, log_entry['user_id'])
        self.assertEqual(db_email, log_entry['email'])

        self.assertTrue(shared_uuid in db_uuids)
        self.assertTrue(log_entry['unshared_uuid1'] in db_uuids)

    # attempt to add an index on map values as well (should fail pre 3.0)
    stmt = "CREATE INDEX user_uuids_values on map_index_search.users (uuids);"
    if self.cluster.version() < '3.0':
        if self.cluster.version() >= '2.2':
            matching = "Cannot create index on values\(uuids\): an index on keys\(uuids\) already exists and indexing a map on more than one dimension at the same time is not currently supported"
        else:
            matching = "Cannot create index on uuids values, an index on uuids keys already exists and indexing a map on both keys and values at the same time is not currently supported"
        assert_invalid(session, stmt, matching)
    else:
        session.execute(stmt)

    if self.cluster.version() < '3.0':
        # since cannot have index on map keys and values remove current index on keys
        stmt = "DROP INDEX user_uuids;"
        session.execute(stmt)

        # add index on values (will index rows added prior)
        stmt = "CREATE INDEX user_uuids_values on map_index_search.users (uuids);"
        session.execute(stmt)

    start = time.time()
    while time.time() < start + 30:
        debug("waiting for index to build")
        time.sleep(1)
        if index_is_built(node1, session, 'map_index_search', 'users', 'user_uuids_values'):
            break
    else:
        raise DtestTimeoutError()

    # shuffle the log in-place, and double-check a slice of records by querying the secondary index
    random.shuffle(log)

    time.sleep(10)

    # since we already inserted unique ids for values as well, check that appropriate records are found
    for log_entry in log[:1000]:
        stmt = ("SELECT user_id, email, uuids FROM map_index_search.users where uuids contains {unshared_uuid2}"
                ).format(unshared_uuid2=log_entry['unshared_uuid2'])
        rows = list(session.execute(stmt))
        self.assertEqual(1, len(rows), rows)

        db_user_id, db_email, db_uuids = rows[0]
        self.assertEqual(db_user_id, log_entry['user_id'])
        self.assertEqual(db_email, log_entry['email'])

        self.assertTrue(shared_uuid in db_uuids)
        self.assertTrue(log_entry['unshared_uuid2'] in db_uuids.values())

def test_udf_overload(self):
    session = self.prepare(nodes=3)

    session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
    session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")

    # create overloaded udfs
    session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
    session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")

    # ensure that works with correct specificity
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
    assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")

    # try non-existent functions
    assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
    assert_invalid(session, "DROP FUNCTION overloaded(bigint)")

    # try dropping overloaded - should fail because ambiguous
    assert_invalid(session, "DROP FUNCTION overloaded")
    session.execute("DROP FUNCTION overloaded(varchar)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
    session.execute("DROP FUNCTION overloaded(text, text)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
    session.execute("DROP FUNCTION overloaded(ascii)")
    assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
    # should now work - unambiguous
    session.execute("DROP FUNCTION overloaded")
