Code Example #1
    def test_dense_supercolumn(self):
        cursor = self.prepare(nodes=2, rf=2, row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_dense_super_cf(client, 'dense_super_1')

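        # write rows k1 and k2, each with supercolumns key1 and key2, over Thrift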
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)

        _validate_dense_cql(cursor)
        _validate_dense_thrift(client)

        if self.upgrade_is_version_4_or_greater():  # 4.0 doesn't support compact storage
            cursor.execute("ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")

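        # after each upgrade step, re-validate over CQL and, while the node is still pre-4.0, over Thrift as well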
        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            is_version_4_or_greater = node.get_cassandra_version() >= CASSANDRA_4_0
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_dense_thrift(client)
            _validate_dense_cql(cursor, is_version_4_or_greater=is_version_4_or_greater)
Code Example #2
    def test_dense_supercolumn_3_0_created(self):
        cluster = self.prepare(cassandra_version='github:apache/cassandra-3.0')
        node = self.cluster.nodelist()[0]
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_1'))

        for i in range(1, 3):
            client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

        _validate_dense_thrift(client, cf='dense_super_1')

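        # restart the node on the current version with Thrift RPC enabled, then re-validate over Thrift and CQL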
        node.stop()
        self.set_node_to_current_version(node)
        node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')
Code Example #3
    def test_dense_supercolumn_with_renames(self):
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_2'))

        for i in range(1, 3):
            client.insert('k1', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

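        # rename all four CQL-visible columns of the dense super column family before validating over both interfaces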
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column1 TO renamed_column1")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column2 TO renamed_column2")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME value TO renamed_value")

        _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value')
        _validate_dense_thrift(client, cf='dense_super_2')

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            client = get_thrift_client(host, port)
            _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value')
            _validate_dense_thrift(client, cf='dense_super_2')
Code Example #4
    def test_dense_supercolumn(self):
        cursor = self.prepare(nodes=2, rf=2, row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_dense_super_cf(client, 'dense_super_1')

        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)

        _validate_dense_cql(cursor)
        _validate_dense_thrift(client)

        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_dense_thrift(client)
            _validate_dense_cql(cursor, is_version_4_or_greater=is_version_4_or_greater)
Code Example #5
    def test_sparse_supercolumn(self):
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        cf = _create_sparse_super_cf('sparse_super_2')
        client.system_add_column_family(cf)

        for i in range(1, 3):
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value2", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            client = get_thrift_client(host, port)
            _validate_sparse_thrift(client, cf='sparse_super_2')
            _validate_sparse_cql(cursor, cf='sparse_super_2')
Code Example #6
    def test_dense_supercolumn(self):
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute(
            "CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };"
        )

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_dense_super_cf(client, 'dense_super_1')

        for i in range(1, 3):
            client.insert(
                'k1'.encode(),
                ColumnParent('dense_super_1', 'key{}'.format(i).encode()),
                Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert(
                'k2'.encode(),
                ColumnParent('dense_super_1', 'key{}'.format(i).encode()),
                Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)

        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')

        self.upgrade_to_version('github:apache/cassandra-3.0')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_dense_thrift(client, cf='dense_super_1')

        self.set_node_to_current_version(node)
        #4.0 doesn't support compact storage
        if node.get_cassandra_version() >= '4':
            cursor.execute(
                "ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")

        node.stop()
        if node.get_cassandra_version() < '4':
            node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        if node.get_cassandra_version() < '4':
            client = get_thrift_client(host, port)
            _validate_dense_thrift(client, cf='dense_super_1')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        _validate_dense_cql(
            cursor,
            cf='dense_super_1',
            is_version_4_or_greater=node.get_cassandra_version() >= CASSANDRA_4_0)
Code Example #7
    def test_sparse_supercolumn(self):
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_sparse_super_cf(client, 'sparse_super_2')

        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value1".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value2".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')

        self.upgrade_to_version('github:apache/cassandra-3.0')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_sparse_thrift(client, cf='sparse_super_2')

        self.set_node_to_current_version(node)
        is_version_4_or_greater = node.get_cassandra_version() >= '4'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.sparse_super_2 DROP COMPACT STORAGE;")

        node.stop()
        if not is_version_4_or_greater:
            node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        if not is_version_4_or_greater:
            client = get_thrift_client(host, port)
            _validate_sparse_thrift(client, cf='sparse_super_2')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        _validate_sparse_cql(cursor, cf='sparse_super_2', is_version_4_or_greater=is_version_4_or_greater)
Code Example #8
    def prepare(self, start_version, num_nodes=1, rf=1):
        """
        Prepare the test, starting a cluster on the initial version, creating
        a keyspace (named 'ks') and returning a CQL and a thrift connection to
        the first node (and set on the created keyspace).
        :param start_version: the version to set the node at initially.
        :param num_nodes: the number of nodes to use.
        :param rf: replication factor for the keyspace created.
        :return: a pair (cql, thrift) of a CQL connection and an open thrift
            connection to the first node in the cluster.
        """
        self.cluster.set_install_dir(version=start_version)
        self.fixture_dtest_setup.reinitialize_cluster_for_different_version()

        self.cluster.populate(num_nodes)
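        # enable Thrift RPC and allow DROP COMPACT STORAGE on every node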
        for node in self.cluster.nodelist():
            node.set_configuration_options(values={'start_rpc': 'true', 'enable_drop_compact_storage': 'true'})

        self.cluster.start()
        logger.debug("Started node on %s", start_version)

        node = self.cluster.nodelist()[0]
        cql = self.patient_cql_connection(node)

        cql.execute("CREATE KEYSPACE ks WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '{}' }}".format(rf))
        cql.execute("USE ks")

        host, port = node.network_interfaces['thrift']
        thrift = get_thrift_client(host, port)
        thrift.transport.open()
        thrift.set_keyspace('ks')
        return cql, thrift
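A minimal usage sketch (not part of the original suite) of the (cql, thrift) pair that prepare returns; the test name, table, and values below are illustrative assumptions only.

    def test_prepare_usage_sketch(self):
        # hypothetical sketch: start on 3.0, create a table in the 'ks' keyspace
        # that prepare set up, write over CQL, and read the value back
        cql, thrift = self.prepare(start_version='github:apache/cassandra-3.0')
        cql.execute("CREATE TABLE t (k int PRIMARY KEY, v int)")
        cql.execute("INSERT INTO t (k, v) VALUES (1, 1)")
        row = list(cql.execute("SELECT v FROM t WHERE k = 1"))[0]
        assert row.v == 1
        thrift.transport.close()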
Code Example #9
    def test_upgrade_with_range_tombstone_eoc_0(self):
        """
        Check sstable upgrading when the sstable contains a range tombstone with EOC=0.

        @jira_ticket CASSANDRA-12423
        """
        session = self._setup_cluster(cluster_options={'start_rpc': 'true'})

        session.execute("CREATE TABLE rt (id INT, c1 TEXT, c2 TEXT, v INT, PRIMARY KEY (id, c1, c2)) "
                        "with compact storage and compression = {'sstable_compression': ''};")

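        # Thrift range deletion whose slice bounds carry EOC=0, the case from CASSANDRA-12423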
        range_delete = {
            i32(1): {
                'rt': [Mutation(deletion=Deletion(2470761440040513,
                                                  predicate=SlicePredicate(slice_range=SliceRange(
                                                      start=composite('a', eoc=b'\x00'),
                                                      finish=composite('asd', eoc=b'\x00')))))]
            }
        }

        client = get_thrift_client()
        client.transport.open()
        client.set_keyspace('ks')
        client.batch_mutate(range_delete, ConsistencyLevel.ONE)
        client.transport.close()

        session.execute("INSERT INTO rt (id, c1, c2, v) VALUES (1, 'asd', '', 0) USING TIMESTAMP 1470761451368658")
        session.execute("INSERT INTO rt (id, c1, c2, v) VALUES (1, 'asd', 'asd', 0) USING TIMESTAMP 1470761449416613")

        session = self._do_upgrade()

        ret = list(session.execute('SELECT * FROM rt'))
        assert_length_equal(ret, 2)
Code Example #10
    def test_upgrade_with_range_tombstone_eoc_0(self):
        """
        Check sstable upgrading when the sstable contains a range tombstone with EOC=0.

        @jira_ticket CASSANDRA-12423
        """
        session = self._setup_cluster(cluster_options={'start_rpc': 'true'})

        session.execute("CREATE TABLE rt (id INT, c1 TEXT, c2 TEXT, v INT, PRIMARY KEY (id, c1, c2)) "
                        "with compact storage and compression = {'sstable_compression': ''};")

        range_delete = {
            i32(1): {
                'rt': [Mutation(deletion=Deletion(2470761440040513,
                                                  predicate=SlicePredicate(slice_range=SliceRange(
                                                      start=composite('a', eoc='\x00'),
                                                      finish=composite('asd', eoc='\x00')))))]
            }
        }

        client = get_thrift_client()
        client.transport.open()
        client.set_keyspace('ks')
        client.batch_mutate(range_delete, ConsistencyLevel.ONE)
        client.transport.close()

        session.execute("INSERT INTO rt (id, c1, c2, v) VALUES (1, 'asd', '', 0) USING TIMESTAMP 1470761451368658")
        session.execute("INSERT INTO rt (id, c1, c2, v) VALUES (1, 'asd', 'asd', 0) USING TIMESTAMP 1470761449416613")

        session = self._do_upgrade()

        ret = list(session.execute('SELECT * FROM rt'))
        assert_length_equal(ret, 2)
Code Example #11
    def test_sparse_supercolumn_with_renames(self):
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_sparse_super_cf(client, 'sparse_super_1')

        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME column1 TO renamed_column1")

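        # populate the sparse super column family over Thrift after the renames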
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("value1".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("value2".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client)
        _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key')

        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.sparse_super_1 DROP COMPACT STORAGE;")

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_sparse_thrift(client)
            _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key', is_version_4_or_greater=is_version_4_or_greater)
Code Example #12
    def verify_with_thrift(self):
        # No more thrift in 4.0
        if self.cluster.version() >= '4':
            return

        node = self.cluster.nodelist()[0]
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('supcols')
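        # slice each row and check that the first returned column's value equals the row key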
        p = SlicePredicate(slice_range=SliceRange(''.encode(), ''.encode(), False, 1000))
        for name in NAMES:
            super_col_value = client.get_slice(name, ColumnParent("cols"), p, ConsistencyLevel.ONE)
            logger.debug("get_slice(%s) returned %s" % (name, super_col_value))
            assert name == super_col_value[0].column.value
Code Example #13
    def verify_with_thrift(self):
        # No more thrift in 4.0
        if self.cluster.version() >= '4':
            return

        node = self.cluster.nodelist()[0]
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('supcols')
        p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
        for name in NAMES:
            super_col_value = client.get_slice(name, ColumnParent("cols"), p,
                                               ConsistencyLevel.ONE)
            logger.debug("get_slice(%s) returned %s" % (name, super_col_value))
            assert name == super_col_value[0].column.value
Code Example #14
    def test_thrift(self):
        """
        A thrift client receives a TimedOutException
        """
        self._prepare_cluster(start_rpc=True)
        self.expected_expt = thrift_types.TimedOutException

        client = get_thrift_client()
        client.transport.open()
        client.set_keyspace(KEYSPACE)

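        # the insert at CL.ALL is expected to time out and surface as a TimedOutException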
        with pytest.raises(self.expected_expt):
            client.insert('key1'.encode(),
                          thrift_types.ColumnParent('mytable'),
                          thrift_types.Column('value'.encode(), 'Value 1'.encode(), 0),
                          thrift_types.ConsistencyLevel.ALL)

        client.transport.close()
Code Example #15
    def test_thrift(self):
        """
        A thrift client receives a TimedOutException
        """
        self._prepare_cluster(start_rpc=True, compact_storage=True)
        self.expected_expt = thrift_types.TimedOutException

        client = get_thrift_client()
        client.transport.open()
        client.set_keyspace(KEYSPACE)

        with pytest.raises(self.expected_expt):
            client.insert(
                'key1'.encode(), thrift_types.ColumnParent('mytable'),
                thrift_types.Column('value'.encode(), 'Value 1'.encode(), 0),
                thrift_types.ConsistencyLevel.ALL)

        client.transport.close()
Code Example #16
    def make_connection():
        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        return client
Code Example #17
    def test_sc_with_row_cache(self):
        """ Test for bug reported in #4190 """
        cluster = self.cluster

        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        self.patient_cql_connection(node1)

        node = self.cluster.nodelist()[0]
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'ks'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '1'}
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)
        client.set_keyspace('ks')

        # create a super column family with UTF8 for all types
        cfdef = CfDef()
        cfdef.keyspace = 'ks'
        cfdef.name = 'Users'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'UTF8Type'
        cfdef.caching = 'rows_only'

        client.system_add_column_family(cfdef)

        column = Column(name='name'.encode(),
                        value='Mina'.encode(),
                        timestamp=100)
        client.batch_mutate(
            {
                'mina'.encode(): {
                    'Users': [
                        Mutation(
                            ColumnOrSuperColumn(super_column=SuperColumn(
                                'attrs'.encode(), [column])))
                    ]
                }
            }, ThriftConsistencyLevel.ONE)

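        # read the row back: expect a single 'attrs' supercolumn holding only the 'name' subcolumn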
        column_parent = ColumnParent(column_family='Users')
        predicate = SlicePredicate(
            slice_range=SliceRange("".encode(), "".encode(), False, 100))
        super_columns = client.get_slice('mina'.encode(), column_parent,
                                         predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 1 == len(super_column.columns)
        assert 'name'.encode() == super_column.columns[0].name
        assert 'Mina'.encode() == super_column.columns[0].value

        # add a 'country' subcolumn
        column = Column(name='country'.encode(),
                        value='Canada'.encode(),
                        timestamp=100)
        client.batch_mutate(
            {
                'mina'.encode(): {
                    'Users': [
                        Mutation(
                            ColumnOrSuperColumn(super_column=SuperColumn(
                                'attrs'.encode(), [column])))
                    ]
                }
            }, ThriftConsistencyLevel.ONE)

        super_columns = client.get_slice('mina'.encode(), column_parent,
                                         predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 2 == len(super_column.columns)

        assert 'country'.encode() == super_column.columns[0].name
        assert 'Canada'.encode() == super_column.columns[0].value

        assert 'name'.encode() == super_column.columns[1].name
        assert 'Mina'.encode() == super_column.columns[1].value

        # add a 'region' subcolumn
        column = Column(name='region'.encode(),
                        value='Quebec'.encode(),
                        timestamp=100)
        client.batch_mutate(
            {
                'mina'.encode(): {
                    'Users': [
                        Mutation(
                            ColumnOrSuperColumn(super_column=SuperColumn(
                                'attrs'.encode(), [column])))
                    ]
                }
            }, ThriftConsistencyLevel.ONE)

        super_columns = client.get_slice('mina'.encode(), column_parent,
                                         predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 3 == len(super_column.columns)

        assert 'country'.encode() == super_column.columns[0].name
        assert 'Canada'.encode() == super_column.columns[0].value

        assert 'name'.encode() == super_column.columns[1].name
        assert 'Mina'.encode() == super_column.columns[1].value

        assert 'region'.encode() == super_column.columns[2].name
        assert 'Quebec'.encode() == super_column.columns[2].value
Code Example #18
    def test_dense_supercolumn_with_renames(self):
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        _create_dense_super_cf(client, 'dense_super_2')

        for i in range(1, 3):
            client.insert(
                'k1'.encode(),
                ColumnParent('dense_super_2', 'key{}'.format(i).encode()),
                Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert(
                'k2'.encode(),
                ColumnParent('dense_super_2', 'key{}'.format(i).encode()),
                Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)

        cursor.execute(
            "ALTER TABLE ks.dense_super_2 RENAME key TO renamed_key")
        cursor.execute(
            "ALTER TABLE ks.dense_super_2 RENAME column1 TO renamed_column1")
        cursor.execute(
            "ALTER TABLE ks.dense_super_2 RENAME column2 TO renamed_column2")
        cursor.execute(
            "ALTER TABLE ks.dense_super_2 RENAME value TO renamed_value")

        _validate_dense_cql(cursor,
                            cf='dense_super_2',
                            key='renamed_key',
                            column1='renamed_column1',
                            column2='renamed_column2',
                            value='renamed_value')
        _validate_dense_thrift(client, cf='dense_super_2')

        if self.upgrade_is_version_4_or_greater():  # 4.0 doesn't support compact storage
            cursor.execute(
                "ALTER TABLE ks.dense_super_2 DROP COMPACT STORAGE;")

        for is_upgraded, cursor in self.do_upgrade(cursor,
                                                   row_factory=dict_factory,
                                                   use_thrift=True):
            logger.debug("Querying {} node".format(
                "upgraded" if is_upgraded else "old"))
            is_version_4_or_greater = node.get_cassandra_version() >= CASSANDRA_4_0
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_dense_thrift(client, cf='dense_super_2')
            _validate_dense_cql(
                cursor,
                cf='dense_super_2',
                key='renamed_key',
                column1='renamed_column1',
                column2='renamed_column2',
                value='renamed_value',
                is_version_4_or_greater=is_version_4_or_greater)
Code Example #19
    def test_functional(self):
        NUM_SUBCOLS = 100
        NUM_ADDS = 100

        cluster = self.cluster
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]

        time.sleep(.5)
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        time.sleep(1)  # wait for propagation

        # create the columnfamily using thrift
        host, port = node1.network_interfaces['thrift']
        thrift_conn = get_thrift_client(host, port)
        thrift_conn.transport.open()
        thrift_conn.set_keyspace('ks')
        cf_def = CfDef(keyspace='ks',
                       name='cf',
                       column_type='Super',
                       default_validation_class='CounterColumnType')
        thrift_conn.system_add_column_family(cf_def)

        # let the sediment settle to the bottom before drinking...
        time.sleep(2)

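        # apply NUM_ADDS counter increments to 'col_0' under each of NUM_SUBCOLS supercolumns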
        for subcol in range(NUM_SUBCOLS):
            for add in range(NUM_ADDS):
                column_parent = ColumnParent(column_family='cf',
                                             super_column=('subcol_%d' %
                                                           subcol).encode())
                counter_column = CounterColumn('col_0'.encode(), 1)
                thrift_conn.add('row_0'.encode(), column_parent,
                                counter_column, ConsistencyLevel.QUORUM)
        time.sleep(1)
        cluster.flush()

        logger.debug("Stopping cluster")
        cluster.stop()
        time.sleep(5)
        logger.debug("Starting cluster")
        cluster.start()
        time.sleep(5)

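        # reconnect over Thrift after the restart and check that every counter reached NUM_ADDS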
        thrift_conn = get_thrift_client(host, port)
        thrift_conn.transport.open()
        thrift_conn.set_keyspace('ks')

        from_db = []

        for i in range(NUM_SUBCOLS):
            column_path = ColumnPath(column_family='cf',
                                     column='col_0'.encode(),
                                     super_column=(('subcol_%d' % i).encode()))
            column_or_super_column = thrift_conn.get('row_0'.encode(),
                                                     column_path,
                                                     ConsistencyLevel.QUORUM)
            val = column_or_super_column.counter_column.value
            logger.debug(str(val))
            from_db.append(val)
        logger.debug("")

        expected = [NUM_ADDS for i in range(NUM_SUBCOLS)]

        if from_db != expected:
            raise Exception(
                "Expected a bunch of the same values out of the db. Got this: "
                + str(from_db))
Code Example #20
    def test_sc_with_row_cache(self):
        """ Test for bug reported in #4190 """
        cluster = self.cluster

        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        self.patient_cql_connection(node1)

        node = self.cluster.nodelist()[0]
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'ks'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '1'}
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)
        client.set_keyspace('ks')

        # create a super column family with UTF8 for all types
        cfdef = CfDef()
        cfdef.keyspace = 'ks'
        cfdef.name = 'Users'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'UTF8Type'
        cfdef.caching = 'rows_only'

        client.system_add_column_family(cfdef)

        column = Column(name='name'.encode(), value='Mina'.encode(), timestamp=100)
        client.batch_mutate(
            {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}},
            ThriftConsistencyLevel.ONE)

        column_parent = ColumnParent(column_family='Users')
        predicate = SlicePredicate(slice_range=SliceRange("".encode(), "".encode(), False, 100))
        super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 1 == len(super_column.columns)
        assert 'name'.encode() == super_column.columns[0].name
        assert 'Mina'.encode() == super_column.columns[0].value

        # add a 'country' subcolumn
        column = Column(name='country'.encode(), value='Canada'.encode(), timestamp=100)
        client.batch_mutate(
            {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}},
            ThriftConsistencyLevel.ONE)

        super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 2 == len(super_column.columns)

        assert 'country'.encode() == super_column.columns[0].name
        assert 'Canada'.encode() == super_column.columns[0].value

        assert 'name'.encode() == super_column.columns[1].name
        assert 'Mina'.encode() == super_column.columns[1].value

        # add a 'region' subcolumn
        column = Column(name='region'.encode(), value='Quebec'.encode(), timestamp=100)
        client.batch_mutate(
            {'mina'.encode(): {'Users': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn('attrs'.encode(), [column])))]}},
            ThriftConsistencyLevel.ONE)

        super_columns = client.get_slice('mina'.encode(), column_parent, predicate, ThriftConsistencyLevel.ONE)
        assert 1 == len(super_columns)
        super_column = super_columns[0].super_column
        assert 'attrs'.encode() == super_column.name
        assert 3 == len(super_column.columns)

        assert 'country'.encode() == super_column.columns[0].name
        assert 'Canada'.encode() == super_column.columns[0].value

        assert 'name'.encode() == super_column.columns[1].name
        assert 'Mina'.encode() == super_column.columns[1].value

        assert 'region'.encode() == super_column.columns[2].name
        assert 'Quebec'.encode() == super_column.columns[2].value
Code Example #21
    def test_functional(self):
        NUM_SUBCOLS = 100
        NUM_ADDS = 100

        cluster = self.cluster
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]

        time.sleep(.5)
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 3)
        time.sleep(1)  # wait for propagation

        # create the columnfamily using thrift
        host, port = node1.network_interfaces['thrift']
        thrift_conn = get_thrift_client(host, port)
        thrift_conn.transport.open()
        thrift_conn.set_keyspace('ks')
        cf_def = CfDef(keyspace='ks', name='cf', column_type='Super',
                       default_validation_class='CounterColumnType')
        thrift_conn.system_add_column_family(cf_def)

        # let the sediment settle to the bottom before drinking...
        time.sleep(2)

        for subcol in range(NUM_SUBCOLS):
            for add in range(NUM_ADDS):
                column_parent = ColumnParent(column_family='cf',
                                             super_column=('subcol_%d' % subcol).encode())
                counter_column = CounterColumn('col_0'.encode(), 1)
                thrift_conn.add('row_0'.encode(), column_parent, counter_column,
                                ConsistencyLevel.QUORUM)
        time.sleep(1)
        cluster.flush()

        logger.debug("Stopping cluster")
        cluster.stop()
        time.sleep(5)
        logger.debug("Starting cluster")
        cluster.start()
        time.sleep(5)

        thrift_conn = get_thrift_client(host, port)
        thrift_conn.transport.open()
        thrift_conn.set_keyspace('ks')

        from_db = []

        for i in range(NUM_SUBCOLS):
            column_path = ColumnPath(column_family='cf', column='col_0'.encode(),
                                     super_column=(('subcol_%d' % i).encode()))
            column_or_super_column = thrift_conn.get('row_0'.encode(), column_path,
                                                     ConsistencyLevel.QUORUM)
            val = column_or_super_column.counter_column.value
            logger.debug(str(val))
            from_db.append(val)
        logger.debug("")

        expected = [NUM_ADDS for i in range(NUM_SUBCOLS)]

        if from_db != expected:
            raise Exception("Expected a bunch of the same values out of the db. Got this: " + str(from_db))