def _create_dense_super_cf(thrift, name):
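    """Create a 'dense' super column family over thrift: ASCII row keys and
    supercolumn names, LongType subcolumn names and ASCII subcolumn values.
    No column_metadata is declared, which is what makes the layout dense."""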
    cfdef = Cassandra.CfDef('ks', name, column_type='Super',
                           key_validation_class='AsciiType',        # pk
                           comparator_type='AsciiType',             # ck
                           default_validation_class='AsciiType',    # SC value
                           subcomparator_type='LongType')           # SC key
    thrift.system_add_column_family(cfdef)
    wait_for_agreement(thrift)


def _create_sparse_super_cf(thrift, name):
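    """Create a 'sparse' super column family over thrift: ASCII row keys,
    supercolumn names and subcolumn names, with col1 and col2 declared in
    column_metadata as LongType-valued subcolumns."""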
    cd1 = ColumnDef('col1'.encode(), 'LongType', None, None)
    cd2 = ColumnDef('col2'.encode(), 'LongType', None, None)
    cfdef = Cassandra.CfDef('ks', name, column_type='Super',
                           column_metadata=[cd1, cd2],
                           key_validation_class='AsciiType',
                           comparator_type='AsciiType',
                           subcomparator_type='AsciiType')
    thrift.system_add_column_family(cfdef)
    wait_for_agreement(thrift)


    def _test_keys_index_3_x_created(self, from_version):
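        """Create a table with a (thrift-only) KEYS index on `from_version`,
        insert and delete rows, DROP COMPACT STORAGE, and validate that the
        index remains queryable throughout a rolling upgrade."""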
        cql, thrift = self.prepare(start_version=from_version,
                                   num_nodes=3,
                                   rf=3)

        # Create a table with a KEYS index. This can only be done from thrift.
        logger.debug("Creating table with index from thrift")

        indexed_column = ColumnDef('c1'.encode(), 'UTF8Type', IndexType.KEYS, 'idx')
        other_column = ColumnDef('c2'.encode(), 'UTF8Type', None, None)
        table_def = Cassandra.CfDef(
            'ks',
            'ti',
            key_validation_class='UTF8Type',
            comparator_type='UTF8Type',
            default_validation_class='UTF8Type',
            column_metadata=[indexed_column, other_column],
        )
        thrift.system_add_column_family(table_def)
        logger.debug("Waiting for schema agreement")
        wait_for_agreement(thrift)
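
        # For reference, the table created above corresponds roughly to the
        # following CQL (an illustrative approximation, not something the
        # test executes):
        #
        #   CREATE TABLE ks.ti (key text PRIMARY KEY, c1 text, c2 text)
        #       WITH COMPACT STORAGE;
        #   CREATE INDEX idx ON ks.ti (c1);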

        # We're going to insert and delete some rows, and need to validate
        # that the indexed entries are what we expect. To make this easier, we
        # define _insert and _delete helpers that not only insert/delete the
        # provided row, but also keep track of every entry where c1 == 'v1' in
        # `expected_entries`, as that is the index value we'll use for
        # validation.

        expected_entries = []

        def _insert(connection, r):
            logger.debug("Inserting %s", r)
            q = "INSERT INTO ti(key, c1, c2) VALUES ('{}', '{}', '{}')".format(r[0], r[1], r[2])
            connection.execute(SimpleStatement(q, consistency_level=ConsistencyLevel.QUORUM))
            if r[1] == 'v1':
                expected_entries.append(r)

        def _delete(connection, r):
            logger.debug("Deleting %s", r)
            q = "DELETE FROM ti WHERE key='{}'".format(r[0])
            connection.execute(SimpleStatement(q, consistency_level=ConsistencyLevel.QUORUM))
            if r[1] == 'v1':
                expected_entries.remove(r)

        def _validate_entries(connection):
            logger.debug("Expecting entries %s", expected_entries)
            assert_all(connection, "SELECT key, c2 FROM ti WHERE c1='v1'",
                       [[key, c2] for [key, _, c2] in expected_entries],
                       ignore_order=True, cl=ConsistencyLevel.QUORUM)

        to_insert = [
            ['k0', 'v1', 'goo'],
            ['k1', 'v1', 'foo'],
            ['k2', 'v2', 'bar'],
            ['k3', 'v1', 'baz'],
            ['k4', 'v3', 'oof'],
            ['k5', 'v0', 'zab'],
        ]
        for row in to_insert:
            _insert(cql, row)

        # Sanity check that we can query the index properly
        logger.debug("Checking index before upgrade")
        _validate_entries(cql)

        # Delete one entry, so we test the upgrade with a tombstone in the index
        _delete(cql, to_insert[1])
        _validate_entries(cql)

        # Before upgrading, we need to DROP COMPACT STORAGE first, as the
        # upgraded version no longer supports compact storage tables.
        cql.execute("ALTER TABLE ti DROP COMPACT STORAGE")

        # Make sure the DROP COMPACT STORAGE didn't break the index even
        # before the upgrade.
        _validate_entries(cql)

        # At every step of the rolling upgrade, we'll add a few entries and
        # ensure we can still query the index. Specifically, after each node
        # is upgraded, we add 4 keys (2 indexed, 2 non-indexed), query from
        # all nodes, then remove one of the indexed entries and query again.
        def _after_upgrade(idx, client):
            logger.debug("Checking index after upgrade of node %i", idx)

            added = []
            for i in range(4):
                key = 'k{}{}'.format(idx, i)
                c1 = 'v1' if i % 2 == 0 else 'v2'
                c2 = 'val{}{}'.format(idx, i)
                to_add = [key, c1, c2]
                _insert(client, to_add)
                added.append(to_add)

            # Query from every node, so that in general we hit both upgraded
            # and non-upgraded nodes (iterating over the nodes directly also
            # avoids shadowing the `idx` parameter)
            for node in self.cluster.nodelist():
                _validate_entries(self._connect(node))

            _delete(client, added[0])

            for node in self.cluster.nodelist():
                _validate_entries(self._connect(node))

        self._do_rolling_upgrade(_after_upgrade)