def bootstrap(node, data_center=None, token=None):
    log.debug('called bootstrap('
              'node={node}, data_center={data_center}, '
              'token={token})'.format(node=node, data_center=data_center,
                                      token=token))
    node_instance = Node('node%s' % node,
                         get_cluster(),
                         auto_bootstrap=False,
                         thrift_interface=(IP_FORMAT % node, 9160),
                         storage_interface=(IP_FORMAT % node, 7000),
                         binary_interface=(IP_FORMAT % node, 9042),
                         jmx_port=str(7000 + 100 * node),
                         remote_debug_port=0,
                         initial_token=token if token else node * 10)
    get_cluster().add(node_instance, is_seed=False, data_center=data_center)

    try:
        start(node)
    except Exception as e0:
        log.debug('failed 1st bootstrap attempt with: \n{}'.format(e0))
        # Try only twice
        try:
            start(node)
        except Exception as e1:
            log.debug('failed 2nd bootstrap attempt with: \n{}'.format(e1))
            log.error('Added node failed to start twice.')
            raise e1
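A minimal usage sketch for the helper above, assuming bootstrap() and get_cluster() are importable from the surrounding test utilities (the exact module path is not shown here):

# Add a fourth node to data center 'dc2' with an explicit initial token,
# then confirm the ccm cluster now knows about it.
bootstrap(4, data_center='dc2', token=40)
print(sorted(get_cluster().nodes))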
Example #2
    def test_connection_error(self):
        """
        Trigger and ensure connection_errors are counted
        Stop all nodes while the driver knows about their "DOWN" states.
        """
        # Test writes
        for i in range(0, 100):
            self.session.execute_async(
                "INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i))

        # Stop the cluster
        get_cluster().stop(wait=True, gently=False)

        try:
            # Ensure the nodes are actually down
            query = SimpleStatement("SELECT * FROM test",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(NoHostAvailable):
                self.session.execute(query)
        finally:
            get_cluster().start(wait_for_binary_proto=True,
                                wait_other_notice=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        self.assertGreater(self.cluster.metrics.stats.connection_errors, 0)
    def test_connection_error(self):
        """
        Trigger and ensure connection_errors are counted
        """

        cluster = Cluster(metrics_enabled=True)
        session = cluster.connect()
        session.execute("USE test3rf")

        # Test writes
        for i in range(0, 100):
            session.execute_async(
                """
                INSERT INTO test3rf.test (k, v) VALUES (%s, %s)
                """ % (i, i))

        # Force kill cluster
        get_cluster().stop(wait=True, gently=False)
        try:
            # Ensure the nodes are actually down
            self.assertRaises(NoHostAvailable, session.execute, "USE test3rf")
        finally:
            get_cluster().start(wait_for_binary_proto=True)

        self.assertGreater(cluster.metrics.stats.connection_errors, 0)
Example #4
def setup_cluster_ssl(client_auth=False):
    """
    We need some custom setup for this module. This will start the ccm cluster with basic
    ssl connectivity, and client authentication if needed.
    """

    use_single_node(start=False)
    ccm_cluster = get_cluster()
    ccm_cluster.stop()

    # Configure ccm to use ssl.
    config_options = {
        'client_encryption_options': {
            'enabled': True,
            'keystore': SERVER_KEYSTORE_PATH,
            'keystore_password': DEFAULT_PASSWORD
        }
    }

    if client_auth:
        client_encryption_options = config_options['client_encryption_options']
        client_encryption_options['require_client_auth'] = True
        client_encryption_options['truststore'] = SERVER_TRUSTSTORE_PATH
        client_encryption_options['truststore_password'] = DEFAULT_PASSWORD

    ccm_cluster.set_configuration_options(config_options)
    start_cluster_wait_for_up(ccm_cluster)
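For reference, a client connecting to a cluster configured this way would pass matching SSL options to the driver. This is a rough sketch, not the suite's actual code: the CA path is an assumption and must correspond to the server keystore above, and newer driver versions accept an ssl_context instead.

import ssl
from cassandra.cluster import Cluster

ssl_opts = {
    'ca_certs': '/path/to/server_cert.pem',  # hypothetical path to the cert that signed the server keystore
    'ssl_version': ssl.PROTOCOL_TLSv1_2,
    'cert_reqs': ssl.CERT_REQUIRED,
}
cluster = Cluster(ssl_options=ssl_opts)
session = cluster.connect()
session.execute("SELECT release_version FROM system.local")
cluster.shutdown()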
Example #5
def bootstrap(node, data_center=None, token=None):
    log.debug('called bootstrap('
              'node={node}, data_center={data_center}, '
              'token={token})'.format(node=node, data_center=data_center,
                                      token=token))
    cluster = get_cluster()
    # for now assumes cluster has at least one node
    node_type = type(next(iter(cluster.nodes.values())))
    node_instance = node_type(
        'node%s' % node,
        cluster,
        auto_bootstrap=False,
        thrift_interface=(IP_FORMAT % node, 9160),
        storage_interface=(IP_FORMAT % node, 7000),
        binary_interface=(IP_FORMAT % node, 9042),
        jmx_port=str(7000 + 100 * node),
        remote_debug_port=0,
        initial_token=token if token else node * 10
    )
    cluster.add(node_instance, is_seed=False, data_center=data_center)

    try:
        node_instance.start()
    except Exception as e0:
        log.debug('failed 1st bootstrap attempt with: \n{}'.format(e0))
        # Try only twice
        try:
            node_instance.start()
        except Exception as e1:
            log.debug('failed 2nd bootstrap attempt with: \n{}'.format(e1))
            log.error('Added node failed to start twice.')
            raise e1
Example #6
def setup_cluster_ssl(client_auth=False):
    """
    We need some custom setup for this module. This will start the ccm cluster with basic
    ssl connectivity, and client authentication if needed.
    """

    use_single_node(start=False)
    ccm_cluster = get_cluster()
    ccm_cluster.stop()

    # Fetch the absolute path to the keystore for ccm.
    abs_path_server_keystore_path = os.path.abspath(SERVER_KEYSTORE_PATH)

    # Configure ccm to use ssl.

    config_options = {
        'client_encryption_options': {
            'enabled': True,
            'keystore': abs_path_server_keystore_path,
            'keystore_password': DEFAULT_PASSWORD
        }
    }

    if client_auth:
        abs_path_server_truststore_path = os.path.abspath(
            SERVER_TRUSTSTORE_PATH)
        client_encryption_options = config_options['client_encryption_options']
        client_encryption_options['require_client_auth'] = True
        client_encryption_options['truststore'] = abs_path_server_truststore_path
        client_encryption_options['truststore_password'] = DEFAULT_PASSWORD

    ccm_cluster.set_configuration_options(config_options)
    ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
Example #7
def teardown_module():
    """
    The rest of the tests don't need SSL enabled; remove the cluster so it does not interfere with other tests.
    """

    ccm_cluster = get_cluster()
    ccm_cluster.stop()
    remove_cluster()
Example #8
    def setUpClass(self):
        """
        This will set up the necessary infrastructure to run the unified authentication tests.
        """
        if not DSE_VERSION or DSE_VERSION < Version('5.1'):
            return
        self.cluster = None

        ccm_cluster = get_cluster()
        # Stop cluster if running and configure it with the correct options
        ccm_cluster.stop()
        if isinstance(ccm_cluster, DseCluster):
            # Setup dse options in dse.yaml
            config_options = {
                'authentication_options': {
                    'enabled': 'true',
                    'default_scheme': 'internal',
                    'scheme_permissions': 'true',
                    'transitional_mode': 'normal'
                },
                'authorization_options': {
                    'enabled': 'true'
                }
            }

            # Setup dse authenticator in cassandra.yaml
            ccm_cluster.set_configuration_options({
                'authenticator':
                'com.datastax.bdp.cassandra.auth.DseAuthenticator',
                'authorizer':
                'com.datastax.bdp.cassandra.auth.DseAuthorizer'
            })
            ccm_cluster.set_dse_configuration_options(config_options)
            ccm_cluster.start(wait_for_binary_proto=True,
                              wait_other_notice=True)
        else:
            log.error("Cluster is not dse cluster test will fail")

        # Create users and test keyspace
        self.user_role = 'user1'
        self.server_role = 'server'
        self.root_cluster = Cluster(
            auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra'))
        self.root_session = self.root_cluster.connect()

        stmts = [
            "CREATE USER {0} WITH PASSWORD '{1}'".format(
                self.server_role, self.server_role),
            "CREATE USER {0} WITH PASSWORD '{1}'".format(
                self.user_role, self.user_role),
            "CREATE KEYSPACE testproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}",
            "CREATE TABLE testproxy.testproxy (id int PRIMARY KEY, value text)",
            "GRANT ALL PERMISSIONS ON KEYSPACE testproxy to {0}".format(
                self.user_role)
        ]

        wait_role_manager_setup_then_execute(self.root_session, stmts)
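After this fixture runs, a test would typically authenticate as one of the roles created above. A minimal sketch using the same Cluster and DSEPlainTextAuthProvider classes; the credentials mirror the CREATE USER statements in the fixture:

# Sketch: connect as the plain user created above ('user1') and touch the
# keyspace it was granted permissions on.
user_cluster = Cluster(auth_provider=DSEPlainTextAuthProvider('user1', 'user1'))
user_session = user_cluster.connect()
user_session.execute("SELECT * FROM testproxy.testproxy")
user_cluster.shutdown()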
Example #9
    def test_token(self):
        expected_node_count = len(get_cluster().nodes)

        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cluster.connect()
        tmap = cluster.metadata.token_map
        self.assertTrue(issubclass(tmap.token_class, Token))
        self.assertEqual(expected_node_count, len(tmap.ring))
        cluster.shutdown()
    def setUpClass(self):
        """
        This will set up the necessary infrastructure to run our authentication tests. It requires the ADS_HOME environment variable
        and our custom embedded Apache Directory Server jar in order to run.
        """

        clear_kerberos_tickets()
        self.cluster = None

        # Setup variables for various keytab and other files
        self.conf_file_dir = ADS_HOME+"conf/"
        self.krb_conf = self.conf_file_dir+"krb5.conf"
        self.dse_keytab = self.conf_file_dir+"dse.keytab"
        self.dseuser_keytab = self.conf_file_dir+"dseuser.keytab"
        self.cassandra_keytab = self.conf_file_dir+"cassandra.keytab"
        self.bob_keytab = self.conf_file_dir + "bob.keytab"
        self.charlie_keytab = self.conf_file_dir + "charlie.keytab"
        actual_jar = ADS_HOME+"embedded-ads.jar"

        # Create configuration directories if they don't already exist
        if not os.path.exists(self.conf_file_dir):
            os.makedirs(self.conf_file_dir)
        log.warning("Starting adserver")
        # Start the ADS; this will create the keytab and configuration files listed above
        self.proc = subprocess.Popen(['java', '-jar', actual_jar, '-k', '--confdir', self.conf_file_dir], shell=False)
        time.sleep(10)
        # TODO poll for server to come up

        log.warning("Starting adserver started")
        ccm_cluster = get_cluster()
        log.warning("fetching tickets")
        # Stop cluster if running and configure it with the correct options
        ccm_cluster.stop()
        if isinstance(ccm_cluster, DseCluster):
            # Setup kerberos options in dse.yaml
            config_options = {'kerberos_options': {'keytab': self.dse_keytab,
                                                   'service_principal': 'dse/[email protected]',
                                                   'qop': 'auth'},
                              'authentication_options': {'enabled': 'true',
                                                         'default_scheme': 'kerberos',
                                                         'scheme_permissions': 'true',
                                                         'allow_digest_with_kerberos': 'true',
                                                         'plain_text_without_ssl': 'warn',
                                                         'transitional_mode': 'disabled'},
                              'authorization_options': {'enabled': 'true'}}

            krb5java = "-Djava.security.krb5.conf=" + self.krb_conf
            # Setup dse authenticator in cassandra.yaml
            ccm_cluster.set_configuration_options({
                'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator',
                'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer'
            })
            ccm_cluster.set_dse_configuration_options(config_options)
            ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=[krb5java])
        else:
            log.error("Cluster is not dse cluster test will fail")
Example #11
def bootstrap(node, data_center=None, token=None):
    node_instance = Node('node%s' % node,
                         get_cluster(),
                         auto_bootstrap=False,
                         thrift_interface=(IP_FORMAT % node, 9160),
                         storage_interface=(IP_FORMAT % node, 7000),
                         jmx_port=str(7000 + 100 * node),
                         remote_debug_port=0,
                         initial_token=token if token else node * 10)
    get_cluster().add(node_instance, is_seed=False, data_center=data_center)

    try:
        start(node)
    except Exception:
        # Try only twice
        try:
            start(node)
        except Exception:
            log.error('Added node failed to start twice.')
            raise
Example #12
    def test_token(self):
        expected_node_count = len(get_cluster().nodes)

        cluster = Cluster()
        cluster.connect()
        tmap = cluster.metadata.token_map
        self.assertTrue(issubclass(tmap.token_class, Token))
        self.assertEqual(expected_node_count, len(tmap.ring))
        self.assertEqual(expected_node_count, len(tmap.tokens_to_hosts))
        cluster.shutdown()
Example #13
def setup_module():
    use_singledc(start=False)
    ccm_cluster = get_cluster()
    ccm_cluster.stop()
    config_options = {'authenticator': 'PasswordAuthenticator',
                      'authorizer': 'CassandraAuthorizer'}
    ccm_cluster.set_configuration_options(config_options)
    log.debug("Starting ccm test cluster with %s", config_options)
    ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
    # there seems to be some race, with some versions of C* taking longer to
    # get the auth (and default user) set up. Sleep here to give it a chance
    time.sleep(10)
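With PasswordAuthenticator enabled as above, tests then connect through the driver's plain-text auth provider. A minimal sketch, assuming the stock 'cassandra'/'cassandra' superuser that PasswordAuthenticator creates on first start:

from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster

# Sketch only: default superuser credentials; real tests would create and use
# their own roles.
auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')
cluster = Cluster(auth_provider=auth_provider)
session = cluster.connect()
cluster.shutdown()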
def setup_module():
    use_singledc(start=False)
    ccm_cluster = get_cluster()
    ccm_cluster.stop()
    config_options = {'native_transport_port': 9046}
    ccm_cluster.set_configuration_options(config_options)
    # can't use wait_for_binary_proto because ccm tries port 9042
    ccm_cluster.start(wait_for_binary_proto=False)
    # wait until all nodes are up
    wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.1'], port=9046).connect().shutdown(), 1, 20)
    wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.2'], port=9046).connect().shutdown(), 1, 20)
    wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.3'], port=9046).connect().shutdown(), 1, 20)
def setup_module():
    if CASSANDRA_IP.startswith("127.0.0.") and not USE_CASS_EXTERNAL:
        use_singledc(start=False)
        ccm_cluster = get_cluster()
        ccm_cluster.stop()
        config_options = {
            'authenticator': 'PasswordAuthenticator',
            'authorizer': 'CassandraAuthorizer'
        }
        ccm_cluster.set_configuration_options(config_options)
        log.debug("Starting ccm test cluster with %s", config_options)
        start_cluster_wait_for_up(ccm_cluster)
def setup_module():
    use_singledc(start=False)
    ccm_cluster = get_cluster()
    ccm_cluster.clear()
    # This is necessary because test_too_many_statements may
    # timeout otherwise
    config_options = {'write_request_timeout_in_ms': '20000'}
    ccm_cluster.set_configuration_options(config_options)
    ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
    setup_keyspace()
    global CASS_SERVER_VERSION
    CASS_SERVER_VERSION = get_server_versions()[0]
Example #17
    def setUp(self):
        """
        Test is skipped if run with native protocol version <4
        """
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest(
                "Native protocol 4.0+ is required for custom payloads, currently using %r"
                % (PROTOCOL_VERSION,))
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect()
        self.nodes_currently_failing = []
        self.node1, self.node2, self.node3 = get_cluster().nodes.values()
def setup_module():

    if DSE_IP.startswith("127.0.0."):
        use_singledc(start=False)
        ccm_cluster = get_cluster()
        ccm_cluster.stop()
        config_options = {'authenticator': 'PasswordAuthenticator',
                          'authorizer': 'CassandraAuthorizer'}
        ccm_cluster.set_configuration_options(config_options)
        log.debug("Starting ccm test cluster with %s", config_options)
        start_cluster_wait_for_up(ccm_cluster)
    else:
        set_default_dse_ip()
def setup_module():
    """
    We need some custom setup for this module. All unit tests in this module
    require protocol >=4. We won't bother going through the setup required unless that is the
    protocol version we are using.
    """

    # If we aren't at protocol v4 or greater, don't waste time setting anything up; all tests will be skipped
    if PROTOCOL_VERSION >= 4:
        use_singledc(start=False)
        ccm_cluster = get_cluster()
        ccm_cluster.stop()
        config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000}
        ccm_cluster.set_configuration_options(config_options)
        ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
        setup_keyspace()
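The tombstone thresholds configured here exist so tests can provoke server-side read failures. A rough sketch of the kind of check a test built on this fixture makes; the session and table below are assumptions, not part of the fixture:

from cassandra import ReadFailure

# Sketch only: 'session' is assumed to be connected to the cluster configured
# above, and 'test3rf.test' is a hypothetical table seeded with more than
# tombstone_failure_threshold (2000) tombstones in one partition.
try:
    session.execute("SELECT * FROM test3rf.test")
except ReadFailure as exc:
    print("read aborted after crossing tombstone_failure_threshold:", exc)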
    def setUp(self):
        """
        Test is skipped if run with native protocol version <4
        """
        self.support_v5 = True
        if PROTOCOL_VERSION < 4:
            raise unittest.SkipTest(
                "Native protocol 4,0+ is required for custom payloads, currently using %r"
                % (PROTOCOL_VERSION,))
        try:
            self.cluster = Cluster(protocol_version=ProtocolVersion.MAX_SUPPORTED, allow_beta_protocol_version=True)
            self.session = self.cluster.connect()
        except NoHostAvailable:
            log.info("Protocol Version 5 not supported,")
            self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
            self.session = self.cluster.connect()
            self.support_v5 = False

        self.nodes_currently_failing = []
        self.node1, self.node2, self.node3 = get_cluster().nodes.values()
Example #21
def bootstrap(node, data_center=None, token=None):
    cluster = get_cluster()
    # for now assumes cluster has at least one node
    node_type = type(next(iter(cluster.nodes.values())))
    node_instance = node_type('node%s' % node,
                              cluster,
                              auto_bootstrap=False,
                              thrift_interface=(IP_FORMAT % node, 9160),
                              storage_interface=(IP_FORMAT % node, 7000),
                              binary_interface=(IP_FORMAT % node, 9042),
                              jmx_port=str(7000 + 100 * node),
                              remote_debug_port=0,
                              initial_token=token if token else node * 10)
    cluster.add(node_instance, is_seed=False, data_center=data_center)

    try:
        node_instance.start()
    except Exception:
        # Try only twice
        node_instance.start()
Example #22
    def test_legacy_tables(self):

        if get_server_versions()[0] < (2, 1, 0):
            raise unittest.SkipTest(
                'Test schema output assumes 2.1.0+ options')

        if sys.version_info[:2] != (2, 7):
            raise unittest.SkipTest(
                'This test compares static strings generated from dict items, which may change order. Test with 2.7.'
            )

        cli_script = """CREATE KEYSPACE legacy
WITH placement_strategy = 'SimpleStrategy'
AND strategy_options = {replication_factor:1};

USE legacy;

CREATE COLUMN FAMILY simple_no_col
 WITH comparator = UTF8Type
 AND key_validation_class = UUIDType
 AND default_validation_class = UTF8Type;

CREATE COLUMN FAMILY simple_with_col
 WITH comparator = UTF8Type
 and key_validation_class = UUIDType
 and default_validation_class = UTF8Type
 AND column_metadata = [
 {column_name: col_with_meta, validation_class: UTF8Type}
 ];

CREATE COLUMN FAMILY composite_partition_no_col
 WITH comparator = UTF8Type
 AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)'
 AND default_validation_class = UTF8Type;

CREATE COLUMN FAMILY composite_partition_with_col
 WITH comparator = UTF8Type
 AND key_validation_class = 'CompositeType(UUIDType,UTF8Type)'
 AND default_validation_class = UTF8Type
 AND column_metadata = [
 {column_name: col_with_meta, validation_class: UTF8Type}
 ];

CREATE COLUMN FAMILY nested_composite_key
 WITH comparator = UTF8Type
 and key_validation_class = 'CompositeType(CompositeType(UUIDType,UTF8Type), LongType)'
 and default_validation_class = UTF8Type
 AND column_metadata = [
 {column_name: full_name, validation_class: UTF8Type}
 ];

create column family composite_comp_no_col
  with column_type = 'Standard'
  and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)'
  and default_validation_class = 'BytesType'
  and key_validation_class = 'BytesType'
  and read_repair_chance = 0.0
  and dclocal_read_repair_chance = 0.1
  and gc_grace = 864000
  and min_compaction_threshold = 4
  and max_compaction_threshold = 32
  and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'
  and caching = 'KEYS_ONLY'
  and cells_per_row_to_cache = '0'
  and default_time_to_live = 0
  and speculative_retry = 'NONE'
  and comment = 'Stores file meta data';

create column family composite_comp_with_col
  with column_type = 'Standard'
  and comparator = 'DynamicCompositeType(t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType)'
  and default_validation_class = 'BytesType'
  and key_validation_class = 'BytesType'
  and read_repair_chance = 0.0
  and dclocal_read_repair_chance = 0.1
  and gc_grace = 864000
  and min_compaction_threshold = 4
  and max_compaction_threshold = 32
  and compaction_strategy = 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'
  and caching = 'KEYS_ONLY'
  and cells_per_row_to_cache = '0'
  and default_time_to_live = 0
  and speculative_retry = 'NONE'
  and comment = 'Stores file meta data'
  and column_metadata = [
    {column_name : 'b@6d616d6d616a616d6d61',
    validation_class : BytesType,
    index_name : 'idx_one',
    index_type : 0},
    {column_name : 'b@6869746d65776974686d75736963',
    validation_class : BytesType,
    index_name : 'idx_two',
    index_type : 0}]
  and compression_options = {'sstable_compression' : 'org.apache.cassandra.io.compress.LZ4Compressor'};"""

        # note: the inner key type for legacy.nested_composite_key
        # (org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type))
        # is a bit strange, but it replays in CQL with desired results
        expected_string = """CREATE KEYSPACE legacy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}  AND durable_writes = true;

/*
Warning: Table legacy.composite_comp_with_col omitted because it has constructs not compatible with CQL (was created via legacy API).

Approximate structure, for reference:
(this should not be used to reproduce this schema)

CREATE TABLE legacy.composite_comp_with_col (
    key blob,
    t timeuuid,
    b blob,
    s text,
    "b@6869746d65776974686d75736963" blob,
    "b@6d616d6d616a616d6d61" blob,
    PRIMARY KEY (key, t, b, s)
) WITH COMPACT STORAGE
    AND CLUSTERING ORDER BY (t ASC, b ASC, s ASC)
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = 'Stores file meta data'
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';
CREATE INDEX idx_two ON legacy.composite_comp_with_col ("b@6869746d65776974686d75736963");
CREATE INDEX idx_one ON legacy.composite_comp_with_col ("b@6d616d6d616a616d6d61");
*/

CREATE TABLE legacy.nested_composite_key (
    key 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType, org.apache.cassandra.db.marshal.UTF8Type)',
    key2 bigint,
    full_name text,
    PRIMARY KEY ((key, key2))
) WITH COMPACT STORAGE
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';

CREATE TABLE legacy.composite_partition_with_col (
    key uuid,
    key2 text,
    col_with_meta text,
    PRIMARY KEY ((key, key2))
) WITH COMPACT STORAGE
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';

CREATE TABLE legacy.composite_partition_no_col (
    key uuid,
    key2 text,
    column1 text,
    value text,
    PRIMARY KEY ((key, key2), column1)
) WITH COMPACT STORAGE
    AND CLUSTERING ORDER BY (column1 ASC)
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';

CREATE TABLE legacy.simple_with_col (
    key uuid PRIMARY KEY,
    col_with_meta text
) WITH COMPACT STORAGE
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';

CREATE TABLE legacy.simple_no_col (
    key uuid,
    column1 text,
    value text,
    PRIMARY KEY (key, column1)
) WITH COMPACT STORAGE
    AND CLUSTERING ORDER BY (column1 ASC)
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = ''
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';

/*
Warning: Table legacy.composite_comp_no_col omitted because it has constructs not compatible with CQL (was created via legacy API).

Approximate structure, for reference:
(this should not be used to reproduce this schema)

CREATE TABLE legacy.composite_comp_no_col (
    key blob,
    column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.TimeUUIDType, org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type)',
    column2 text,
    value blob,
    PRIMARY KEY (key, column1, column1, column2)
) WITH COMPACT STORAGE
    AND CLUSTERING ORDER BY (column1 ASC, column1 ASC, column2 ASC)
    AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
    AND comment = 'Stores file meta data'
    AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}
    AND dclocal_read_repair_chance = 0.1
    AND default_time_to_live = 0
    AND gc_grace_seconds = 864000
    AND max_index_interval = 2048
    AND memtable_flush_period_in_ms = 0
    AND min_index_interval = 128
    AND read_repair_chance = 0.0
    AND speculative_retry = 'NONE';
*/"""

        ccm = get_cluster()
        ccm.run_cli(cli_script)

        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()

        legacy_meta = cluster.metadata.keyspaces['legacy']
        self.assert_equal_diff(legacy_meta.export_as_string(), expected_string)

        session.execute('DROP KEYSPACE legacy')

        cluster.shutdown()
Example #23
    def tearDownClass(cls):
        ccm_cluster = get_cluster()
        ccm_cluster.stop()
        remove_cluster()
Example #24
    def tearDownClass(cls):
        cluster = get_cluster()
        cluster.start(
            wait_for_binary_proto=True)  # make sure other nodes are restarted
Example #25
    def tearDownClass(cls):
        cluster = get_cluster()
        cluster.start()  # make sure other nodes are restarted
    def init(config_options):
        ccm_cluster = get_cluster()
        ccm_cluster.stop()
        ccm_cluster.set_configuration_options(config_options)
        log.debug("Starting ccm test cluster with %s", config_options)
        ccm_cluster.start(wait_for_binary_proto=True)
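A usage sketch for the init helper above, assuming it is reachable as a plain function (in the original it may be a class-level helper); the option value is taken from the write-timeout example earlier in this listing:

# Restart the ccm cluster with a larger write timeout (sketch only).
init({'write_request_timeout_in_ms': '20000'})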