Example #1
    def test_pool_management(self):
        # Ensure that in_flight and request_ids quiesce after cluster operations
        cluster = Cluster(
            protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0
        )  # no idle heartbeat here, pool management is tested in test_idle_heartbeat
        session = cluster.connect()
        session2 = cluster.connect()

        # prepare
        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        self.assertTrue(session.execute(p, ('local', )))

        # simple
        self.assertTrue(
            session.execute("SELECT * FROM system.local WHERE key='local'"))

        # set keyspace
        session.set_keyspace('system')
        session.set_keyspace('system_traces')

        # use keyspace
        session.execute('USE system')
        session.execute('USE system_traces')

        # refresh schema
        cluster.refresh_schema_metadata()
        cluster.refresh_schema_metadata(max_schema_agreement_wait=0)

        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
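
For reference, a minimal sketch of what a quiescence check like assert_quiescent_pool_state might look like, using the driver's internal cluster.get_connection_holders() API and the per-connection in_flight counter (both internal details, so treat this as illustrative only; module paths assume the DSE driver, cassandra.cluster in the OSS driver):

import time

from dse.cluster import Cluster


def wait_until_pool_quiesces(cluster, timeout=5.0, interval=0.1):
    """Poll every open connection until no requests are in flight (sketch)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        connections = [c for holder in cluster.get_connection_holders()
                       for c in holder.get_connections()]
        # Connection.in_flight counts requests still awaiting a response
        if all(c.in_flight == 0 for c in connections):
            return True
        time.sleep(interval)
    return False

cluster = Cluster()
cluster.connect()
assert wait_until_pool_quiesces(cluster)
cluster.shutdown()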
Example #2
    def test_cannot_connect_with_bad_client_auth(self):
        """
         Test to validate that we cannot connect with invalid client auth.

        This test will use bad keys/certs to preform client authentication. It will then attempt to connect
        to a server that has client authentication enabled.


        @since 2.7.0
        @expected_result The client will throw an exception on connect

        @test_category connection:ssl
        """

        # Setup absolute paths to key/cert files
        abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
        abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE)
        abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE_BAD)

        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          ssl_options={
                              'ca_certs': abs_path_ca_cert_path,
                              'ssl_version': ssl.PROTOCOL_TLSv1,
                              'keyfile': abs_driver_keyfile,
                              'certfile': abs_driver_certfile
                          })
        with self.assertRaises(NoHostAvailable) as context:
            cluster.connect()
        cluster.shutdown()
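
By contrast, a connection with a valid key/cert pair is expected to succeed. A minimal sketch using the same ssl_options shape and hypothetical certificate paths (substitute files the server actually trusts):

import os
import ssl

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

ssl_options = {
    'ca_certs': os.path.abspath('certs/rootCa.crt'),  # hypothetical path
    'ssl_version': ssl.PROTOCOL_TLSv1,
    'keyfile': os.path.abspath('certs/driver.key'),   # hypothetical path
    'certfile': os.path.abspath('certs/driver.crt'),  # hypothetical path
}

cluster = Cluster(ssl_options=ssl_options)
try:
    session = cluster.connect()
    print(session.execute("SELECT release_version FROM system.local")[0])
finally:
    cluster.shutdown()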
Example #3
    def test_session_no_cluster(self):
        """
        Test session context without cluster context.

        @since 3.4
        @jira_ticket PYTHON-521
        @expected_result session should be created correctly. The session should shut down correctly outside of the cluster context

        @test_category configuration
        """
        cluster = Cluster(**self.cluster_kwargs)
        unmanaged_session = cluster.connect()
        with cluster.connect() as session:
            self.assertFalse(cluster.is_shutdown)
            self.assertFalse(session.is_shutdown)
            self.assertFalse(unmanaged_session.is_shutdown)
            self.assertTrue(
                session.execute('select release_version from system.local')[0])
        self.assertTrue(session.is_shutdown)
        self.assertFalse(cluster.is_shutdown)
        self.assertFalse(unmanaged_session.is_shutdown)
        unmanaged_session.shutdown()
        self.assertTrue(unmanaged_session.is_shutdown)
        self.assertFalse(cluster.is_shutdown)
        cluster.shutdown()
        self.assertTrue(cluster.is_shutdown)
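
Since PYTHON-521, both Cluster and Session support the context-manager protocol, so the unmanaged-session bookkeeping above can be avoided entirely. A minimal sketch:

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

with Cluster() as cluster:
    with cluster.connect() as session:
        print(session.execute('select release_version from system.local')[0])
    # the session is shut down when its block exits
# the cluster (and any remaining sessions) shut down when this block exits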
Example #4
    def test_raise_error_on_control_connection_timeout(self):
        """
        Test for initial control connection timeout

        test_raise_error_on_control_connection_timeout tests that the driver times out after the set initial connection
        timeout. It first pauses node1, essentially making it unreachable. It then attempts to create a Cluster object
        via connecting to node1 with a timeout of 1 second, and ensures that a NoHostAvailable is raised, along with
        an OperationTimedOut for 1 second.

        @expected_errors NoHostAvailable when node1 is paused and a connection attempt is made.
        @since 2.6.0
        @jira_ticket PYTHON-206
        @expected_result NoHostAvailable exception should be raised after 1 second.

        @test_category connection
        """

        get_node(1).pause()
        cluster = Cluster(contact_points=['127.0.0.1'],
                          protocol_version=PROTOCOL_VERSION,
                          connect_timeout=1)

        with self.assertRaisesRegexp(
                NoHostAvailable,
                r"OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"
        ):
            cluster.connect()
        cluster.shutdown()

        get_node(1).resume()
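
Note that connect_timeout only bounds connection establishment; per-request timeouts are configured separately, via request_timeout on an execution profile in profile-based configurations. A sketch showing both knobs together:

from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT

cluster = Cluster(
    contact_points=['127.0.0.1'],
    connect_timeout=1,  # seconds allowed to establish each connection
    execution_profiles={
        # seconds to wait per request before raising OperationTimedOut
        EXEC_PROFILE_DEFAULT: ExecutionProfile(request_timeout=10),
    })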
Example #5
    def test_invalid_protocol_negotation(self):
        """
        Test for protocol negotiation when explicit versions are set

        If an explicit protocol version that is not compatible with the server version is set,
        an exception should be thrown. It should not attempt to negotiate.

        For reference, the mapping of server version to supported protocol versions is as follows:

        1.2 -> 1
        2.0 -> 2, 1
        2.1 -> 3, 2, 1
        2.2 -> 4, 3, 2, 1
        3.X -> 4, 3

        @since 3.6.0
        @jira_ticket PYTHON-537
        @expected_result downgrading should not be allowed when explicit protocol versions are set.

        @test_category connection
        """

        upper_bound = get_unsupported_upper_protocol()
        if upper_bound is not None:
            cluster = Cluster(protocol_version=upper_bound)
            with self.assertRaises(NoHostAvailable):
                cluster.connect()
            cluster.shutdown()

        lower_bound = get_unsupported_lower_protocol()
        if lower_bound is not None:
            cluster = Cluster(protocol_version=lower_bound)
            with self.assertRaises(NoHostAvailable):
                cluster.connect()
            cluster.shutdown()
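
When no explicit protocol_version is given, the driver negotiates downward automatically, and the version actually in use can be read back from the cluster after connecting. A minimal sketch:

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

cluster = Cluster()  # no protocol_version: the driver negotiates one
cluster.connect()
print("negotiated protocol version:", cluster.protocol_version)
cluster.shutdown()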
Example #6
    def test_duplicate_metrics_per_cluster(self):
        """
        Test to validate that cluster metrics names can't overlap.
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metric names should not be allowed to be same.

        @test_category metrics
        """
        cluster2 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        cluster3 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        # Ensure duplicate metric names are not allowed
        cluster2.metrics.set_stats_name("appcluster")
        # Re-setting the same name on the same cluster is a no-op
        cluster2.metrics.set_stats_name("appcluster")
        with self.assertRaises(ValueError):
            cluster3.metrics.set_stats_name("appcluster")
        cluster3.metrics.set_stats_name("devops")

        session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
        session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)

        # Basic validation that naming metrics doesn't impact their segregation or accuracy
        for i in range(10):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session2.execute(query)

        for i in range(5):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session3.execute(query)

        self.assertEqual(
            cluster2.metrics.get_stats()['request_timer']['count'], 10)
        self.assertEqual(
            cluster3.metrics.get_stats()['request_timer']['count'], 5)

        # Check scales to ensure they are appropriately named
        self.assertTrue("appcluster" in scales._Stats.stats.keys())
        self.assertTrue("devops" in scales._Stats.stats.keys())

        cluster2.shutdown()
        cluster3.shutdown()
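
Outside of tests, the same metrics registry is how request counts and latencies are observed. A minimal sketch, assuming metrics_enabled=True (which requires the scales package to be installed):

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

cluster = Cluster(metrics_enabled=True)
cluster.metrics.set_stats_name("mycluster")  # optional; must be unique per process
session = cluster.connect()

session.execute("SELECT release_version FROM system.local")

stats = cluster.metrics.get_stats()
print("requests executed:", stats['request_timer']['count'])
cluster.shutdown()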
Example #7
    def test_for_schema_disagreement_attribute(self):
        """
        Tests to ensure that schema disagreement is properly surfaced on the response future.

        Creates and destroys keyspaces/tables with various schema agreement timeouts set.
        The first part runs CQL create/drop commands with the schema agreement wait set so low that agreement cannot occur within the timeout.
        It then validates that the correct value is set on the result.
        The second part ensures that when schema agreement occurs, the result set reflects that appropriately.

        @since 3.1.0
        @jira_ticket PYTHON-458
        @expected_result is_schema_agreed is set appropriately on the response future

        @test_category schema
        """
        # This should yield a schema disagreement
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          max_schema_agreement_wait=0.001)
        session = cluster.connect(wait_for_all_pools=True)

        rs = session.execute(
            "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"
        )
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute(
            SimpleStatement(
                "CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)",
                consistency_level=ConsistencyLevel.ALL))
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, False)
        cluster.shutdown()

        # These should have schema agreement
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          max_schema_agreement_wait=100)
        session = cluster.connect()
        rs = session.execute(
            "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"
        )
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute(
            SimpleStatement(
                "CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)",
                consistency_level=ConsistencyLevel.ALL))
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, True)
        cluster.shutdown()
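
The same flag is available on any statement's response future; check_and_wait_for_agreement is a suite helper defined elsewhere, but presumably it inspects the future roughly like this sketch:

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

cluster = Cluster()
session = cluster.connect()

future = session.execute_async(
    "CREATE KEYSPACE IF NOT EXISTS sketch_ks WITH replication = "
    "{'class': 'SimpleStrategy', 'replication_factor': 1}")
future.result()  # wait for the DDL request itself to complete
# True or False depending on whether agreement was reached within max_schema_agreement_wait
print("schema agreed:", future.is_schema_agreed)
cluster.shutdown()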
Example #8
    def _setup_for_proxy(self, grant=True):
        os.environ['KRB5_CONFIG'] = self.krb_conf
        self.refresh_kerberos_tickets(self.cassandra_keytab, "*****@*****.**", self.krb_conf)
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='*****@*****.**')
        cluster = Cluster(auth_provider=auth_provider)
        session = cluster.connect()

        session.execute("CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('*****@*****.**'))
        session.execute("CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('*****@*****.**'))

        session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to '*****@*****.**'")

        session.execute("CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('*****@*****.**'))
        session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to '*****@*****.**'")

        # Create a keyspace and allow only charlie to query it.
        session.execute(
            "CREATE KEYSPACE testkrbproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
        session.execute("CREATE TABLE testkrbproxy.testproxy (id int PRIMARY KEY, value text)")
        session.execute("GRANT ALL PERMISSIONS ON KEYSPACE testkrbproxy to '{0}'".format('*****@*****.**'))

        if grant:
            session.execute("GRANT PROXY.LOGIN ON ROLE '{0}' to '{1}'".format('*****@*****.**', '*****@*****.**'))

        cluster.shutdown()
Example #9
    def _protocol_divergence_fail_by_flag_uses_int(self,
                                                   version,
                                                   uses_int_query_flag,
                                                   int_flag=True,
                                                   beta=False):
        cluster = Cluster(protocol_version=version,
                          allow_beta_protocol_version=beta)
        session = cluster.connect()

        query_one = SimpleStatement(
            "INSERT INTO test3rf.test (k, v) VALUES (1, 1)")
        query_two = SimpleStatement(
            "INSERT INTO test3rf.test (k, v) VALUES (2, 2)")

        execute_with_long_wait_retry(session, query_one)
        execute_with_long_wait_retry(session, query_two)

        with mock.patch('dse.protocol.ProtocolVersion.uses_int_query_flags',
                        new=mock.Mock(return_value=int_flag)):
            future = self._send_query_message(
                session,
                cluster._default_timeout,
                consistency_level=ConsistencyLevel.ONE,
                fetch_size=1)

            response = future.result()

            # This means the flags are not handled as the server intends when uses_int=False
            self.assertEqual(response.has_more_pages, uses_int_query_flag)

        execute_with_long_wait_retry(session,
                                     SimpleStatement("TRUNCATE test3rf.test"))
        cluster.shutdown()
Example #10
    def test_numpy_results_paged(self):
        """
        Test Numpy-based parser that returns a NumPy array
        """
        # arrays = { 'a': arr1, 'b': arr2, ... }
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory)})
        session = cluster.connect(keyspace="testspace")
        session.client_protocol_handler = NumpyProtocolHandler
        session.default_fetch_size = 2

        expected_pages = (self.N_ITEMS + session.default_fetch_size - 1) // session.default_fetch_size

        self.assertLess(session.default_fetch_size, self.N_ITEMS)

        results = session.execute("SELECT * FROM test_table")

        self.assertTrue(results.has_more_pages)
        for count, page in enumerate(results, 1):
            self.assertIsInstance(page, dict)
            for colname, arr in page.items():
                if count <= expected_pages:
                    self.assertGreater(len(arr), 0, "page count: %d" % (count,))
                    self.assertLessEqual(len(arr), session.default_fetch_size)
                else:
                    # we get one extra item out of this iteration because of the way NumpyParser returns results
                    # The last page is returned as a dict with zero-length arrays
                    self.assertEqual(len(arr), 0)
            self.assertEqual(self._verify_numpy_page(page), len(arr))
        self.assertEqual(count, expected_pages + 1)  # see note about extra 'page' above

        cluster.shutdown()
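
With the standard protocol handler, paging works the same way but iteration yields rows rather than per-page column arrays. A minimal sketch of explicit page-by-page consumption (testspace/test_table are the fixtures assumed above):

from dse.cluster import Cluster  # cassandra.cluster in the OSS driver

cluster = Cluster()
session = cluster.connect("testspace")
session.default_fetch_size = 2

result = session.execute("SELECT * FROM test_table")
while True:
    for row in result.current_rows:  # only the rows of the current page
        print(row)
    if not result.has_more_pages:
        break
    result.fetch_next_page()  # synchronously fetch the next page
cluster.shutdown()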
Example #11
def validate_ssl_options(ssl_options):
    # retry a few times, since the SSL cluster may still be coming up
    tries = 0
    while True:
        if tries > 5:
            raise RuntimeError(
                "Failed to connect to SSL cluster after 5 attempts")
        try:
            cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                              ssl_options=ssl_options)
            session = cluster.connect(wait_for_all_pools=True)
            break
        except Exception:
            ex_type, ex, tb = sys.exc_info()
            log.warn("{0}: {1} Backtrace: {2}".format(
                ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            tries += 1

    # attempt a few simple commands.
    insert_keyspace = """CREATE KEYSPACE ssltest
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
            """
    statement = SimpleStatement(insert_keyspace)
    statement.consistency_level = ConsistencyLevel.ONE
    session.execute(statement)

    drop_keyspace = "DROP KEYSPACE ssltest"
    statement = SimpleStatement(drop_keyspace)
    statement.consistency_level = ConsistencyLevel.ANY
    session.execute(statement)

    cluster.shutdown()
Example #12
def use_cluster_with_graph(num_nodes):
    """
    This is a workaround for the fact that spark nodes will conflict over master assignment
    when started all at once.
    """

    # Create the cluster but don't start it.
    use_singledc(start=False, workloads=['graph', 'spark'])
    # Start first node.
    get_node(1).start(wait_for_binary_proto=True)
    # Wait for the binary protocol port to open
    wait_for_node_socket(get_node(1), 120)
    # Wait for spark master to start up
    spark_master_http = ("localhost", 7080)
    common.check_socket_listening(spark_master_http, timeout=60)
    tmp_cluster = Cluster(protocol_version=PROTOCOL_VERSION)

    # Start up remaining nodes.
    try:
        session = tmp_cluster.connect()
        statement = "ALTER KEYSPACE dse_leases WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'dc1': '%d'}" % (num_nodes)
        session.execute(statement)
    finally:
        tmp_cluster.shutdown()

    for i in range(1, num_nodes+1):
        if i != 1:
            node = get_node(i)
            node.start(wait_for_binary_proto=True)
            wait_for_node_socket(node, 120)

    # Wait for workers to show up as Alive on master
    wait_for_spark_workers(3, 120)
Example #13
class BasicGraphUnitTestCase(BasicKeyspaceUnitTestCase):
    """
    This is a basic graph unit test case that provides various utility methods that can be leveraged
    for test case setup and tear down.
    """
    @property
    def graph_name(self):
        return self._testMethodName.lower()

    def session_setup(self):
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect()
        self.ks_name = self._testMethodName.lower()
        self.cass_version, self.cql_version = get_server_versions()

    def setUp(self):
        self.session_setup()
        self.reset_graph()
        profiles = self.cluster.profile_manager.profiles
        profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self.graph_name
        profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = self.graph_name
        self.clear_schema()

    def tearDown(self):
        self.cluster.shutdown()

    def clear_schema(self):
        self.session.execute_graph('schema.clear()')

    def reset_graph(self):
        reset_graph(self.session, self.graph_name)

    def wait_for_graph_inserted(self):
        wait_for_graph_inserted(self.session, self.graph_name)
Example #14
    def test_white_list(self):
        use_singledc()
        keyspace = 'test_white_list'

        cluster = Cluster(('127.0.0.2',), protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy((IP_FORMAT % 2,)))})
        session = cluster.connect()
        self._wait_for_nodes_up([1, 2, 3])

        create_schema(cluster, session, keyspace)
        self._insert(session, keyspace)
        self._query(session, keyspace)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        # white list policy should not allow reconnecting to ignored hosts
        force_stop(3)
        self._wait_for_nodes_down([3])
        self.assertFalse(cluster.metadata._hosts[IP_FORMAT % 3].is_currently_reconnecting())

        self.coordinator_stats.reset_counts()
        force_stop(2)
        self._wait_for_nodes_down([2])

        try:
            self._query(session, keyspace)
            self.fail()
        except NoHostAvailable:
            pass
        finally:
            cluster.shutdown()
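
For reference, the policy is constructed directly from the allowed addresses; a minimal sketch of wiring WhiteListRoundRobinPolicy into the default execution profile:

from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from dse.policies import WhiteListRoundRobinPolicy  # cassandra.policies in the OSS driver

# Only hosts in this list are ever queried or reconnected to.
profile = ExecutionProfile(
    load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()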
Example #15
    def _cluster_session_with_lbp(self, lbp):
        # create a cluster with no delay on events

        cluster = Cluster(protocol_version=PROTOCOL_VERSION, topology_event_refresh_window=0, status_event_refresh_window=0,
                          execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=lbp)})
        session = cluster.connect()
        return cluster, session
Example #16
def main():
    cluster = Cluster()
    session = cluster.connect()

    start_time = datetime.datetime.fromtimestamp(1564524000)
    end_time = datetime.datetime.fromtimestamp(1564610400)

    results = []

    current_time = start_time
    while current_time <= end_time:
        rows = session.execute(
            """
            SELECT user_id, product_id, year, month, day, hour, interaction_time, type FROM prodcat.user_interaction_product_history
            WHERE year = %s AND month = %s AND day = %s AND hour = %s
            """, (current_time.year, current_time.month, current_time.day,
                  current_time.hour))

        results += rows.current_rows
        current_time += datetime.timedelta(hours=1)

    interactions = parse_interaction_data(results, start_time)
    interaction_details = calc_interaction_details(interactions)
    model = initialize_model(interaction_details)

    train(interactions, interaction_details, model, 150)
Example #17
    def test_protocol_divergence_v5_fail_by_continuous_paging(self):
        """
        Test to validate that V5 and DSE_V1 diverge. ContinuousPagingOptions is not supported by V5

        @since DSE 2.0b3 GRAPH 1.0b1
        @jira_ticket PYTHON-694
        @expected_result NoHostAvailable will be raised when the continuous_paging_options parameter is set

        @test_category connection
        """
        cluster = Cluster(protocol_version=ProtocolVersion.V5,
                          allow_beta_protocol_version=True)
        session = cluster.connect()

        max_pages = 4
        max_pages_per_second = 3
        continuous_paging_options = ContinuousPagingOptions(
            max_pages=max_pages, max_pages_per_second=max_pages_per_second)

        future = self._send_query_message(
            session,
            timeout=cluster._default_timeout,
            consistency_level=ConsistencyLevel.ONE,
            continuous_paging_options=continuous_paging_options)

        # This should raise NoHostAvailable because continuous paging is not supported by V5; it requires DSE_V1 or higher
        with self.assertRaises(NoHostAvailable) as context:
            future.result()
        self.assertIn(
            "Continuous paging may only be used with protocol version ProtocolVersion.DSE_V1 or higher",
            str(context.exception))

        cluster.shutdown()
Example #18
    def _test_downgrading_cl(self, keyspace, rf, accepted):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()),
                                  DowngradingConsistencyRetryPolicy())
                          })
        session = cluster.connect(wait_for_all_pools=True)

        create_schema(cluster, session, keyspace, replication_factor=rf)
        self._insert(session, keyspace, 1)
        self._query(session, keyspace, 1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        try:
            force_stop(2)
            wait_for_down(cluster, 2)

            self._assert_writes_succeed(session, keyspace, accepted)
            self._assert_reads_succeed(session, keyspace,
                                       accepted - set([ConsistencyLevel.ANY]))
            self._assert_writes_fail(session, keyspace,
                                     SINGLE_DC_CONSISTENCY_LEVELS - accepted)
            self._assert_reads_fail(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        finally:
            start(2)
            wait_for_up(cluster, 2)

        cluster.shutdown()
Example #19
    def test_rfthree_tokenaware_none_down(self):
        keyspace = 'test_rfthree_tokenaware_none_down'
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()))
                          })
        session = cluster.connect(wait_for_all_pools=True)
        wait_for_up(cluster, 1)
        wait_for_up(cluster, 2)

        create_schema(cluster, session, keyspace, replication_factor=3)
        self._insert(session, keyspace, count=1)
        self._query(session, keyspace, count=1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()

        self._assert_writes_succeed(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS)
        self._assert_reads_succeed(session,
                                   keyspace,
                                   SINGLE_DC_CONSISTENCY_LEVELS -
                                   set([ConsistencyLevel.ANY]),
                                   expected_reader=2)

        cluster.shutdown()
Example #20
class ConnectionTimeoutTest(unittest.TestCase):

    def setUp(self):
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                               execution_profiles=
                               {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=
                                                                       WhiteListRoundRobinPolicy([DSE_IP]))})
        self.session = self.cluster.connect()

    def tearDown(self):
        self.cluster.shutdown()

    @patch('dse.connection.Connection.max_in_flight', 2)
    def test_in_flight_timeout(self):
        """
        Test to ensure that connection id fetching will block when max_id is reached.

        In previous versions of the driver this test would cause a
        NoHostAvailable exception to be thrown when the max_id was restricted

        @since 3.3
        @jira_ticket PYTHON-514
        @expected_result When many requests are run against a single node, connection acquisition should block
        until a connection is available or the request times out.

        @test_category connection timeout
        """
        futures = []
        query = '''SELECT * FROM system.local'''
        for _ in range(100):
            futures.append(self.session.execute_async(query))

        for future in futures:
            future.result()
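
Rather than queuing hundreds of async requests and relying on the connection's in-flight blocking, callers usually cap concurrency explicitly. A sketch using the driver's concurrent-execution helper (dse.concurrent here, mirroring cassandra.concurrent in the OSS driver):

from dse.cluster import Cluster
from dse.concurrent import execute_concurrent_with_args

cluster = Cluster()
session = cluster.connect()

statement = session.prepare("SELECT * FROM system.local WHERE key=?")
# At most 50 requests in flight at a time, regardless of max_in_flight.
results = execute_concurrent_with_args(
    session, statement, [('local',)] * 100, concurrency=50,
    raise_on_first_error=False)
print(sum(1 for success, _ in results if success), "succeeded")
cluster.shutdown()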
Example #21
    def test_can_insert_udts_with_nulls(self):
        """
        Test the insertion of UDTs with null and empty string fields
        """

        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect(self.keyspace_name, wait_for_all_pools=True)

        s.execute("CREATE TYPE user (a text, b int, c uuid, d blob)")
        User = namedtuple('user', ('a', 'b', 'c', 'd'))
        c.register_user_type(self.keyspace_name, "user", User)

        s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")

        insert = s.prepare("INSERT INTO mytable (a, b) VALUES (0, ?)")
        s.execute(insert, [User(None, None, None, None)])

        results = s.execute("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), results[0].b)

        select = s.prepare("SELECT b FROM mytable WHERE a=0")
        self.assertEqual((None, None, None, None), s.execute(select)[0].b)

        # also test empty strings
        s.execute(insert, [User('', None, None, six.binary_type())])
        results = s.execute("SELECT b FROM mytable WHERE a=0")
        self.assertEqual(('', None, None, six.binary_type()), results[0].b)

        c.shutdown()
Example #22
    def load_schema(self, use_schema=False):
        """加载schema文件
        """
        if use_schema:
            # Create a default execution profile pointing at the target graph
            ep = GraphExecutionProfile(graph_options=GraphOptions(
                graph_name=self.graph_name))
            cluster = Cluster(
                execution_profiles={EXEC_PROFILE_GRAPH_DEFAULT: ep})

            session = cluster.connect()

            # Create the graph
            session.execute_graph(
                "system.graph(name).ifNotExists().create()",
                {'name': self.graph_name},
                execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)

            # Execute the gremlin schema-creation commands one by one
            with open(self.schema_path) as f:
                pat = re.compile("[/ \n]")  # regular expression
                for line in f:
                    if not re.match(pat, line):  # skip lines starting with a slash, space, or newline
                        print("Loading {}".format(line.strip()))
                        session.execute_graph(line.strip())

        else:
            print("schema未加载,请确保graph中存在schema")
Example #24
def main():
    cluster = Cluster()
    session = cluster.connect()

    start_time = datetime.datetime.fromtimestamp(1564524000)
    end_time = datetime.datetime.fromtimestamp(1564610400)

    results = []

    current_time = start_time
    while current_time <= end_time:
        rows = session.execute(
            """
            SELECT user_id, product_id, year, month, day, hour, interaction_time, type FROM prodcat.user_interaction_product_history
            WHERE year = %s AND month = %s AND day = %s AND hour = %s
            """,
            (current_time.year, current_time.month, current_time.day, current_time.hour)
        )

        results += rows.current_rows
        current_time += datetime.timedelta(hours=1)

    interactions = parse_interaction_data(results, start_time)
    interaction_details = calc_interaction_details(interactions)
    model = initialize_model(interaction_details)

    for interaction in interactions[:50]:
        predictions = evaluate_model(interaction, interaction_details, model, 18)

        print("Predicted Product: %s, Actual: %s", predictions[0], interaction.product_sequence_id)
        print("Predicted State Change: %s, Actual: %s", predictions[1], interaction.state)
Example #25
    def test_for_schema_disagreements_same_keyspace(self):
        """
        Tests for any schema disagreements using the same keyspace multiple times
        """

        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect(wait_for_all_pools=True)

        for i in range(30):
            try:
                execute_until_pass(
                    session,
                    "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"
                )
            except AlreadyExists:
                execute_until_pass(session, "DROP KEYSPACE test")
                execute_until_pass(
                    session,
                    "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"
                )

            execute_until_pass(
                session,
                "CREATE TABLE test.cf (key int PRIMARY KEY, value int)")

            for j in range(100):
                execute_until_pass(
                    session,
                    "INSERT INTO test.cf (key, value) VALUES ({0}, {0})".
                    format(j))

            execute_until_pass(session, "DROP KEYSPACE test")
        cluster.shutdown()
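
execute_until_pass is a suite helper defined elsewhere; a plausible minimal version simply retries on transient errors (exception module paths assume the DSE driver, cassandra/cassandra.cluster in the OSS driver):

import time

from dse import OperationTimedOut
from dse.cluster import NoHostAvailable


def execute_until_pass(session, query, attempts=100):
    """Retry a statement until it succeeds or attempts run out (sketch)."""
    for _ in range(attempts):
        try:
            return session.execute(query)
        except (OperationTimedOut, NoHostAvailable):
            time.sleep(0.1)  # transient failure: back off briefly and retry
    raise RuntimeError("query never succeeded: %s" % query)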
Example #26
    def test_prepare_on_all_hosts(self):
        """
        Test to validate prepare_on_all_hosts flag is honored.

        Use a special ForcedHostSwitchPolicy to ensure prepared queries are cycled over nodes that should not
        have them prepared. Check the logs to ensure they are being re-prepared on those nodes.

        @since 3.4.0
        @jira_ticket PYTHON-556
        @expected_result queries will have to be re-prepared on hosts that aren't the control connection
        """
        white_list = ForcedHostSwitchPolicy()
        clus = Cluster(
            execution_profiles={
                EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=white_list)},
            protocol_version=PROTOCOL_VERSION,
            prepare_on_all_hosts=False,
            reprepare_on_up=False)
        self.addCleanup(clus.shutdown)

        session = clus.connect(wait_for_all_pools=True)
        mock_handler = MockLoggingHandler()
        logger = logging.getLogger(cluster.__name__)
        logger.addHandler(mock_handler)
        select_statement = session.prepare("SELECT * FROM system.local")
        session.execute(select_statement)
        session.execute(select_statement)
        session.execute(select_statement)
        self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing"))
Example #27
    def test_connection_param_validation(self):
        """
        Test to validate that invalid parameter combinations for registering connections via session are not tolerated
        @since 3.8
        @jira_ticket PYTHON-649
        @expected_result queries should execute appropriately

        @test_category object_mapper
        """
        cluster = Cluster([DSE_IP])
        session = cluster.connect()
        with self.assertRaises(CQLEngineException):
            conn.register_connection("bad_coonection1",
                                     session=session,
                                     consistency="not_null")
        with self.assertRaises(CQLEngineException):
            conn.register_connection("bad_coonection2",
                                     session=session,
                                     lazy_connect="not_null")
        with self.assertRaises(CQLEngineException):
            conn.register_connection("bad_coonection3",
                                     session=session,
                                     retry_connect="not_null")
        with self.assertRaises(CQLEngineException):
            conn.register_connection("bad_coonection4",
                                     session=session,
                                     cluster_options="not_null")
        with self.assertRaises(CQLEngineException):
            conn.register_connection("bad_coonection5",
                                     hosts="not_null",
                                     session=session)
        cluster.shutdown()
Example #28
    def test_refresh_schema_type(self):

        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()

        keyspace_name = 'test1rf'
        type_name = self._testMethodName

        execute_until_pass(
            session, 'CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' %
            (keyspace_name, type_name))
        original_meta = cluster.metadata.keyspaces
        original_test1rf_meta = original_meta[keyspace_name]
        original_type_meta = original_test1rf_meta.user_types[type_name]

        # only refresh one type
        cluster.refresh_user_type_metadata('test1rf', type_name)
        current_meta = cluster.metadata.keyspaces
        current_test1rf_meta = current_meta[keyspace_name]
        current_type_meta = current_test1rf_meta.user_types[type_name]
        self.assertIs(original_meta, current_meta)
        self.assertEqual(original_test1rf_meta.export_as_string(),
                         current_test1rf_meta.export_as_string())
        self.assertIsNot(original_type_meta, current_type_meta)
        self.assertEqual(original_type_meta.as_cql_query(),
                         current_type_meta.as_cql_query())
        cluster.shutdown()
Example #29
    def test_pool_with_host_down(self):
        """
        Test to ensure that cluster.connect() doesn't return prior to pools being initialized.

        This test will figure out which host our pool logic will connect to first. It then shuts that server down.
        Previously the cluster.connect() would return prior to the pools being initialized, and the first queries would
        return a no host exception

        @since 3.7.0
        @jira_ticket PYTHON-617
        @expected_result query should complete successfully

        @test_category connection
        """

        # Find the first node we will try to create connections to, and shut it down.

        # We will be shutting down a random host, so we need a complete contact list
        all_contact_points = ["127.0.0.1", "127.0.0.2", "127.0.0.3"]

        # Connect and find out which host queries will be routed to first
        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cluster.connect(wait_for_all_pools=True)
        hosts = cluster.metadata.all_hosts()
        address = hosts[0].address
        node_to_stop = int(address.split('.')[-1])
        cluster.shutdown()

        # We now register a cluster that has its control connection NOT on the node that we are shutting down.
        # We do this so we don't miss the event
        contact_point = '127.0.0.{0}'.format(self.get_node_not_x(node_to_stop))
        cluster = Cluster(contact_points=[contact_point],
                          protocol_version=PROTOCOL_VERSION)
        cluster.connect(wait_for_all_pools=True)
        try:
            force_stop(node_to_stop)
            wait_for_down(cluster, node_to_stop)
            # Attempt a query against that node. It should complete
            cluster2 = Cluster(contact_points=all_contact_points,
                               protocol_version=PROTOCOL_VERSION)
            session2 = cluster2.connect()
            session2.execute("SELECT * FROM system.local")
        finally:
            cluster2.shutdown()
            start(node_to_stop)
            wait_for_up(cluster, node_to_stop)
            cluster.shutdown()
Example #30
    def _results_from_row_factory(self, row_factory):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(row_factory=row_factory)
                          })
        with cluster:
            return cluster.connect().execute(self.select)
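
The row factory determines the shape of each returned row; several ship in dse.query (mirrored from cassandra.query in the OSS driver). A minimal sketch comparing them with the helper pattern above:

from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from dse.query import tuple_factory, dict_factory, named_tuple_factory

def rows_with(row_factory):
    profile = ExecutionProfile(row_factory=row_factory)
    with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile}) as cluster:
        return cluster.connect().execute("SELECT release_version FROM system.local")

print(rows_with(tuple_factory)[0])        # e.g. ('3.11.4',)
print(rows_with(dict_factory)[0])         # e.g. {'release_version': '3.11.4'}
print(rows_with(named_tuple_factory)[0])  # e.g. Row(release_version='3.11.4')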
Example #31
def setup_cluster():
    if args.host is None:
        nodes = ['localhost']
    else:
        nodes = [args.host]

    if args.port is None:
        port = 9042
    else:
        port = int(args.port)

    if args.connect_timeout is None:
        connect_timeout = 5
    else:
        connect_timeout = int(args.connect_timeout)

    if args.ssl is not None and args.certfile is not None:
        ssl_opts = {
            'ca_certs': args.certfile,
            'ssl_version': PROTOCOL_TLSv1,
            'keyfile': args.userkey,
            'certfile': args.usercert
        }
    else:
        ssl_opts = {}

    cluster = None

    if args.protocol_version is not None:
        auth = None

        if args.username is not None and args.password is not None:
            if args.protocol_version == 1:
                auth = get_credentials
            elif args.protocol_version > 1:
                auth = PlainTextAuthProvider(username=args.username,
                                             password=args.password)

        cluster = Cluster(control_connection_timeout=connect_timeout,
                          connect_timeout=connect_timeout,
                          contact_points=nodes,
                          port=port,
                          protocol_version=args.protocol_version,
                          auth_provider=auth,
                          ssl_options=ssl_opts)
    else:
        cluster = Cluster(control_connection_timeout=connect_timeout,
                          connect_timeout=connect_timeout,
                          contact_points=nodes,
                          port=port,
                          ssl_options=ssl_opts)

    session = cluster.connect()

    session.default_timeout = TIMEOUT
    session.default_fetch_size = FETCH_SIZE
    session.row_factory = dse.query.ordered_dict_factory
    return session
Example #32
    def test_graph_profile(self):
        """
            Test verifying various aspects of graph config properties.

            @since 1.0.0
            @jira_ticket PYTHON-570

            @test_category dse graph
            """
        generate_classic(self.session)
        # Create variou execution policies
        exec_dif_factory = GraphExecutionProfile(
            row_factory=single_object_row_factory)
        exec_dif_factory.graph_options.graph_name = self.graph_name
        exec_dif_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        exec_dif_lbp.graph_options.graph_name = self.graph_name
        exec_bad_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
        exec_bad_lbp.graph_options.graph_name = self.graph_name
        exec_short_timeout = GraphExecutionProfile(
            request_timeout=1,
            load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        exec_short_timeout.graph_options.graph_name = self.graph_name

        # Add a single execution profile on cluster creation
        local_cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={"exec_dif_factory": exec_dif_factory})
        local_session = local_cluster.connect()
        rs1 = self.session.execute_graph('g.V()')
        rs2 = local_session.execute_graph('g.V()',
                                          execution_profile='exec_dif_factory')

        # Verify default and non default policy works
        self.assertFalse(isinstance(rs2[0], Vertex))
        self.assertTrue(isinstance(rs1[0], Vertex))
        # Add other policies validate that lbp are honored
        local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp)
        local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp")
        local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp)
        with self.assertRaises(NoHostAvailable):
            local_session.execute_graph('g.V()',
                                        execution_profile="exec_bad_lbp")

        # Try with missing EP
        with self.assertRaises(ValueError):
            local_session.execute_graph('g.V()',
                                        execution_profile='bad_exec_profile')

        # Validate that timeout is honored
        local_cluster.add_execution_profile("exec_short_timeout",
                                            exec_short_timeout)
        with self.assertRaises(OperationTimedOut):
            local_session.execute_graph(
                'java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);',
                execution_profile='exec_short_timeout')
Example #34
class BasicDseAuthTest(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        """
        This will set up the necessary infrastructure to run our authentication tests. It requires the ADS_HOME environment variable
        and our custom embedded apache directory server jar in order to run.
        """

        clear_kerberos_tickets()

        # Setup variables for various keytab and other files
        self.conf_file_dir = ADS_HOME+"conf/"
        self.krb_conf = self.conf_file_dir+"krb5.conf"
        self.dse_keytab = self.conf_file_dir+"dse.keytab"
        self.dseuser_keytab = self.conf_file_dir+"dseuser.keytab"
        self.cassandra_keytab = self.conf_file_dir+"cassandra.keytab"
        actual_jar = ADS_HOME+"embedded-ads.jar"

        # Create configuration directories if they don't already exist
        if not os.path.exists(self.conf_file_dir):
            os.makedirs(self.conf_file_dir)
        log.warning("Starting adserver")
        # Start the ADS; this will create the keytab and configuration files listed above
        self.proc = subprocess.Popen(['java', '-jar', actual_jar, '-k', '--confdir', self.conf_file_dir], shell=False)
        time.sleep(10)
        # TODO poll for server to come up

        log.warning("Starting adserver started")
        ccm_cluster = get_cluster()
        log.warning("fetching tickets")
        # Stop cluster if running and configure it with the correct options
        ccm_cluster.stop()
        if isinstance(ccm_cluster, DseCluster):
            # Setup kerberos options in dse.yaml
            config_options = {'kerberos_options': {'keytab': self.dse_keytab,
                                                   'service_principal': 'dse/[email protected]',
                                                   'qop': 'auth'},
                              'authentication_options': {'enabled': 'true',
                                                         'default_scheme': 'kerberos',
                                                         'scheme_permissions': 'true',
                                                         'allow_digest_with_kerberos': 'true',
                                                         'plain_text_without_ssl': 'warn',
                                                         'transitional_mode': 'disabled'}
                              }

            krb5java = "-Djava.security.krb5.conf=" + self.krb_conf
            # Setup dse authenticator in cassandra.yaml
            ccm_cluster.set_configuration_options({'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator'})
            ccm_cluster.set_dse_configuration_options(config_options)
            ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=[krb5java])
        else:
            log.error("Cluster is not dse cluster test will fail")

    @classmethod
    def tearDownClass(self):
        """
        Terminates the running ADS (Apache Directory Server).
        """

        self.proc.terminate()

    def tearDown(self):
        """
        This will clear any existing kerberos tickets by using kdestroy
        """
        clear_kerberos_tickets()
        self.cluster.shutdown()

    def refresh_kerberos_tickets(self, keytab_file, user_name, krb_conf):
        """
        Fetches a new ticket using the keytab file and username provided.
        """
        self.ads_pid = subprocess.call(['kinit', '-t', keytab_file, user_name], env={'KRB5_CONFIG': krb_conf}, shell=False)

    def connect_and_query(self, auth_provider):
        """
        Runs a simple system query with the specified auth_provider.
        """
        os.environ['KRB5_CONFIG'] = self.krb_conf
        self.cluster = Cluster(auth_provider=auth_provider)
        self.session = self.cluster.connect()
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        rs = self.session.execute(statement)
        return rs

    def test_should_not_authenticate_with_bad_user_ticket(self):
        """
        This test will attempt to authenticate with a user that has a valid ticket, but is not a valid dse user.
        @since 1.0.0
        @jira_ticket PYTHON-457
        @test_category dse auth
        @expected_result NoHostAvailable exception should be thrown

        """
        self.refresh_kerberos_tickets(self.dseuser_keytab, "*****@*****.**", self.krb_conf)
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
        self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)

    def test_should_not_athenticate_without_ticket(self):
        """
        This test will attempt to authenticate with a user that is valid but has no ticket
        @since 1.0.0
        @jira_ticket PYTHON-457
        @test_category dse auth
        @expected_result NoHostAvailable exception should be thrown

        """
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
        self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)

    def test_connect_with_kerberos(self):
        """
        This test will attempt to authenticate with a user that is valid and has a ticket
        @since 1.0.0
        @jira_ticket PYTHON-457
        @test_category dse auth
        @expected_result Client should be able to connect and run a basic query

        """
        self.refresh_kerberos_tickets(self.cassandra_keytab, "*****@*****.**", self.krb_conf)
        auth_provider = DSEGSSAPIAuthProvider()
        rs = self.connect_and_query(auth_provider)
        self.assertIsNotNone(rs)
        connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()]
        # Check to make sure our server_authenticator class is being set appropriately
        for connection in connections:
            self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class)

    def test_connect_with_kerberos_and_graph(self):
        """
        This test will attempt to authenticate with a user and execute a graph query
        @since 1.0.0
        @jira_ticket PYTHON-457
        @test_category dse auth
        @expected_result Client should be able to connect and run a basic graph query with authentication

        """
        self.refresh_kerberos_tickets(self.cassandra_keytab, "*****@*****.**", self.krb_conf)

        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
        rs = self.connect_and_query(auth_provider)
        self.assertIsNotNone(rs)
        reset_graph(self.session, self._testMethodName.lower())
        profiles = self.cluster.profile_manager.profiles
        profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self._testMethodName.lower()
        generate_classic(self.session)

        rs = self.session.execute_graph('g.V()')
        self.assertIsNotNone(rs)

    def test_connect_with_kerberos_host_not_resolved(self):
        """
        This test will attempt to authenticate using the raw IP; this will fail.
        @since 1.0.0
        @jira_ticket PYTHON-566
        @test_category dse auth
        @expected_result Client should error when ip is used

        """
        self.refresh_kerberos_tickets(self.cassandra_keytab, "*****@*****.**", self.krb_conf)
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], resolve_host_name=False)
        self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)

    def test_connect_with_explicit_principal(self):
        """
        This test will attempt to authenticate using valid and invalid user principals
        @since 1.0.0
        @jira_ticket PYTHON-574
        @test_category dse auth
        @expected_result Client principals should be used by the underlying mechanism

        """

        # Connect with valid principal
        self.refresh_kerberos_tickets(self.cassandra_keytab, "*****@*****.**", self.krb_conf)
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="*****@*****.**")
        rs = self.connect_and_query(auth_provider)
        connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()]

        # Check to make sure our server_authenticator class is being set appropriately
        for connection in connections:
            self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class)

        # Use invalid principal
        auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="*****@*****.**")
        self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)