Example #1
    def test_execute_concurrent_paged_result(self):

        num_statements = 201
        statement = SimpleStatement(
            "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)",
            consistency_level=ConsistencyLevel.QUORUM)
        parameters = [(i, i) for i in range(num_statements)]

        results = self.execute_concurrent_args_helper(self.session, statement,
                                                      parameters)
        self.assertEqual(num_statements, len(results))
        for success, result in results:
            self.assertTrue(success)
            self.assertFalse(result)

        # read
        statement = SimpleStatement("SELECT * FROM test3rf.test LIMIT %s",
                                    consistency_level=ConsistencyLevel.QUORUM,
                                    fetch_size=int(num_statements / 2))

        results = self.execute_concurrent_args_helper(self.session, statement,
                                                      [(num_statements, )])
        self.assertEqual(1, len(results))
        self.assertTrue(results[0][0])
        result = results[0][1]
        self.assertTrue(result.has_more_pages)
        self.assertEqual(num_statements, sum(1 for _ in result))
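
The execute_concurrent_args_helper used above is test scaffolding; it presumably delegates to the driver's concurrent-execution API. A minimal standalone sketch of that API, assuming the OSS cassandra-driver module layout (the DSE variant exposes the same calls under the dse package):

# Sketch only: direct use of cassandra.concurrent, which helpers like
# execute_concurrent_args_helper typically wrap. Contact points, keyspace
# and table mirror the fixtures above and are assumptions here.
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement

cluster = Cluster()
session = cluster.connect("test3rf")

insert = SimpleStatement(
    "INSERT INTO test (k, v) VALUES (%s, %s)",
    consistency_level=ConsistencyLevel.QUORUM)
parameters = [(i, i) for i in range(201)]

# Returns one (success, result_or_exception) pair per parameter tuple.
results = execute_concurrent_with_args(session, insert, parameters)
assert all(success for success, _ in results)

cluster.shutdown()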
    def _perform_cql_statement(self,
                               text,
                               consistency_level,
                               expected_exception,
                               session=None):
        """
        Simple helper method to perform CQL statements and check for an expected exception
        @param text CQL statement to execute
        @param consistency_level Consistency level at which it is to be executed
        @param expected_exception Exception expected to be thrown, or None
        """
        if session is None:
            session = self.session
        statement = SimpleStatement(text)
        statement.consistency_level = consistency_level

        if expected_exception is None:
            self.execute_helper(session, statement)
        else:
            with self.assertRaises(expected_exception) as cm:
                self.execute_helper(session, statement)
            if self.support_v5 and isinstance(cm.exception,
                                              (WriteFailure, ReadFailure)):
                if isinstance(cm.exception, ReadFailure):
                    self.assertEqual(
                        list(cm.exception.error_code_map.values())[0], 1)
                else:
                    self.assertEqual(
                        list(cm.exception.error_code_map.values())[0], 0)
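
For context, the error_code_map checked above is a public attribute of the driver's failure exceptions. A hedged sketch of inspecting it directly; the table and the failure scenario are assumptions, and per-replica codes are only populated on protocol v5 and later:

from cassandra import ConsistencyLevel, ReadFailure, WriteFailure
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

session = Cluster().connect("test3rf")   # contact points are assumptions
statement = SimpleStatement("SELECT * FROM test",
                            consistency_level=ConsistencyLevel.ALL)
try:
    session.execute(statement)
except (ReadFailure, WriteFailure) as exc:
    # Maps each failing replica endpoint to a numeric failure code.
    for replica, code in exc.error_code_map.items():
        print(replica, code)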
Example #3
    def test_conditional_update(self):
        self.session.execute("INSERT INTO test3rf.test (k, v) VALUES (0, 0)")
        statement = SimpleStatement(
            "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=1",
            serial_consistency_level=ConsistencyLevel.SERIAL)
        # crazy test, but PYTHON-299
        # TODO: expand to check more parameters get passed to statement, and on to messages
        self.assertEqual(statement.serial_consistency_level,
                         ConsistencyLevel.SERIAL)
        future = self.session.execute_async(statement)
        result = future.result()
        self.assertEqual(future.message.serial_consistency_level,
                         ConsistencyLevel.SERIAL)
        self.assertTrue(result)
        self.assertFalse(result[0].applied)

        statement = SimpleStatement(
            "UPDATE test3rf.test SET v=1 WHERE k=0 IF v=0",
            serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL)
        self.assertEqual(statement.serial_consistency_level,
                         ConsistencyLevel.LOCAL_SERIAL)
        future = self.session.execute_async(statement)
        result = future.result()
        self.assertEqual(future.message.serial_consistency_level,
                         ConsistencyLevel.LOCAL_SERIAL)
        self.assertTrue(result)
        self.assertTrue(result[0].applied)
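
The [applied] column read via result[0].applied also has a convenience accessor on the result set. A small sketch of the same conditional update issued through a bound statement, assuming the usual cassandra-driver API (ResultSet.was_applied):

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster

session = Cluster().connect()            # contact points are assumptions
prepared = session.prepare("UPDATE test3rf.test SET v=? WHERE k=? IF v=?")

bound = prepared.bind((1, 0, 0))
bound.serial_consistency_level = ConsistencyLevel.LOCAL_SERIAL
result = session.execute(bound)
print(result.was_applied)                # True only when the IF condition matched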
Example #4
    def test_execute_concurrent(self):
        for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201):
            # write
            statement = SimpleStatement(
                "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)",
                consistency_level=ConsistencyLevel.QUORUM)
            statements = cycle((statement, ))
            parameters = [(i, i) for i in range(num_statements)]

            results = self.execute_concurrent_helper(
                self.session, list(zip(statements, parameters)))
            self.assertEqual(num_statements, len(results))
            for success, result in results:
                self.assertTrue(success)
                self.assertFalse(result)

            # read
            statement = SimpleStatement(
                "SELECT v FROM test3rf.test WHERE k=%s",
                consistency_level=ConsistencyLevel.QUORUM)
            statements = cycle((statement, ))
            parameters = [(i, ) for i in range(num_statements)]

            results = self.execute_concurrent_helper(
                self.session, list(zip(statements, parameters)))
            self.assertEqual(num_statements, len(results))
            self.assertEqual([(True, [(i, )]) for i in range(num_statements)],
                             results)
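
As in the first example, execute_concurrent_helper is presumably a thin wrapper; the driver's own entry point takes an iterable of (statement, parameters) pairs. A sketch under that assumption:

# Sketch only: cassandra.concurrent.execute_concurrent with zipped pairs,
# mirroring the list built in the test above.
from itertools import cycle

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent
from cassandra.query import SimpleStatement

session = Cluster().connect("test3rf")   # contact points are assumptions
select = SimpleStatement("SELECT v FROM test WHERE k=%s",
                         consistency_level=ConsistencyLevel.QUORUM)
pairs = list(zip(cycle((select,)), [(i,) for i in range(100)]))

# raise_on_first_error=False yields (success, result_or_exception) pairs
# instead of aborting on the first failure.
results = execute_concurrent(session, pairs, raise_on_first_error=False)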
Example #5
def execute(query,
            params=None,
            consistency_level=None,
            timeout=NOT_SET,
            connection=None):

    conn = get_connection(connection)

    if not conn.session:
        raise CQLEngineException(
            "It is required to setup() cqlengine before executing queries")

    if isinstance(query, SimpleStatement):
        pass  # already a SimpleStatement; execute as-is
    elif isinstance(query, BaseCQLStatement):
        params = query.get_context()
        query = SimpleStatement(str(query),
                                consistency_level=consistency_level,
                                fetch_size=query.fetch_size)
    elif isinstance(query, six.string_types):
        query = SimpleStatement(query, consistency_level=consistency_level)

    log.debug(format_log_context(query.query_string, connection=connection))

    result = conn.session.execute(query, params, timeout=timeout)

    return result
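
A hedged usage sketch for the execute() wrapper above; the host, keyspace and query are illustrative, and the import paths follow the usual cqlengine layout:

# Illustrative only: cqlengine must be wired to a session first, otherwise
# execute() raises the CQLEngineException seen above.
from cassandra import ConsistencyLevel
from cassandra.cqlengine import connection

connection.setup(['127.0.0.1'], default_keyspace='test3rf')   # assumption
rows = execute("SELECT * FROM test3rf.test WHERE k=%(k)s",
               params={'k': 0},
               consistency_level=ConsistencyLevel.QUORUM)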
    def test_no_parameters(self):
        batch = BatchStatement(BatchType.LOGGED)
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (0, 0)")
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (1, 1)", ())
        batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)"))
        batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (3, 3)"), ())

        prepared = self.session.prepare("INSERT INTO test3rf.test (k, v) VALUES (4, 4)")
        batch.add(prepared)
        batch.add(prepared, ())
        batch.add(prepared.bind([]))
        batch.add(prepared.bind([]), ())

        batch.add("INSERT INTO test3rf.test (k, v) VALUES (5, 5)", ())
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (6, 6)", ())
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (7, 7)", ())
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (8, 8)", ())
        batch.add("INSERT INTO test3rf.test (k, v) VALUES (9, 9)", ())

        self.assertRaises(ValueError, batch.add, prepared.bind([]), (1))
        self.assertRaises(ValueError, batch.add, prepared.bind([]), (1, 2))
        self.assertRaises(ValueError, batch.add, prepared.bind([]), (1, 2, 3))

        self.session.execute(batch)
        self.confirm_results()
    def _protocol_divergence_fail_by_flag_uses_int(self,
                                                   version,
                                                   uses_int_query_flag,
                                                   int_flag=True,
                                                   beta=False):
        cluster = Cluster(protocol_version=version,
                          allow_beta_protocol_version=beta)
        session = cluster.connect()

        query_one = SimpleStatement(
            "INSERT INTO test3rf.test (k, v) VALUES (1, 1)")
        query_two = SimpleStatement(
            "INSERT INTO test3rf.test (k, v) VALUES (2, 2)")

        execute_with_long_wait_retry(session, query_one)
        execute_with_long_wait_retry(session, query_two)

        with mock.patch('dse.protocol.ProtocolVersion.uses_int_query_flags',
                        new=mock.Mock(return_value=int_flag)):
            future = self._send_query_message(
                session,
                cluster._default_timeout,
                consistency_level=ConsistencyLevel.ONE,
                fetch_size=1)

            response = future.result()

            # If uses_int=False, the flags are not handled by the server as intended
            self.assertEqual(response.has_more_pages, uses_int_query_flag)

        execute_with_long_wait_retry(session,
                                     SimpleStatement("TRUNCATE test3rf.test"))
        cluster.shutdown()
    def test_routing_key_generation_complex(self):
        """
        Compares the routing key generated by a complex composite partition key using the model with the one generated by the equivalent
        bound statement
        @since 3.2
        @jira_ticket PYTHON-535
        @expected_result they should match

        @test_category object_mapper
        """
        prepared = self.session.prepare("""
          INSERT INTO {0}.complex_model_routing (partition, cluster, count, text, float, text_2) VALUES  (?, ?, ?, ?, ?, ?)
          """.format(DEFAULT_KEYSPACE))
        partition = uuid4()
        cluster = 1
        count = 2
        text = "text"
        float = 1.2
        text_2 = "text_2"
        bound = prepared.bind((partition, cluster, count, text, float, text_2))
        mrk = ComplexModelRouting._routing_key_from_values(
            [partition, cluster, text, float],
            self.session.cluster.protocol_version)
        simple = SimpleStatement("")
        simple.routing_key = mrk
        self.assertEqual(bound.routing_key, simple.routing_key)
    def test_routing_key_is_ignored(self):
        """
        Compares the routing key generated by a simple partition key using the model with the one generated by the equivalent
        bound statement. It also verifies basic operations work with no routing key
        @since 3.2
        @jira_ticket PYTHON-505
        @expected_result they shouldn't match

        @test_category object_mapper
        """

        prepared = self.session.prepare("""
          INSERT INTO {0}.basic_model_no_routing (k, v) VALUES  (?, ?)
          """.format(DEFAULT_KEYSPACE))
        bound = prepared.bind((1, 2))

        mrk = BasicModelNoRouting._routing_key_from_values(
            [1], self.session.cluster.protocol_version)
        simple = SimpleStatement("")
        simple.routing_key = mrk
        self.assertNotEqual(bound.routing_key, simple.routing_key)

        # Verify that basic create, update and delete work with no routing key
        t = BasicModelNoRouting.create(k=2, v=3)
        t.update(v=4).save()
        f = BasicModelNoRouting.objects.filter(k=2).first()
        self.assertEqual(t, f)

        t.delete()
        self.assertEqual(BasicModelNoRouting.objects.count(), 0)
Example #10
    def test_write_timeout(self):
        """
        Trigger and ensure write_timeouts are counted
        Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
        Attempt a write at cl.ALL and receive a WriteTimeout.
        """

        # Test write
        self.session.execute("INSERT INTO test (k, v) VALUES (1, 1)")

        # Assert read
        query = SimpleStatement("SELECT * FROM test WHERE k=1",
                                consistency_level=ConsistencyLevel.ALL)
        results = execute_until_pass(self.session, query)
        self.assertTrue(results)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                self.session.execute(query, timeout=None)
            self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)

        finally:
            get_node(1).resume()
Example #11
    def test_duplicate_metrics_per_cluster(self):
        """
        Test to validate that cluster metrics names can't overlap.
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metric names should not be allowed to be the same.

        @test_category metrics
        """
        cluster2 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        cluster3 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })

        # Ensure duplicate metric names are not allowed
        cluster2.metrics.set_stats_name("appcluster")
        cluster2.metrics.set_stats_name("appcluster")
        with self.assertRaises(ValueError):
            cluster3.metrics.set_stats_name("appcluster")
        cluster3.metrics.set_stats_name("devops")

        session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
        session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)

        # Basic validation that naming metrics doesn't impact their segregation or accuracy
        for i in range(10):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session2.execute(query)

        for i in range(5):
            query = SimpleStatement("SELECT * FROM {0}.{0}".format(
                self.ks_name),
                                    consistency_level=ConsistencyLevel.ALL)
            session3.execute(query)

        self.assertEqual(
            cluster2.metrics.get_stats()['request_timer']['count'], 10)
        self.assertEqual(
            cluster3.metrics.get_stats()['request_timer']['count'], 5)

        # Check scales to ensure they are appropriately named
        self.assertTrue("appcluster" in scales._Stats.stats.keys())
        self.assertTrue("devops" in scales._Stats.stats.keys())

        cluster2.shutdown()
        cluster3.shutdown()
Example #12
    def test_speculative_execution(self):
        """
        Test to ensure that speculative execution honors the LBP, and that retries happen appropriately.

        This test uses various LBP and ConstantSpeculativeExecutionPolicy settings and ensures the proper number of hosts is queried
        @since 3.7.0
        @jira_ticket PYTHON-218
        @expected_result speculative retries should honor max retries, idempotent state of queries, and underlying lbp.

        @test_category metadata
        """
        self.session.execute("""USE {0}""".format(self.keyspace_name))
        self.session.execute(
            """create or replace function timeout (arg int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS $$ long start = System.currentTimeMillis(); while(System.currentTimeMillis() - start < arg){} return arg; $$;"""
        )
        self.session.execute(
            """CREATE TABLE  d (k int PRIMARY KEY , i int);""")
        self.session.execute("""INSERT INTO d (k,i) VALUES (0, 1000);""")
        statement = SimpleStatement("""SELECT timeout(i) FROM d WHERE k =0""",
                                    is_idempotent=True)
        statement_non_idem = SimpleStatement(
            """SELECT timeout(i) FROM d WHERE k =0""", is_idempotent=False)

        # This LBP should repeat hosts up to around 30
        result = self.session.execute(statement,
                                      execution_profile='spec_ep_brr')
        self.assertEqual(21, len(result.response_future.attempted_hosts))

        # This LBP should keep host list to 3
        result = self.session.execute(statement,
                                      execution_profile='spec_ep_rr')
        self.assertEqual(3, len(result.response_future.attempted_hosts))
        # Spec_execution policy should limit retries to 1
        result = self.session.execute(statement,
                                      execution_profile='spec_ep_rr_lim')

        self.assertEqual(2, len(result.response_future.attempted_hosts))

        # Spec_execution policy should not be used if the query is not idempotent
        result = self.session.execute(statement_non_idem,
                                      execution_profile='spec_ep_brr')
        self.assertEqual(1, len(result.response_future.attempted_hosts))

        # Default policy with non_idem query
        result = self.session.execute(statement_non_idem)
        self.assertEqual(1, len(result.response_future.attempted_hosts))

        # Should be able to run an idempotent query against default execution policy with no speculative_execution_policy
        result = self.session.execute(statement)
        self.assertEqual(1, len(result.response_future.attempted_hosts))

        # Test timeout with spec_ex
        with self.assertRaises(OperationTimedOut):
            result = self.session.execute(statement,
                                          execution_profile='spec_ep_rr',
                                          timeout=.5)
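
The execution profiles referenced above (spec_ep_brr, spec_ep_rr, spec_ep_rr_lim) are registered in the test's setup, which is not shown. A hedged sketch of how such a profile is typically built; the policy parameters here are illustrative, not the suite's actual values:

from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import (ConstantSpeculativeExecutionPolicy,
                                RoundRobinPolicy)

# Fire a speculative attempt every 0.5s, at most 10 times, load balancing
# round-robin; only idempotent statements are retried speculatively.
spec_ep_rr = ExecutionProfile(
    load_balancing_policy=RoundRobinPolicy(),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.5, 10),
    request_timeout=12)

cluster = Cluster(execution_profiles={'spec_ep_rr': spec_ep_rr})
session = cluster.connect()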
Example #13
    def test_for_schema_disagreement_attribute(self):
        """
        Tests to ensure that schema disagreement is properly surfaced on the response future.

        Creates and destroys keyspaces/tables with various schema agreement timeouts set.
        The first part runs CQL create/drop commands with the schema agreement timeout set in such a way that it is impossible for agreement to occur within the timeout.
        It then validates that the correct value is set on the result.
        The second part ensures that when schema agreement occurs, the result set reflects it appropriately

        @since 3.1.0
        @jira_ticket PYTHON-458
        @expected_result is_schema_agreed is set appropriately on the response future

        @test_category schema
        """
        # This should yield a schema disagreement
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          max_schema_agreement_wait=0.001)
        session = cluster.connect(wait_for_all_pools=True)

        rs = session.execute(
            "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"
        )
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute(
            SimpleStatement(
                "CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)",
                consistency_level=ConsistencyLevel.ALL))
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, False)
        cluster.shutdown()

        # These should have schema agreement
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          max_schema_agreement_wait=100)
        session = cluster.connect()
        rs = session.execute(
            "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}"
        )
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute(
            SimpleStatement(
                "CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)",
                consistency_level=ConsistencyLevel.ALL))
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, True)
        cluster.shutdown()
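
check_and_wait_for_agreement is a suite helper that is not shown; it presumably inspects the attribute this test is about and then lets the schema settle. A simplified sketch of what it might look like, assuming the public ResponseFuture and control connection API:

def check_and_wait_for_agreement(session, rs, exp_schema_agreement):
    # is_schema_agreed is surfaced on the response future of the DDL request.
    assert rs.response_future.is_schema_agreed == exp_schema_agreement
    if not exp_schema_agreement:
        # Block until the nodes converge before issuing the next DDL.
        session.cluster.control_connection.wait_for_schema_agreement()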
Example #14
    def test_unavailable_error_message(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW,
                                                          None)
        message = QueryMessage(query=query,
                               consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(None, None, None, result)
        self.assertRaises(Exception, rf.result)
    def _query(self,
               session,
               keyspace,
               count,
               consistency_level=ConsistencyLevel.ONE):
        routing_key = struct.pack('>i', 0)
        for i in range(count):
            ss = SimpleStatement('SELECT * FROM cf WHERE k = 0',
                                 consistency_level=consistency_level,
                                 routing_key=routing_key)
            tries = 0
            while True:
                if tries > 100:
                    raise RuntimeError(
                        "Failed to execute query after 100 attempts: {0}".
                        format(ss))
                try:
                    self.coordinator_stats.add_coordinator(
                        session.execute_async(ss))
                    break
                except (OperationTimedOut, ReadTimeout):
                    ex_type, ex, tb = sys.exc_info()
                    log.warn("{0}: {1} Backtrace: {2}".format(
                        ex_type.__name__, ex, traceback.extract_tb(tb)))
                    del tb
                    tries += 1
                    time.sleep(1)
Example #16
    def test_read_timeout_error_message(self):
        session = self.make_session()
        query = SimpleStatement("SELECT * FROM foo")
        query.retry_policy = Mock()
        query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW,
                                                           None)
        message = QueryMessage(query=query,
                               consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        result = Mock(spec=ReadTimeoutErrorMessage, info={})
        rf._set_result(None, None, None, result)

        self.assertRaises(Exception, rf.result)
Example #17
    def test_custom_query_batching(self):
        """
        Test to validate that custom payloads work with batch queries

        Creates a batch query and ensures that custom payloads are passed to C*. A custom
        query provider is used with C* so we can validate that the same custom payloads are sent back
        with the results


        @since 2.6
        @jira_ticket PYTHON-280
        @expected_result valid custom payloads should be sent and received

        @test_category queries:custom_payload
        """

        # Construct Batch Statement
        batch = BatchStatement(BatchType.LOGGED)
        for i in range(10):
            batch.add(
                SimpleStatement(
                    "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (i, i))

        # Validate that various types of custom payloads are sent and received okay
        self.validate_various_custom_payloads(statement=batch)
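
validate_various_custom_payloads is suite scaffolding; the underlying driver mechanism is the custom_payload keyword on execute_async and the matching attribute on the response future. A hedged sketch with arbitrary illustrative payload bytes (echoing the payload back requires a custom QueryHandler on the server side):

from cassandra.cluster import Cluster
from cassandra.query import BatchStatement, BatchType, SimpleStatement

session = Cluster().connect()            # contact points are assumptions
batch = BatchStatement(BatchType.LOGGED)
batch.add(SimpleStatement(
    "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (0, 0))

payload = {'key1': b'value1', 'key2': b'value2'}
future = session.execute_async(batch, custom_payload=payload)
future.result()
# Populated from the RESULT message; empty unless the server sends one back.
print(future.custom_payload)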
Example #18
    def test_trace_cl(self):
        """
        Test to ensure that the CL set is correctly honored when executing trace queries.

        @since 3.3
        @jira_ticket PYTHON-435
        @expected_result Consistency Levels set on get_query_trace should be honored
        """
        # Execute a query
        query = "SELECT * FROM system.local"
        statement = SimpleStatement(query)
        response_future = self.session.execute_async(statement, trace=True)
        response_future.result()
        with self.assertRaises(Unavailable):
            response_future.get_query_trace(query_cl=ConsistencyLevel.THREE)
        # Try again with a smattering of other CL's
        self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.TWO).trace_id)
        response_future = self.session.execute_async(statement, trace=True)
        response_future.result()
        self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ONE).trace_id)
        response_future = self.session.execute_async(statement, trace=True)
        response_future.result()
        with self.assertRaises(InvalidRequest):
            self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ANY).trace_id)
        self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.QUORUM).trace_id)
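
For reference, the object returned by get_query_trace exposes the session-level trace data. A small sketch of reading it, assuming the standard QueryTrace attributes (trace_id, duration, coordinator, events):

from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

session = Cluster().connect()            # contact points are assumptions
future = session.execute_async(SimpleStatement("SELECT * FROM system.local"),
                               trace=True)
future.result()

trace = future.get_query_trace(max_wait=2.0)
print(trace.trace_id, trace.duration, trace.coordinator)
for event in trace.events:
    # Each event carries a description, source node and elapsed time.
    print(event.source_elapsed, event.source, event.description)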
Example #19
    def delete(self, username=None):
        """Delete a collection and the associated row in the tree entry table"""
        from indigo.models import Notification
        if self.is_root:
            return
        cfg = get_config(None)
        session = connection.get_session()
        keyspace = cfg.get('KEYSPACE', 'indigo')
        session.set_keyspace(keyspace)
        query = SimpleStatement(
            """DELETE FROM tree_entry WHERE container=%s""")
        session.execute(query, (self.path, ))
        # Get the row that describes the collection as a child of its parent
        child = TreeEntry.objects.filter(container=self.container,
                                         name=u"{}/".format(
                                             self.name)).first()
        if child:
            child.delete()

        session = get_graph_session()
        session.execute_graph("""v_coll = {}.drop();
                                 """.format(gq_get_vertex_collection(self)))

        state = self.mqtt_get_state()
        payload = self.mqtt_payload(state, {})
        Notification.delete_collection(username, self.path, payload)
        self.reset()
Example #20
    def test_speculative_and_timeout(self):
        """
        Test to ensure the timeout is honored when using speculative execution
        @since 3.10
        @jira_ticket PYTHON-750
        @expected_result speculative retries should be scheduled at a fixed interval, up to the maximum
        period of the timeout.

        @test_category metadata
        """
        # We mock this so no messages are sent, otherwise a response might arrive
        # and we would not know how many hosts we queried
        with patch.object(Connection, "send_msg",
                          return_value=100) as mocked_send_msg:

            statement = SimpleStatement(
                "INSERT INTO test3rf.test (k, v) VALUES (0, 1);",
                is_idempotent=True)

            # An OperationTimedOut is placed here in the response_future;
            # that's why we can't call session.execute, which would raise it, and
            # instead have to wait directly on the event
            response_future = self.session.execute_async(
                statement, execution_profile='spec_ep_brr_lim', timeout=2.2)
            response_future._event.wait(4)
            self.assertIsInstance(response_future._final_exception,
                                  OperationTimedOut)

            # 2.2s timeout / 0.4s speculative interval allows 5 speculative attempts, plus the initial request = 6
            self.assertEqual(len(response_future.attempted_hosts), 6)
Example #21
    def test_wide_table(self):
        table = 'wide_table'
        table_width = 330
        session = self.make_session_and_keyspace()
        table_declaration = 'CREATE TABLE %s (key INT PRIMARY KEY, '
        table_declaration += ' INT, '.join(
            create_column_name(i) for i in range(table_width))
        table_declaration += ' INT)'
        session.execute(table_declaration % table)

        # Write
        insert_statement = 'INSERT INTO %s (key, '
        insert_statement += ', '.join(
            create_column_name(i) for i in range(table_width))
        insert_statement += ') VALUES (%s, '
        insert_statement += ', '.join(str(i) for i in range(table_width))
        insert_statement += ')'
        insert_statement = insert_statement % (table, 0)

        session.execute(
            SimpleStatement(insert_statement,
                            consistency_level=ConsistencyLevel.QUORUM))

        # Read
        result = session.execute('SELECT * FROM %s WHERE key=%s' % (table, 0))

        # Verify
        for row in result:
            for i in range(table_width):
                self.assertEqual(row[create_column_name(i)], i)

        session.cluster.shutdown()
Example #22
    def test_recreates(self):
        """
        Basic test for repeated schema creation and use, using many different keyspaces
        """

        session = self.session

        for i in range(2):
            for keyspace_number in range(5):
                keyspace = "ks_{0}".format(keyspace_number)

                if keyspace in self.cluster.metadata.keyspaces.keys():
                    drop = "DROP KEYSPACE {0}".format(keyspace)
                    log.debug(drop)
                    execute_until_pass(session, drop)

                create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format(
                    keyspace)
                log.debug(create)
                execute_until_pass(session, create)

                create = "CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)".format(
                    keyspace)
                log.debug(create)
                execute_until_pass(session, create)

                use = "USE {0}".format(keyspace)
                log.debug(use)
                execute_until_pass(session, use)

                insert = "INSERT INTO cf (k, i) VALUES (0, 0)"
                log.debug(insert)
                ss = SimpleStatement(insert,
                                     consistency_level=ConsistencyLevel.QUORUM)
                execute_until_pass(session, ss)
    def test_default_serial_consistency_level(self, *_):
        """
        Make sure default_serial_consistency_level passes through to a query message.
        Also make sure Statement.serial_consistency_level overrides the default.

        PR #510
        """
        c = Cluster(protocol_version=4)
        s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)])

        # default is None
        default_profile = c.profile_manager.default
        self.assertIsNone(default_profile.serial_consistency_level)

        sentinel = 1001
        for cl in (None, ConsistencyLevel.LOCAL_SERIAL,
                   ConsistencyLevel.SERIAL, sentinel):
            default_profile.serial_consistency_level = cl

            # default is passed through
            f = s.execute_async(query='')
            self.assertEqual(f.message.serial_consistency_level, cl)

            # any non-None statement setting takes precedence
            for cl_override in (ConsistencyLevel.LOCAL_SERIAL,
                                ConsistencyLevel.SERIAL):
                f = s.execute_async(
                    SimpleStatement(query_string='',
                                    serial_consistency_level=cl_override))
                self.assertEqual(default_profile.serial_consistency_level, cl)
                self.assertEqual(f.message.serial_consistency_level,
                                 cl_override)
    def test_statement_params_override_profile(self):
        non_default_profile = ExecutionProfile(RoundRobinPolicy(),
                                               *[object() for _ in range(3)])
        cluster = Cluster(
            execution_profiles={'non-default': non_default_profile})
        session = Session(cluster,
                          hosts=[Host("127.0.0.1", SimpleConvictionPolicy)])

        rf = session.execute_async("query", execution_profile='non-default')

        ss = SimpleStatement("query",
                             retry_policy=DowngradingConsistencyRetryPolicy(),
                             consistency_level=ConsistencyLevel.ALL,
                             serial_consistency_level=ConsistencyLevel.SERIAL)
        my_timeout = 1.1234

        self.assertNotEqual(ss.retry_policy.__class__,
                            rf._load_balancer.__class__)
        self.assertNotEqual(ss.consistency_level, rf.message.consistency_level)
        self.assertNotEqual(ss._serial_consistency_level,
                            rf.message.serial_consistency_level)
        self.assertNotEqual(my_timeout, rf.timeout)

        rf = session.execute_async(ss,
                                   timeout=my_timeout,
                                   execution_profile='non-default')
        expected_profile = ExecutionProfile(
            non_default_profile.load_balancing_policy, ss.retry_policy,
            ss.consistency_level, ss._serial_consistency_level, my_timeout,
            non_default_profile.row_factory)
        self._verify_response_future_profile(rf, expected_profile)
Example #25
    def test_connection_error(self):
        """
        Trigger and ensure connection_errors are counted
        Stop all nodes with the driver knowing about the "DOWN" states.
        """
        # Test writes
        for i in range(0, 100):
            self.session.execute_async(
                "INSERT INTO test (k, v) VALUES ({0}, {1})".format(i, i))

        # Stop the cluster
        get_cluster().stop(wait=True, gently=False)

        try:
            # Ensure the nodes are actually down
            query = SimpleStatement("SELECT * FROM test",
                                    consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(NoHostAvailable):
                self.session.execute(query)
        finally:
            get_cluster().start(wait_for_binary_proto=True,
                                wait_other_notice=True)
            # Give some time for the cluster to come back up, for the next test
            time.sleep(5)

        self.assertGreater(self.cluster.metrics.stats.connection_errors, 0)
Example #26
    def delete_id(cls, uuid):
        """Delete all blobs for the specified uuid"""
        cfg = get_config(None)
        session = connection.get_session()
        keyspace = cfg.get('KEYSPACE', 'indigo')
        session.set_keyspace(keyspace)
        query = SimpleStatement("""DELETE FROM data_object WHERE uuid=%s""")
        session.execute(query, (uuid,))
Example #27
    def test_simple_statements(self):
        batch = BatchStatement(BatchType.LOGGED)
        for i in range(10):
            batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (i, i))

        self.session.execute(batch)
        self.session.execute_async(batch).result()
        self.confirm_results()
Example #28
    def test_simple_statement(self):
        """
        Highlight the format of printing SimpleStatements
        """

        ss = SimpleStatement('SELECT * FROM test3rf.test', consistency_level=ConsistencyLevel.ONE)
        self.assertEqual(str(ss),
                         '<SimpleStatement query="SELECT * FROM test3rf.test", consistency=ONE>')
Example #29
    def test_bad_consistency_level(self):
        statement = SimpleStatement("foo")
        self.assertRaises(ValueError, setattr, statement,
                          'serial_consistency_level', ConsistencyLevel.ONE)
        self.assertRaises(ValueError,
                          SimpleStatement,
                          'foo',
                          serial_consistency_level=ConsistencyLevel.ONE)
Example #30
    def test_wide_batch_rows(self):
        """
        Test for inserting wide rows with batching

        test_wide_batch_rows tests inserting a wide row of data using batching. It will then attempt to query
        that data and ensure that all of it has been inserted appropriately.

        @expected_result all items should be inserted, and verified.

        @test_category queries:batch
        """

        # Table Creation
        table = 'wide_batch_rows'
        session = self.make_session_and_keyspace()
        session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' %
                        table)

        # Run batch insert
        statement = 'BEGIN BATCH '
        to_insert = 2000
        for i in range(to_insert):
            statement += 'INSERT INTO %s (k, i) VALUES (%s, %s) ' % (table, 0,
                                                                     i)
        statement += 'APPLY BATCH'
        statement = SimpleStatement(statement,
                                    consistency_level=ConsistencyLevel.QUORUM)

        # Execute insert with larger timeout, since it's a wide row
        try:
            session.execute(statement, timeout=30.0)

        except OperationTimedOut:
            # If we time out on insertion that's bad, but it could just be a slow underlying C*.
            # Attempt to validate anyway; we will fail if we don't get the right data back.
            ex_type, ex, tb = sys.exc_info()
            log.warn(
                "Batch wide row insertion timed out, this may require additional investigation"
            )
            log.warn("{0}: {1} Backtrace: {2}".format(
                ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb

        # Verify
        results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, 0))
        last_value = 0
        for j, row in enumerate(results):
            last_value = row['i']
            self.assertEqual(last_value, j)

        # Check the last value to make sure it's what we expect
        index_value = to_insert - 1
        self.assertEqual(
            last_value, index_value,
            "Verification failed: only found {0} inserted, we were expecting {1}"
            .format(j, index_value))

        session.cluster.shutdown()
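
The test above assembles one large CQL string batch by hand; the driver's BatchStatement is the programmatic equivalent and avoids string interpolation. A hedged sketch of the same wide-row insert using it (keyspace setup is omitted, sizes mirror the test):

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import BatchStatement, SimpleStatement

session = Cluster().connect()            # keyspace/contact points are assumptions
insert = SimpleStatement("INSERT INTO wide_batch_rows (k, i) VALUES (%s, %s)")

batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
for i in range(2000):
    batch.add(insert, (0, i))
# Larger timeout, as in the test, since this is a wide batch.
session.execute(batch, timeout=30.0)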