def test_null_types(self):
        """
        Test to validate that the numpy protocol handler can deal with null values.
        @since 3.3.0
         - updated 3.6.0: now numeric types used masked array
        @jira_ticket PYTHON-550
        @expected_result Numpy can handle non mapped types' null values.

        @test_category data_types:serialization
        """
        s = self.session
        s.client_protocol_handler = NumpyProtocolHandler

        table = "%s.%s" % (self.keyspace_name, self.function_table_name)
        create_table_with_all_types(table, s, 10)

        # First primkey value beyond the fully-populated rows; everything at or
        # above this key is inserted with all non-key columns left null.
        begin_unset = max(
            s.execute('select primkey from %s' % (table, ))[0]['primkey']) + 1
        keys_null = range(begin_unset, begin_unset + 10)

        # scatter some empty rows in here
        insert = "insert into %s (primkey) values (%%s)" % (table, )
        execute_concurrent_with_args(s, insert, ((k, ) for k in keys_null))

        result = s.execute("select * from %s" % (table, ))[0]

        from numpy.ma import masked, MaskedArray
        result_keys = result.pop('primkey')
        # Result-row positions ordered by primary key: the first `begin_unset`
        # positions are the fully-populated rows, the rest are the null rows.
        mapped_index = [v[1] for v in sorted(zip(result_keys, count()))]

        had_masked = had_none = False
        for col_array in result.values():
            # these have to be different branches (as opposed to comparing against an 'unset value')
            # because None and `masked` have different identity and equals semantics
            if isinstance(col_array, MaskedArray):
                had_masked = True
                # Plain loops (not side-effect comprehensions): populated rows
                # must not be masked, null rows must be.
                for i in mapped_index[:begin_unset]:
                    self.assertIsNot(col_array[i], masked)
                for i in mapped_index[begin_unset:]:
                    self.assertIs(col_array[i], masked)
            else:
                had_none = True
                for i in mapped_index[:begin_unset]:
                    self.assertIsNotNone(col_array[i])
                for i in mapped_index[begin_unset:]:
                    self.assertIsNone(col_array[i])
        # Sanity-check that both column representations were actually exercised.
        self.assertTrue(had_masked)
        self.assertTrue(had_none)
# Esempio n. 2
# 0
# (scraping artifact: example separator / vote count, commented out so the file parses)
    def _insert(self, session, keyspace, count=12,
                consistency_level=ConsistencyLevel.ONE):
        """Insert `count` identical rows into cf, retrying on timeout/failure.

        Retries up to 100 times before giving up, since transient write
        timeouts are expected under the load these tests generate.
        Raises RuntimeError if all attempts fail.
        """
        session.execute('USE %s' % keyspace)
        ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level)

        tries = 0
        while tries < 100:
            try:
                execute_concurrent_with_args(session, ss, [None] * count)
                return
            except (OperationTimedOut, WriteTimeout, WriteFailure):
                ex_type, ex, tb = sys.exc_info()
                # log.warn is a deprecated alias of log.warning; use lazy
                # %-style args so formatting only happens when emitted.
                log.warning("%s: %s Backtrace: %s", ex_type.__name__, ex, traceback.extract_tb(tb))
                del tb  # break the traceback reference cycle
                tries += 1

        raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(ss))
    def execute_concurrent_args_helper(self, session, query, params):
        """Run execute_concurrent_with_args, retrying transient timeouts/failures.

        Returns the concurrent-execution results on success; raises
        RuntimeError after 100 failed attempts.
        """
        tries = 0
        while tries < 100:
            try:
                return execute_concurrent_with_args(session, query, params, concurrency=50)
            except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure):
                ex_type, ex, tb = sys.exc_info()
                # log.warn is a deprecated alias of log.warning; use lazy
                # %-style args so formatting only happens when emitted.
                log.warning("%s: %s Backtrace: %s", ex_type.__name__, ex, traceback.extract_tb(tb))
                del tb  # break the traceback reference cycle
                tries += 1

        raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query))
# Esempio n. 4
# 0
# (scraping artifact: example separator / vote count, commented out so the file parses)
    def test_concurrent_with_paging(self):
        """Verify concurrent execution of a prepared SELECT across many fetch sizes."""
        # Seed 100 rows via concurrent inserts of the same statement.
        insert_pairs = list(zip(cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
                                [(i, ) for i in range(100)]))
        execute_concurrent(self.session, insert_pairs)

        select_all = self.session.prepare("SELECT * FROM test3rf.test")

        # Each fetch size forces a different number of result pages.
        for page_size in (2, 3, 7, 10, 99, 100, 101, 10000):
            self.session.default_fetch_size = page_size
            outcomes = execute_concurrent_with_args(self.session, select_all, [None] * 10)
            self.assertEqual(10, len(outcomes))
            for success, rows in outcomes:
                self.assertTrue(success)
                self.assertEqual(100, len(list(rows)))
# Esempio n. 5
# 0
# (scraping artifact: example separator / vote count, commented out so the file parses)
    def test_recursion_limited(self):
        """
        Verify that recursion is controlled when raise_on_first_error=False and something is wrong with the query.

        PYTHON-585
        """
        limit = sys.getrecursionlimit()
        session = Session(Cluster(), [Host("127.0.0.1", SimpleConvictionPolicy)])
        bad_args = [('param',)] * limit

        # With raise_on_first_error=True the bad query surfaces immediately.
        self.assertRaises(TypeError, execute_concurrent_with_args,
                          session, "doesn't matter", bad_args, raise_on_first_error=True)

        # With raise_on_first_error=False every failure is collected instead;
        # previously this recursed once per failure and could blow the stack.
        outcomes = execute_concurrent_with_args(
            session, "doesn't matter", bad_args, raise_on_first_error=False)
        self.assertEqual(len(outcomes), limit)
        for ok, err in outcomes:
            self.assertFalse(ok)
            self.assertIsInstance(err, TypeError)
    def test_named_table_with_mv(self):
        """
        Test NamedTable access to materialized views

        Creates some materialized views using Traditional CQL. Then ensures we can access those materialized view using
        the NamedKeyspace, and NamedTable interfaces. Tests basic filtering as well.

        @since 3.0.0
        @jira_ticket PYTHON-406
        @expected_result Named Tables should have access to materialized views

        @test_category materialized_view
        """
        keyspace = models.DEFAULT_KEYSPACE

        # Start from a clean slate: views must be dropped before the base table.
        self.session.execute(
            "DROP MATERIALIZED VIEW IF EXISTS {0}.alltimehigh".format(keyspace))
        self.session.execute(
            "DROP MATERIALIZED VIEW IF EXISTS {0}.monthlyhigh".format(keyspace))
        self.session.execute("DROP TABLE IF EXISTS {0}.scores".format(keyspace))

        self.session.execute("""CREATE TABLE {0}.scores(
                        user TEXT,
                        game TEXT,
                        year INT,
                        month INT,
                        day INT,
                        score INT,
                        PRIMARY KEY (user, game, year, month, day)
                        )""".format(keyspace))

        monthly_mv_cql = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
                        SELECT game, year, month, score, user, day FROM {0}.scores
                        WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
                        PRIMARY KEY ((game, year, month), score, user, day)
                        WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(keyspace)
        self.session.execute(monthly_mv_cql)

        alltime_mv_cql = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
                        SELECT * FROM {0}.scores
                        WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
                        PRIMARY KEY (game, score, user, year, month, day)
                        WITH CLUSTERING ORDER BY (score DESC)""".format(keyspace)
        self.session.execute(alltime_mv_cql)

        # Populate the base table with data
        prepared_insert = self.session.prepare(
            """INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES  (?, ?, ? ,? ,?, ?)"""
            .format(keyspace))
        parameters = (
            ('pcmanus', 'Coup', 2015, 5, 1, 4000),
            ('jbellis', 'Coup', 2015, 5, 3, 1750),
            ('yukim', 'Coup', 2015, 5, 3, 2250),
            ('tjake', 'Coup', 2015, 5, 3, 500),
            ('iamaleksey', 'Coup', 2015, 6, 1, 2500),
            ('tjake', 'Coup', 2015, 6, 2, 1000),
            ('pcmanus', 'Coup', 2015, 6, 2, 2000),
            ('jmckenzie', 'Coup', 2015, 6, 9, 2700),
            ('jbellis', 'Coup', 2015, 6, 20, 3500),
            ('jbellis', 'Checkers', 2015, 6, 20, 1200),
            ('jbellis', 'Chess', 2015, 6, 21, 3500),
            ('pcmanus', 'Chess', 2015, 1, 25, 3200),
        )
        prepared_insert.consistency_level = ConsistencyLevel.ALL
        execute_concurrent_with_args(self.session, prepared_insert, parameters)

        # Attempt to query the data using Named Table interface
        # Also test filtering on mv's
        named_ks = NamedKeyspace(keyspace)
        monthly_table = named_ks.table("monthlyhigh")
        alltime_table = named_ks.table("alltimehigh")

        # Base table and both views should all expose every inserted row.
        for table_name in ("scores", "monthlyhigh", "alltimehigh"):
            self.assertTrue(
                self.check_table_size(table_name, named_ks, len(parameters)))

        chess_in_june = monthly_table.objects.filter(game='Chess', year=2015, month=6)
        self.assertEqual(len(chess_in_june), 1)
        self.assertEqual(chess_in_june[0]['score'], 3500)
        self.assertEqual(chess_in_june[0]['user'], 'jbellis')

        chess_all_time = alltime_table.objects.filter(game='Chess')
        self.assertEqual(len(chess_all_time), 2)
        # Clustering order is score DESC, so the top score comes first.
        self.assertEqual(chess_all_time[0]['score'], 3500)