Example #1
def test_wrong_set_order(cql, table_fsi):
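    # Insert a row whose key is a frozen set bound with its elements in a
    # non-sorted order, and check that lookups find the row regardless of
    # the element order used in the bound value.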
    i = unique_key_int()
    insert = cql.prepare(f"INSERT INTO {table_fsi} (k) VALUES (?)")
    lookup = cql.prepare(f"SELECT * FROM {table_fsi} WHERE k = ?")
    cql.execute(insert, [tuple([i + 1, i, i + 2])])
    assert len(list(cql.execute(lookup, [tuple([i, i + 1, i + 2])]))) == 1
    assert len(list(cql.execute(lookup, [tuple([i + 1, i, i + 2])]))) == 1
Example #2
def test_wrong_set_order_in_nested(cql, table1):
    k = unique_key_int()

    # When inserting or selecting with a frozen<map<set<int>, int>> key
    # where the value is specified inline in CQL, the order of the set
    # does not matter, as expected:
    def s(*args):
        return ",".join([str(i) for i in args])

    cql.execute("INSERT INTO " + table1 + " (k) VALUES ({{" +
                s(k, k + 1, k + 2) + "}: 1})")
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k, k + 1, k + 2) + "}: 1}"))) == 1
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k + 1, k, k + 2) + "}: 1}"))) == 1
    k = k + 1  # advance k so the next insert will write to a different item
    cql.execute("INSERT INTO " + table1 + " (k) VALUES ({{" +
                s(k + 1, k, k + 2) + "}: 1})")
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k, k + 1, k + 2) + "}: 1}"))) == 1
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k + 1, k, k + 2) + "}: 1}"))) == 1
    k = k + 1

    # Try the same with prepared statements. Here we can trigger issue #7856.
    insert = cql.prepare(f"INSERT INTO {table1} (k) VALUES (?)")
    lookup = cql.prepare(f"SELECT * FROM {table1} WHERE k = ?")
    cql.execute(insert, [{tuple([k, k + 1, k + 2]): 1}])
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k, k + 1, k + 2) + "}: 1}"))) == 1
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k + 1, k, k + 2) + "}: 1}"))) == 1
    assert len(list(cql.execute(lookup, [{tuple([k, k + 1, k + 2]): 1}]))) == 1
    # This lookup by wrong order triggers issue #7856:
    assert len(list(cql.execute(lookup, [{tuple([k + 1, k, k + 2]): 1}]))) == 1
    k = k + 1
    # This insert by wrong order triggers issue #7856 in the lookups which follow.
    cql.execute(insert, [{tuple([k + 1, k, k + 2]): 1}])
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k, k + 1, k + 2) + "}: 1}"))) == 1
    assert len(
        list(
            cql.execute("SELECT * FROM " + table1 + " WHERE k = {{" +
                        s(k + 1, k, k + 2) + "}: 1}"))) == 1
    assert len(list(cql.execute(lookup, [{tuple([k, k + 1, k + 2]): 1}]))) == 1
    assert len(list(cql.execute(lookup, [{tuple([k + 1, k, k + 2]): 1}]))) == 1
Example #3
def test_select_default_order(cql, table_int_desc):
    k = unique_key_int()
    stmt = cql.prepare(f'INSERT INTO {table_int_desc} (k, c) VALUES ({k}, ?)')
    numbers = range(5)
    for i in numbers:
        cql.execute(stmt, [i])
    # In a table created with descending sort order, the default select
    # order is descending:
    rows = [(i, ) for i in numbers]
    reverse_rows = [(i, ) for i in reversed(numbers)]
    assert reverse_rows == list(
        cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k}'))
    # Confirm that when specifying the order explicitly, both work:
    assert rows == list(
        cql.execute(
            f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c ASC'))
    assert reverse_rows == list(
        cql.execute(
            f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c DESC'))
    # Repeat the same three assertions as above, adding a "limit" of N=3:
    N = 3
    rows = rows[0:N]
    reverse_rows = reverse_rows[0:N]
    assert reverse_rows == list(
        cql.execute(f'SELECT c FROM {table_int_desc} WHERE k = {k} LIMIT {N}'))
    assert rows == list(
        cql.execute(
            f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c ASC LIMIT {N}'
        ))
    assert reverse_rows == list(
        cql.execute(
            f'SELECT c FROM {table_int_desc} WHERE k = {k} ORDER BY c DESC LIMIT {N}'
        ))
Example #4
def test_fromjson_int_overflow_unprepared(cql, table1):
    p = unique_key_int()
    # The highest legal int is 2147483647 (2^31-1). 2147483648 is not a legal
    # int, so trying to insert it should result in an error - not a silent
    # wraparound to -2147483648 as happened in Scylla.
    with pytest.raises(FunctionFailure):
        cql.execute(f"INSERT INTO {table1} (p, v) VALUES ({p}, fromJson('2147483648'))")
Example #5
def test_validation_blob_as_int_len(cql, test_keyspace):
    types = [
        ('i', 'int', 4),
        ('b', 'bigint', 8),
        ('s', 'smallint', 2),
        ('t', 'tinyint', 1),
    ]
    types_def = ','.join([f'{x[0]} {x[1]}' for x in types])
    with new_test_table(cql, test_keyspace,
                        f'k int primary key, {types_def}') as table:
        k = unique_key_int()
        for var, typ, length in types:
            # Check that a blob with exactly `length` bytes is fine, while one
            # with one byte more or one byte less is rejected as an invalid
            # request:
            cql.execute(
                f"INSERT INTO {table} (k, {var}) VALUES ({k}, blobAs{typ}(0x{'00'*length}))"
            )
            assert 0 == getattr(
                cql.execute(f"SELECT {var} FROM {table} WHERE k = {k}").one(),
                var)
            with pytest.raises(InvalidRequest, match='is not a valid binary'):
                cql.execute(
                    f"INSERT INTO {table} (k, {var}) VALUES ({k}, blobAs{typ}(0x{'00'*(length+1)}))"
                )
            if length - 1 != 0:
                with pytest.raises(InvalidRequest,
                                   match='is not a valid binary'):
                    cql.execute(
                        f"INSERT INTO {table} (k, {var}) VALUES ({k}, blobAs{typ}(0x{'00'*(length-1)}))"
                    )
Example #6
def test_prepared_statements(scylla_only, cql, table1):
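    # Check that the USING TIMEOUT clause (a Scylla extension) works in
    # prepared INSERT, SELECT and UPDATE statements: a zero timeout makes
    # the request time out, a very large timeout lets it succeed, and the
    # timeout can also be bound as a named parameter.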
    table = table1
    key = unique_key_int()
    prep = cql.prepare(
        f"INSERT INTO {table} (p,c,v) VALUES ({key},6,7) USING TIMEOUT ?")
    with pytest.raises(WriteTimeout):
        cql.execute(prep, (Duration(nanoseconds=0), ))
    cql.execute(prep, (Duration(nanoseconds=10**15), ))
    result = list(cql.execute(f"SELECT * FROM {table} WHERE p = {key}"))
    assert len(result) == 1 and (result[0].c, result[0].v) == (6, 7)
    prep = cql.prepare(f"SELECT * FROM {table} USING TIMEOUT ?")
    with pytest.raises(ReadTimeout):
        cql.execute(prep, (Duration(nanoseconds=0), ))
    cql.execute(prep, (Duration(nanoseconds=10**15), ))
    prep = cql.prepare(
        f"UPDATE {table} USING TIMEOUT ? AND TIMESTAMP ? SET v = ? WHERE p = {key} and c = 1"
    )
    with pytest.raises(WriteTimeout):
        cql.execute(prep, (Duration(nanoseconds=0), 3, 42))
    cql.execute(prep, (Duration(nanoseconds=10**15), 3, 42))
    prep_named = cql.prepare(
        f"UPDATE {table} USING TIMEOUT :timeout AND TIMESTAMP :ts SET v = :v WHERE p = {key} and c = 1"
    )
    # Timeout cannot be left unbound
    with pytest.raises(InvalidRequest):
        cql.execute(prep_named, {'timestamp': 42, 'v': 3})
    cql.execute(prep_named, {
        'timestamp': 42,
        'v': 3,
        'timeout': Duration(nanoseconds=10**15)
    })
    result = list(
        cql.execute(f"SELECT * FROM {table} WHERE p = {key} AND c = 1"))
    assert len(result) == 1 and (result[0].c, result[0].v) == (1, 3)
Example #7
def test_per_query_timeout_large_enough(scylla_only, cql, table1):
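    # Check that requests with generous per-query timeouts (USING TIMEOUT)
    # succeed, and that a SELECT with such a timeout returns the same rows
    # as the same SELECT without one.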
    table = table1
    k1, k2, k3 = unique_key_int(), unique_key_int(), unique_key_int()
    key = unique_key_int()
    cql.execute(
        f"INSERT INTO {table} (p,c,v) VALUES ({key},1,1) USING TIMEOUT 60m")
    cql.execute(
        f"UPDATE {table} USING TIMEOUT 48h SET v = 5 WHERE p = {key} AND c = 1"
    )
    res = list(
        cql.execute(
            f"SELECT * FROM {table} WHERE p IN ({k1},{k2},{k3},{key}) USING TIMEOUT 24h"
        ))
    assert set(res) == set(
        cql.execute(
            f"SELECT * FROM {table} WHERE p IN ({k1},{k2},{k3},{key})"))
Example #8
def test_type_time_from_int_prepared(cql, table1):
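    # Check that a "time" column can be bound to an integer in a prepared
    # statement, and read back as the corresponding Time value.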
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, t) VALUES (?, ?)")
    cql.execute(stmt, [p, 123])
    assert list(cql.execute(f"SELECT t from {table1} where p = {p}")) == [
        (Time(123), )
    ]
Example #9
def test_fromjson_int_scientific_notation_prepared(cql, table1, cassandra_bug):
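    # Check that fromJson() accepts integer-valued JSON numbers written in
    # scientific notation for bigint and int columns (the cassandra_bug
    # fixture marks this as a known problem in Cassandra).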
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, bigv) VALUES (?, fromJson(?))")
    cql.execute(stmt, [p, '1.23456789E+9'])
    assert list(cql.execute(f"SELECT p, bigv from {table1} where p = {p}")) == [(p, 1234567890)]
    stmt = cql.prepare(f"INSERT INTO {table1} (p, v) VALUES (?, fromJson(?))")
    cql.execute(stmt, [p, '1e6'])
    assert list(cql.execute(f"SELECT p, v from {table1} where p = {p}")) == [(p, 1000000)]
Example #10
def test_fromjson_wrong_type_prepared(cql, table1):
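    # Check that fromJson() fails with a FunctionFailure when the JSON
    # value's type does not match the type of the target column.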
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, v) VALUES (?, fromJson(?))")
    with pytest.raises(FunctionFailure):
        cql.execute(stmt, [p, '"dog"'])
    stmt = cql.prepare(f"INSERT INTO {table1} (p, a) VALUES (?, fromJson(?))")
    with pytest.raises(FunctionFailure):
        cql.execute(stmt, [p, '3'])
Example #11
def test_basic_default_ttl(cql, table_ttl_1):
    p1 = unique_key_int()
    p2 = unique_key_int()
    cql.execute(
        f'INSERT INTO {table_ttl_1} (p, v) VALUES ({p1}, 1) USING TTL 1000')
    cql.execute(f'INSERT INTO {table_ttl_1} (p, v) VALUES ({p2}, 1)')
    # The p2 item should expire in *less* than one second (it will expire
    # in the next whole second).
    start = time.time()
    while len(list(cql.execute(f'SELECT * from {table_ttl_1} where p={p2}'))):
        assert time.time() < start + 2
        time.sleep(0.1)
    # p1 should not have expired yet. By the way, its current ttl(v) would
    # normally be exactly 999 now, but theoretically could be a bit lower in
    # case of delays in the test.
    assert len(list(
        cql.execute(f'SELECT * from {table_ttl_1} where p={p1}'))) == 1
Example #12
def test_type_time_from_string_unprepared(cql, table1):
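    # Check that a "time" value can be written as a string literal with
    # various fractional-second precisions, and read back as the expected
    # Time value.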
    p = unique_key_int()
    for t in [
            "08:12:54", "08:12:54.123", "08:12:54.123456", "08:12:54.123456789"
    ]:
        cql.execute(f"INSERT INTO {table1} (p, t) VALUES ({p}, '{t}')")
        assert list(cql.execute(f"SELECT t from {table1} where p = {p}")) == [
            (Time(t), )
        ]
Example #13
def test_tojson_double(cql, table1):
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, d) VALUES (?, ?)")
    cql.execute(stmt, [p, 123.456])
    assert list(cql.execute(f"SELECT d, toJson(d) from {table1} where p = {p}")) == [(123.456, "123.456")]
    # While 123.456 above worked, in issue #7972 we note that 123123.123123
    # does not work.
    cql.execute(stmt, [p, 123123.123123])
    assert list(cql.execute(f"SELECT d, toJson(d) from {table1} where p = {p}")) == [(123123.123123, "123123.123123")]
Example #14
def test_select_json_null_component(cql, table1, type1):
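    # Check that SELECT JSON prints "null" for an unset component of a
    # tuple column and of a user-defined-type column.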
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, tup) VALUES ({p}, ?)")
    cql.execute(stmt, [('hello', None)])
    assert list(cql.execute(f"SELECT JSON tup from {table1} where p = {p}")) == [('{"tup": ["hello", null]}',)]

    stmt = cql.prepare(f"INSERT INTO {table1} (p, t1) VALUES ({p}, ?)")
    cql.execute(stmt, [('hello', None)])
    assert list(cql.execute(f"SELECT JSON t1 from {table1} where p = {p}")) == [('{"t1": {"t": "hello", "b": null}}',)]
Example #15
def test_fromjson_boolean_string_prepared(cql, table1):
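    # Check that fromJson() accepts the JSON strings "true" and "false",
    # in any letter case, for a boolean column.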
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, b) VALUES (?, fromJson(?))")
    cql.execute(stmt, [p, '"true"'])
    assert list(cql.execute(f"SELECT p, b from {table1} where p = {p}")) == [(p, True)]
    cql.execute(stmt, [p, '"false"'])
    assert list(cql.execute(f"SELECT p, b from {table1} where p = {p}")) == [(p, False)]
    cql.execute(stmt, [p, '"fALSe"'])
    assert list(cql.execute(f"SELECT p, b from {table1} where p = {p}")) == [(p, False)]
Example #16
def test_tojson_decimal_high_mantissa2(cql, table1):
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, dec) VALUES ({p}, ?)")
    # Although 1e1000 is higher than a normal double, it should be fine for
    # Scylla's "decimal" type:
    high = '1e1000'
    cql.execute(stmt, [Decimal(high)])
    result = cql.execute(f"SELECT toJson(dec) from {table1} where p = {p}").one()[0]
    # We expect the "result" JSON string to be 1E+1000 - not 100000000....000000.
    assert len(result) < 10
Example #17
def test_fromjson_bigint_nonoverflow_scientific(cql, table1, cassandra_bug):
    p = unique_key_int()
    stmt = cql.prepare(f"INSERT INTO {table1} (p, bigv) VALUES (?, fromJson(?))")
    # 1152921504606846975 is 2^60-1, more than 2^53 but less than what a
    # bigint can store (2^63-1). We do not use 2^63-1 in this test because
    # an inaccuracy there in the up direction could overflow the signed
    # integer and cause UBSAN errors - whereas here we want to detect the
    # inaccuracy cleanly.
    cql.execute(stmt, [p, '115292150460684697.5e1'])
    assert list(cql.execute(f"SELECT bigv from {table1} where p = {p}")) == [(1152921504606846975,)]
Example #18
def test_default_ttl_0_override(cql, table_ttl_100):
    p = unique_key_int()
    cql.execute(
        f'INSERT INTO {table_ttl_100} (p, v) VALUES ({p}, 1) USING TTL 0')
    # We can immediately check that this item's TTL is "null", meaning it
    # will never expire. There's no need to have any sleeps.
    assert list(
        cql.execute(f'SELECT ttl(v) from {table_ttl_100} where p={p}')) == [
            (None, )
        ]
Example #19
def test_lwt_missing_row_with_static(cql, table1):
    p = unique_key_int()
    # Insert into partition p just the static column - and no clustering rows.
    cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
    # Now, do an update with WHERE p={p} AND c=1. This clustering row does
    # *not* exist, so we expect to see r=null - and s=1 from before.
    r = list(
        cql.execute(
            f'UPDATE {table1} SET s=2,r=1 WHERE p={p} AND c=1 IF s=1 and r=null'
        ))
    assert len(r) == 1
    assert r[0].applied == True
    # At this point we should have one row, for c=1
    assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [(p, 1,
                                                                         2, 1)]
Example #20
def test_missing_row_with_static(cql, table1):
    p = unique_key_int()
    # Insert into partition p just the static column and one clustering row c=2
    cql.execute(f'INSERT INTO {table1}(p, s, c, r) values ({p}, 1, 2, 3)')
    # If we SELECT row c=2, we get it and the static column:
    assert list(
        cql.execute(
            f'SELECT p, s, c, r FROM {table1} WHERE p={p} AND c=2')) == [
                (p, 1, 2, 3)
            ]
    # If we SELECT row c=1 (which doesn't exist), we get nothing - not even
    # the static column
    assert list(
        cql.execute(
            f'SELECT p, s, c, r FROM {table1} WHERE p={p} AND c=1')) == []
Example #21
def test_wrong_set_order_in_nested_2(cql, table1):
    k = unique_key_int()
    insert = cql.prepare(f"INSERT INTO {table1} (k) VALUES (?)")
    # Insert map with frozen-set key in "wrong" order:
    cql.execute(insert, [{tuple([k + 1, k, k + 2]): 1}])
    # This is an inefficient scan of the entire table; it may see more than
    # the item we just inserted, but that doesn't matter.
    for row in cql.execute(f"SELECT * FROM {table1}"):
        assert isinstance(row.k, OrderedMapSerializedKey)
        # The Python driver implements k.items() inefficiently - it has
        # a list of keys as SortedSet objects, and converts them back to
        # serialized strings to look them up in k._index. But this
        # conversion re-sorts the set and then it can't be found because
        # of issue #7856, so k.items() will throw KeyError when iterated.
        list(row.k.items())
Example #22
def test_select_json_function_call(cql, table1):
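    # Check the key names that SELECT JSON generates for various function
    # calls - aggregates, ttl(), writetime(), casts and system functions -
    # and the JSON formatting of their results.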
    p = unique_key_int()
    cql.execute(f"INSERT INTO {table1} (p, v) VALUES ({p}, 17) USING TIMESTAMP 1234")
    input_and_output = {
        'v':                       '{"v": 17}',
        'count(*)':                '{"count": 1}',
        'ttl(v)':                  '{"ttl(v)": null}',
        'writetime(v)':            '{"writetime(v)": 1234}',
        'intAsBlob(v)':            '{"system.intasblob(v)": "0x00000011"}',
        'blobasInt(intAsBlob(v))': '{"system.blobasint(system.intasblob(v))": 17}',
        'tojson(v)':               '{"system.tojson(v)": "17"}',
        'CAST(v AS FLOAT)':        '{"cast(v as float)": 17.0}',
    }
    for input, output in input_and_output.items():
        assert list(cql.execute(f"SELECT JSON {input} from {table1} where p = {p}")) == [(EquivalentJson(output),)]
Example #23
def test_negative_timestamp(cql, table1):
    p = unique_key_int()
    write = cql.prepare(
        f"INSERT INTO {table1} (k, v) VALUES (?, ?) USING TIMESTAMP ?")
    read = cql.prepare(f"SELECT writetime(v) FROM {table1} where k = ?")
    # Note we need to order the loop in increasing timestamp if we want
    # the read to see the latest value:
    for ts in [-2**63 + 1, -100, -1]:
        print(ts)
        cql.execute(write, [p, 1, ts])
        assert ts == cql.execute(read, [p]).one()[0]
    # The specific value -2**63 is not allowed as a timestamp - although it
    # is a legal signed 64-bit integer, it is reserved to mean "not deleted"
    # in the deletion time of cells.
    with pytest.raises(InvalidRequest, match='bound'):
        cql.execute(write, [p, 1, -2**63])
Example #24
def test_per_query_timeout_effective(scylla_only, cql, table1):
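    # Check that a per-query timeout of 0ms (USING TIMEOUT 0ms) causes
    # reads and writes to fail with a timeout error.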
    table = table1
    key = unique_key_int()
    with pytest.raises(ReadTimeout):
        cql.execute(f"SELECT * FROM {table} USING TIMEOUT 0ms")
    with pytest.raises(WriteTimeout):
        cql.execute(
            f"INSERT INTO {table} (p,c,v) VALUES ({key},1,1) USING TIMEOUT 0ms"
        )
    with pytest.raises(WriteTimeout):
        cql.execute(
            f"UPDATE {table} USING TIMEOUT 0ms SET v = 5 WHERE p = {key} AND c = 1"
        )
    with pytest.raises(WriteTimeout):
        cql.execute(f"DELETE FROM {table} USING TIMEOUT 0ms WHERE p = {key}")
    with pytest.raises(WriteTimeout):
        cql.execute(
            f"DELETE FROM {table} USING TIMEOUT 0ms AND timestamp 42 WHERE p = {key}"
        )
Example #25
def test_mix_per_query_timeout_with_other_params(scylla_only, cql, table1):
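    # Check that USING TIMEOUT can be combined with TTL and TIMESTAMP in
    # either order, and that those other parameters still take effect.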
    table = table1
    key = unique_key_int()
    cql.execute(
        f"INSERT INTO {table} (p,c,v) VALUES ({key},1,1) USING TIMEOUT 60m AND TTL 1000000 AND TIMESTAMP 321"
    )
    cql.execute(
        f"INSERT INTO {table} (p,c,v) VALUES ({key},2,1) USING TIMESTAMP 42 AND TIMEOUT 30m"
    )
    res = list(
        cql.execute(
            f"SELECT ttl(v), writetime(v) FROM {table} WHERE p = {key} and c = 1"
        ))
    assert len(res) == 1 and res[0].ttl_v > 0 and res[0].writetime_v == 321
    res = list(
        cql.execute(
            f"SELECT ttl(v), writetime(v) FROM {table} WHERE p = {key} and c = 2"
        ))
    assert len(res) == 1 and not res[0].ttl_v and res[0].writetime_v == 42
Example #26
def test_static_not_selected(cql, table1):
    p = unique_key_int()
    # The partition p doesn't exist, so the following select yields nothing:
    assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == []
    # Insert just the static column, and no clustering row:
    cql.execute(f'INSERT INTO {table1} (p, s) values ({p}, 1)')
    # If we select all the columns, including the static column s, SELECTing
    # the partition gives us one "row" with the static column set and all
    # other columns set to null - otherwise the static column cannot be
    # returned.
    assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [
        (p, None, 1, None)
    ]
    # But what happens if we SELECT just regular (non-static) columns?
    # Should the SELECT return nothing (since there is no clustering row),
    # or return one "row" with null columns (basically like the previous
    # SELECT, restricted to the selected column)?
    # Currently, Cassandra does the former, Scylla does the latter,
    # so the following assert fails on Scylla:
    assert list(cql.execute(f'SELECT r FROM {table1} WHERE p={p}')) == []
Example #27
def test_lwt_static_condition(cql, table1):
    p = unique_key_int()
    cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
    # When the condition only mentions static (partition-wide) columns,
    # it is allowed not to specify the clustering key in the WHERE:
    r = list(cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF s=1'))
    assert len(r) == 1
    assert r[0].applied == True
    assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [
        (p, None, 2, None)
    ]
    # When the condition also mentions a non-static column, WHERE must point
    # to a specific clustering row, i.e., mention the clustering key. If the
    # clustering key is missing, we get an InvalidRequest error, where the
    # message is slightly different between Scylla and Cassandra ("Missing
    # mandatory PRIMARY KEY part c" and "Some clustering keys are missing: c",
    # respectively).
    with pytest.raises(InvalidRequest,
                       match=re.compile('missing', re.IGNORECASE)):
        cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF r=1')
Example #28
def test_type_duration_human_readable_input_units(cql, table1):
    # Map of allowed units and their expected meaning.
    units = {
        'y': Duration(12, 0, 0),
        'mo': Duration(1, 0, 0),
        'w': Duration(0, 7, 0),
        'd': Duration(0, 1, 0),
        'h': Duration(0, 0, 3600000000000),
        'm': Duration(0, 0, 60000000000),
        's': Duration(0, 0, 1000000000),
        'ms': Duration(0, 0, 1000000),
        'us': Duration(0, 0, 1000),
        'ns': Duration(0, 0, 1),
        # An alias for "us" which should be supported, but wasn't (issue #8001)
        'µs': Duration(0, 0, 1000),
    }
    p = unique_key_int()
    for (unit, duration) in units.items():
        print(unit)
        cql.execute(f"INSERT INTO {table1} (p, d) VALUES ({p}, 1{unit})")
        assert list(cql.execute(f"SELECT d FROM {table1} where p = {p}")) == [(duration,)]
Example #29
def test_fromjson_null_constant(cql, table1):
    p = unique_key_int()
    # Check that a "null" JSON constant can be used to unset a column
    stmt = cql.prepare(f"INSERT INTO {table1} (p, v) VALUES (?, fromJson(?))")
    cql.execute(stmt, [p, '1'])
    assert list(cql.execute(f"SELECT p, v from {table1} where p = {p}")) == [(p, 1)]
    cql.execute(stmt, [p, 'null'])
    assert list(cql.execute(f"SELECT p, v from {table1} where p = {p}")) == [(p, None)]
    # Check that a "null" JSON constant can be used to unset part of a tuple
    stmt = cql.prepare(f"INSERT INTO {table1} (p, tup) VALUES (?, fromJson(?))")
    cql.execute(stmt, [p, '["a", 1]'])
    assert list(cql.execute(f"SELECT p, tup from {table1} where p = {p}")) == [(p, ('a', 1))]
    cql.execute(stmt, [p, '["a", null]'])
    assert list(cql.execute(f"SELECT p, tup from {table1} where p = {p}")) == [(p, ('a', None))]
    cql.execute(stmt, [p, '[null, 2]'])
    assert list(cql.execute(f"SELECT p, tup from {table1} where p = {p}")) == [(p, (None, 2))]
    # However, a "null" JSON constant is not allowed everywhere that a
    # normal value is allowed. E.g., it cannot be part of a list. Let's
    # verify that we didn't overdo the fix.
    stmt = cql.prepare(f"INSERT INTO {table1} (p, l) VALUES (?, fromJson(?))")
    with pytest.raises(FunctionFailure):
        cql.execute(stmt, [p, '["a", null]'])
Example #30
def test_fromjson_nonint_unprepared(cql, table1):
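    # Check that fromJson() rejects a non-integer JSON number for an int
    # column.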
    p = unique_key_int()
    with pytest.raises(FunctionFailure):
        cql.execute(f"INSERT INTO {table1} (p, v) VALUES ({p}, fromJson('1.2'))")