Example #1
File: numeric.py Project: pycket/pycket
def float_bytes_to_real(bytes, signed):
    # XXX Currently does not make use of the signed parameter
    if len(bytes) not in (4, 8):
        raise SchemeException(
                "floating-point-bytes->real: byte string must have length 2, 4, or 8")

    try:
        if objectmodel.we_are_translated():
            val = rarithmetic.r_int64(0)
            for i, v in enumerate(bytes):
                val += rarithmetic.r_int64(ord(v)) << (i * 8)
            return values.W_Flonum(longlong2float.longlong2float(val))
        else:
            # use unsigned to avoid rlib bug
            val = rarithmetic.r_uint64(0)
            for i, v in enumerate(bytes):
                val += rarithmetic.r_uint64(ord(v)) << (i * 8)
            return values.W_Flonum(pycket_longlong2float(val))
    except OverflowError as e:
        # Uncomment the check below to run Pycket on the
        # interpreter with compiled (zo) files
        # (fasl makes a call that blows the longlong2float on rpython)

        # if val == 18442240474082181120L:
        #     return values.W_Flonum.NEGINF
        raise SchemeException("RPython overflow: %s" % e)
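For the 8-byte case, the accumulate-and-reinterpret loop above is equivalent to unpacking a little-endian IEEE-754 double. A standalone sketch (plain Python, not part of the original source; the 4-byte case would additionally need a single-precision reinterpret such as rlib's longlong2float.uint2singlefloat):

import struct

def float_bytes_to_real_sketch(bs):
    # little-endian bytes -> double, same result as the loop above
    assert len(bs) == 8
    return struct.unpack('<d', bs)[0]

assert float_bytes_to_real_sketch(b'\x00\x00\x00\x00\x00\x00\xf0\x3f') == 1.0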
Example #2
def choosen_seed(new_k0, new_k1, test_misaligned_path=False):
    """For tests."""
    global misaligned_is_fine
    old = seed.k0l, seed.k1l, misaligned_is_fine
    seed.k0l = _le64toh(r_uint64(new_k0))
    seed.k1l = _le64toh(r_uint64(new_k1))
    if test_misaligned_path:
        misaligned_is_fine = False
    yield
    seed.k0l, seed.k1l, misaligned_is_fine = old
Example #3
def choosen_seed(new_k0, new_k1, test_misaligned_path=False,
                 test_prebuilt=False):
    """For tests."""
    global misaligned_is_fine, seed
    old = seed, misaligned_is_fine
    seed = Seed()
    seed.k0l = r_uint64(new_k0)
    seed.k1l = r_uint64(new_k1)
    if test_prebuilt:
        _update_prebuilt_hashes()
    else:
        seed.bound_prebuilt_size = 0
    if test_misaligned_path:
        misaligned_is_fine = False
    yield
    seed, misaligned_is_fine = old
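A hedged usage sketch (not from the original source): since the function yields once and restores the saved state afterwards, it is presumably decorated with contextlib.contextmanager in the full file, so a test can pin the SipHash key temporarily:

# hypothetical test snippet, assuming the @contextmanager decoration
with choosen_seed(0x0706050403020100, 0x0f0e0d0c0b0a0908, test_prebuilt=True):
    pass  # run hash-sensitive assertions here with a fixed, known key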
Example #4
def _decode64(s):
    return (r_uint64(ord(s[0])) |
            r_uint64(ord(s[1])) << 8 |
            r_uint64(ord(s[2])) << 16 |
            r_uint64(ord(s[3])) << 24 |
            r_uint64(ord(s[4])) << 32 |
            r_uint64(ord(s[5])) << 40 |
            r_uint64(ord(s[6])) << 48 |
            r_uint64(ord(s[7])) << 56)
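A standalone check (not from the original source): _decode64 assembles the eight bytes little-endian, first byte least significant, so it agrees with a little-endian unsigned 64-bit unpack:

import struct
s = b'\x01\x02\x03\x04\x05\x06\x07\x08'
assert struct.unpack('<Q', s)[0] == 0x0807060504030201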
Example #5
def test_sign_when_casting_uint_to_larger_int():
    from rpython.rtyper.lltypesystem import rffi
    from rpython.rlib.rarithmetic import r_uint32, r_uint64
    #
    value = 0xAAAABBBB
    assert cast(lltype.SignedLongLong, r_uint32(value)) == value
    if hasattr(rffi, '__INT128_T'):
        value = 0xAAAABBBBCCCCDDDD
        assert cast(rffi.__INT128_T, r_uint64(value)) == value
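In other words, the test pins down zero-extension: casting an unsigned 32-bit (or 64-bit) value to a wider signed type must keep 0xAAAABBBB positive rather than sign-extending its top bit.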
Example #6
def _update_prebuilt_hashes():
    seed.bound_prebuilt_size = 0
    with lltype.scoped_alloc(rffi.CCHARP.TO, 1) as p:
        addr = llmemory.cast_ptr_to_adr(p)
        seed.hash_single = [r_uint64(0)] * 256
        for i in range(256):
            p[0] = chr(i)
            seed.hash_single[i] = _siphash24(addr, 1)
        seed.hash_empty = _siphash24(addr, 0)
    seed.bound_prebuilt_size = 2
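Note the ordering: bound_prebuilt_size is dropped to 0 while the table is rebuilt and only raised to 2 at the end, so the fast path in _siphash24 (which consults hash_empty and hash_single for inputs of size 0 or 1) never reads a half-initialized table.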
Example #7
def source_hash(space, magic, content):
    from rpython.rlib.rsiphash import siphash24_with_key
    from rpython.rlib.rarithmetic import r_uint64
    h = siphash24_with_key(content, r_uint64(magic))
    res = [b"x"] * 8
    for i in range(8):
        res[i] = chr(h & 0xff)
        h >>= 8
    assert not h
    return space.newbytes(b"".join(res))
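The loop serializes the 64-bit hash little-endian, one byte at a time. A standalone equivalence check (plain Python, not from the original source):

import struct
h = 0x0123456789abcdef
assert struct.pack('<Q', h) == b'\xef\xcd\xab\x89\x67\x45\x23\x01'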
Example #8
File: rutf8.py Project: zcxowwww/pypy
        builder.append(chr((0xf0 | (code >> 18))))
        builder.append(chr((0x80 | ((code >> 12) & 0x3f))))
        builder.append(chr((0x80 | ((code >> 6) & 0x3f))))
        builder.append(chr((0x80 | (code & 0x3f))))
        return
    raise OutOfRange(code)


# note - table lookups are really slow. Measured on various elements of obama
#        chinese wikipedia, they're anywhere between 10% and 30% slower.
#        In extreme cases (small, only chinese text), they're 40% slower

#        The following was found by hand to be more optimal than both,
#        on x86-64...
_is_64bit = sys.maxint > 2**32
_constant_ncp = rarithmetic.r_uint64(0xffff0000ffffffff)


@always_inline
def next_codepoint_pos(code, pos):
    """Gives the position of the next codepoint after pos.
    Assumes valid utf8.  'pos' must be before the end of the string.
    """
    assert pos >= 0
    chr1 = ord(code[pos])
    if chr1 <= 0x7F:
        return pos + 1
    if _is_64bit and not jit.we_are_jitted():
        # optimized for Intel x86-64 by hand
        res = pos + 1 + (((chr1 > 0xDF) << 1) + rarithmetic.intmask(
            (_constant_ncp >> (chr1 & 0x3F)) & 1))
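The expression encodes the UTF-8 sequence length without a table: (chr1 > 0xDF) << 1 contributes 2 for three- and four-byte lead bytes, and bit (chr1 & 0x3F) of _constant_ncp contributes 1 for two- and four-byte lead bytes. A standalone check (plain Python, not from the original source) that this agrees with a naive length table for every valid lead byte:

_constant_ncp = 0xffff0000ffffffff

def _length_via_mask(chr1):
    # same arithmetic as in next_codepoint_pos above
    return 1 + ((chr1 > 0xDF) << 1) + ((_constant_ncp >> (chr1 & 0x3F)) & 1)

for chr1 in range(0xC0, 0xF8):
    if chr1 <= 0xDF:
        expected = 2   # 110xxxxx: two-byte sequence
    elif chr1 <= 0xEF:
        expected = 3   # 1110xxxx: three-byte sequence
    else:
        expected = 4   # 11110xxx: four-byte sequence
    assert _length_via_mask(chr1) == expected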
Example #9
class Seed:
    k0l = k1l = r_uint64(0)
Example #10
def _siphash24(addr_in, size, SZ=1):
    """Takes an address pointer and a size.  Returns the hash as a r_uint64,
    which can then be casted to the expected type."""

    if BIG_ENDIAN:
        index = SZ - 1
    else:
        index = 0
    if size < seed.bound_prebuilt_size:
        if size <= 0:
            return seed.hash_empty
        else:
            t = rarithmetic.intmask(llop.raw_load(rffi.UCHAR, addr_in, index))
            return seed.hash_single[t]

    k0 = seed.k0l
    k1 = seed.k1l
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    direct = (SZ == 1) and (misaligned_is_fine or
                            (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    if direct:
        assert SZ == 1
        while size >= 8:
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index)) | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ))
                << 16 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ))
                << 32 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ))
                << 48 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 7 * SZ)) << 56)
            size -= 8
            index += 8 * SZ
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32
        size = 4
    if size == 4:
        if direct:
            v = _le32toh(r_uint32(llop.raw_load(rffi.UINT, addr_in, index)))
            t |= r_uint64(v)
            size = 0
        else:
            t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in,
                                        index + 3 * SZ)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    b |= t

    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)

    return (v0 ^ v1) ^ (v2 ^ v3)
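Note the round structure that gives SipHash-2-4 its name: each 8-byte message word is mixed in with one _double_round call (two SipRounds, the "2"), and finalization, after XORing 0xff into v2, runs _double_round twice more (four SipRounds, the "4").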
Example #11
    global misaligned_is_fine, seed
    old = seed, misaligned_is_fine
    seed = Seed()
    seed.k0l = r_uint64(new_k0)
    seed.k1l = r_uint64(new_k1)
    if test_prebuilt:
        _update_prebuilt_hashes()
    else:
        seed.bound_prebuilt_size = 0
    if test_misaligned_path:
        misaligned_is_fine = False
    yield
    seed, misaligned_is_fine = old


magic0 = r_uint64(0x736f6d6570736575)
magic1 = r_uint64(0x646f72616e646f6d)
magic2 = r_uint64(0x6c7967656e657261)
magic3 = r_uint64(0x7465646279746573)
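# (The four magic constants above are the ASCII bytes of the string
# "somepseudorandomlygeneratedbytes", SipHash's standard initialization vector.)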


@always_inline
def _rotate(x, b):
    return (x << b) | (x >> (64 - b))


@always_inline
def _half_round(a, b, c, d, s, t):
    a += b
    c += d
    b = _rotate(b, s) ^ a
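The snippet is cut off here by the scraper; in the SipHash reference design, _half_round continues symmetrically (a sketch of the standard remainder, not verified against this exact file):

    d = _rotate(d, t) ^ c
    a = _rotate(a, 32)
    return a, b, c, d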
Example #12
def _siphash24(addr_in, size):
    """Takes an address pointer and a size.  Returns the hash as a r_uint64,
    which can then be casted to the expected type."""

    k0 = seed.k0l
    k1 = seed.k1l
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    direct = (misaligned_is_fine
              or (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    index = 0
    if direct:
        while size >= 8:
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1)) << 8
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2)) << 16
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 3)) << 24
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4)) << 32
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5)) << 40
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6)) << 48
                |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 7)) << 56)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4)) << 32
        size = 4
    if size == 4:
        if direct:
            t |= r_uint64(llop.raw_load(rffi.UINT, addr_in, index))
            size = 0
        else:
            t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 3)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    b |= _le64toh(t)

    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)

    return (v0 ^ v1) ^ (v2 ^ v3)
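This is an earlier revision of the function from Example #10: it lacks the SZ stride, the big-endian starting offset, and the prebuilt-hash fast path, and it handles endianness by byte-swapping the assembled tail (b |= _le64toh(t)) rather than reading bytes at endian-adjusted offsets.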
Example #13
def test_siphash24_with_key():
    from rpython.rlib.rarithmetic import r_uint64
    assert siphash24_with_key(b"abcdef", r_uint64(1)) == r_uint64(7956077396882317016L)
Example #14
def _siphash24_with_key(addr_in, size, k0, k1, SZ=1):
    if BIG_ENDIAN:
        index = SZ - 1
    else:
        index = 0
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    direct = (SZ == 1) and (misaligned_is_fine or
                            (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    if direct:
        assert SZ == 1
        while size >= 8:
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index)) | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ))
                << 16 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ))
                << 32 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
                | r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ))
                << 48 | r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 7 * SZ)) << 56)
            size -= 8
            index += 8 * SZ
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32
        size = 4
    if size == 4:
        if direct:
            v = _le32toh(r_uint32(llop.raw_load(rffi.UINT, addr_in, index)))
            t |= r_uint64(v)
            size = 0
        else:
            t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in,
                                        index + 3 * SZ)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    b |= t

    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)

    return (v0 ^ v1) ^ (v2 ^ v3)
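A note on the SZ parameter: the loads step through the buffer with a stride of SZ bytes per element, and on big-endian machines the starting offset SZ - 1 selects the least significant byte of each SZ-byte element. Presumably this lets the same loop hash both plain byte strings (SZ == 1) and wide-character buffers whose code points all fit in one byte.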