def read_int_at_mem(self, gcref, ofs, size, sign):
    """Read a `size`-byte integer at byte offset `ofs` inside `gcref`
    and return it widened to a full machine word (lltype.Signed),
    sign-extended if `sign` is true, zero-extended otherwise.

    Raises NotImplementedError if `size` is not one of the basic
    integer sizes.
    """
    # `unroll_basic_sizes` is iterated at translation time so that each
    # (signed type, unsigned type, size) triple produces a specialized
    # raw_load — presumably an RPython unrolling_iterable; the loop
    # shape must stay as-is for that to work.
    for STYPE, UTYPE, itemsize in unroll_basic_sizes:
        if size == itemsize:
            if sign:
                # Load with the signed type of this width, then cast:
                # the cast sign-extends to a full word.
                val = llop.raw_load(STYPE, gcref, ofs)
                val = rffi.cast(lltype.Signed, val)
            else:
                # Load with the unsigned type: the cast zero-extends.
                val = llop.raw_load(UTYPE, gcref, ofs)
                val = rffi.cast(lltype.Signed, val)
            return val
    else:
        # for/else: no basic size matched `size`.
        raise NotImplementedError("size = %d" % size)
def get_or_make_raw():
    """Return the current raw value of this thread-local field.

    Untranslated, the value lives on a regular `threading.local`-style
    object; translated, it is loaded straight out of the C-level
    thread-local memory block.
    """
    # Untranslated fallback: plain attribute lookup, `zero` if unset.
    if not we_are_translated():
        return getattr(self.local, 'rawvalue', zero)
    # Translated path: make sure the translator has seen this
    # thread-local ref, then load the field from its address.
    _threadlocalref_seeme(self)
    base = llop.threadlocalref_addr(llmemory.Address)
    return llop.raw_load(FIELDTYPE, base, offset)
def typed_read(self, TP, byte_offset):
    """Read and return the value of type TP starting at byte_offset.

    No bounds checking is performed.  Raises CannotRead when
    byte_offset is not correctly aligned for TP.
    """
    if is_alignment_correct(TP, byte_offset):
        raw = self.get_raw_address()
        return llop.raw_load(TP, raw, byte_offset)
    raise CannotRead
def _siphash24(addr_in, size, SZ=1):
    """Takes an address pointer and a size.  Returns the hash as a
    r_uint64, which can then be casted to the expected type."""
    # Very short inputs have precomputed answers stored on `seed`.
    if size < seed.bound_prebuilt_size:
        if size <= 0:
            return seed.hash_empty
        # One significant byte: look it up in the prebuilt table.
        # On big-endian machines the low byte of the first SZ-wide
        # item sits at offset SZ - 1.
        if BIG_ENDIAN:
            first_ofs = SZ - 1
        else:
            first_ofs = 0
        byte = rarithmetic.intmask(
            llop.raw_load(rffi.UCHAR, addr_in, first_ofs))
        return seed.hash_single[byte]
    # General case: run the full SipHash core with the global seed key.
    return _siphash24_with_key(addr_in, size, seed.k0l, seed.k1l, SZ)
def _siphash24(addr_in, size, SZ=1):
    """Takes an address pointer and a size.  Returns the hash as a
    r_uint64, which can then be casted to the expected type.

    SipHash-2-4 (per the function name): two v-rounds per 8-byte input
    word, four v-rounds of finalization (each `_double_round` call is
    two rounds).  `SZ` appears to be the byte stride between consecutive
    significant input bytes (each load reads one UCHAR at
    `index + i * SZ`) — inferred from the indexing; confirm with callers.
    """
    # Low byte of an SZ-wide item is at offset SZ - 1 on big-endian.
    if BIG_ENDIAN:
        index = SZ - 1
    else:
        index = 0
    # Precomputed answers for very short inputs.
    if size < seed.bound_prebuilt_size:
        if size <= 0:
            return seed.hash_empty
        else:
            t = rarithmetic.intmask(llop.raw_load(rffi.UCHAR, addr_in, index))
            return seed.hash_single[t]
    k0 = seed.k0l
    k1 = seed.k1l
    # `b` holds the final block: length in the top byte, tail bytes below.
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    # Fast path: contiguous bytes (SZ == 1) and either misaligned loads
    # are fine on this CPU or the pointer is 8-byte aligned.
    direct = (SZ == 1) and (misaligned_is_fine or
                 (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    if direct:
        assert SZ == 1
        while size >= 8:
            # Whole 64-bit load, byte-swapped to little-endian order.
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            # Assemble the 64-bit word byte by byte (already LE order),
            # stepping SZ bytes between significant input bytes.
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index)) |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 7 * SZ)) << 56)
            size -= 8
            index += 8 * SZ
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    # Gather the 0..7 remaining tail bytes into `t` (LE order).
    # The `if` chain falls through: size 7 also executes 6, 5, ... 1.
    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32
        size = 4
    if size == 4:
        if direct:
            # Aligned fast path: one 32-bit load covers the last 4 bytes.
            v = _le32toh(r_uint32(llop.raw_load(rffi.UINT, addr_in, index)))
            t |= r_uint64(v)
            size = 0
        else:
            t |= r_uint64(
                llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    # Final block: tail bytes plus (size << 56), then finalization.
    b |= t
    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    return (v0 ^ v1) ^ (v2 ^ v3)
def read_float_at_mem(self, gcref, ofs):
    """Return the FLOATSTORAGE value stored at byte offset `ofs`
    inside `gcref`."""
    value = llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs)
    return value
def read_ref_at_mem(self, gcref, ofs):
    """Return the GC reference stored at byte offset `ofs` inside
    `gcref`."""
    ref = llop.raw_load(llmemory.GCREF, gcref, ofs)
    return ref
def _siphash24(addr_in, size):
    """Takes an address pointer and a size.  Returns the hash as a
    r_uint64, which can then be casted to the expected type.

    SipHash-2-4 (per the function name): two v-rounds per 8-byte input
    word, four v-rounds of finalization (each `_double_round` call is
    two rounds).
    """
    k0 = seed.k0l
    k1 = seed.k1l
    # `b` holds the final block: length in the top byte, tail bytes below.
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    # Fast path if misaligned 64-bit loads are fine on this CPU or the
    # pointer happens to be 8-byte aligned.
    direct = (misaligned_is_fine or
                 (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    index = 0
    if direct:
        while size >= 8:
            # Whole 64-bit load, byte-swapped to little-endian order.
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            # Assemble the 64-bit word byte by byte (already LE order).
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index)) |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1)) << 8 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2)) << 16 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 3)) << 24 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4)) << 32 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5)) << 40 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6)) << 48 |
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 7)) << 56)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    # Gather the 0..7 remaining tail bytes into `t`.
    # The `if` chain falls through: size 7 also executes 6, 5, ... 1.
    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4)) << 32
        size = 4
    if size == 4:
        if direct:
            # Aligned fast path: one 32-bit load covers the last 4 bytes.
            # NOTE(review): unlike the per-byte branch, this value is in
            # native byte order — the `_le64toh(t)` below appears to
            # account for that; confirm on big-endian targets.
            t |= r_uint64(llop.raw_load(rffi.UINT, addr_in, index))
            size = 0
        else:
            t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 3)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    # Final block: tail bytes (byte-swapped to LE) plus the length byte.
    b |= _le64toh(t)
    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    return (v0 ^ v1) ^ (v2 ^ v3)
def _siphash24_with_key(addr_in, size, k0, k1, SZ=1):
    """SipHash-2-4 core over `size` input bytes at `addr_in`, keyed by
    the two 64-bit halves `k0`, `k1`.  Returns a r_uint64.

    Each `_double_round` call is two SipHash v-rounds: two per 8-byte
    input word, four of finalization.  `SZ` appears to be the byte
    stride between consecutive significant input bytes (each load reads
    one UCHAR at `index + i * SZ`) — inferred from the indexing; confirm
    with callers.
    """
    # Low byte of an SZ-wide item is at offset SZ - 1 on big-endian.
    if BIG_ENDIAN:
        index = SZ - 1
    else:
        index = 0
    # `b` holds the final block: length in the top byte, tail bytes below.
    b = r_uint64(size) << 56
    v0 = k0 ^ magic0
    v1 = k1 ^ magic1
    v2 = k0 ^ magic2
    v3 = k1 ^ magic3

    # Fast path: contiguous bytes (SZ == 1) and either misaligned loads
    # are fine on this CPU or the pointer is 8-byte aligned.
    direct = (SZ == 1) and (misaligned_is_fine or
                 (rffi.cast(lltype.Signed, addr_in) & 7) == 0)
    if direct:
        assert SZ == 1
        while size >= 8:
            # Whole 64-bit load, byte-swapped to little-endian order.
            mi = llop.raw_load(rffi.ULONGLONG, addr_in, index)
            mi = _le64toh(mi)
            size -= 8
            index += 8
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi
    else:
        while size >= 8:
            # Assemble the 64-bit word byte by byte (already LE order),
            # stepping SZ bytes between significant input bytes.
            mi = (
                r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index)) |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48 |
                r_uint64(
                    llop.raw_load(rffi.UCHAR, addr_in, index + 7 * SZ)) << 56)
            size -= 8
            index += 8 * SZ
            v3 ^= mi
            v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
            v0 ^= mi

    # Gather the 0..7 remaining tail bytes into `t` (LE order).
    # The `if` chain falls through: size 7 also executes 6, 5, ... 1.
    t = r_uint64(0)
    if size == 7:
        t = r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 6 * SZ)) << 48
        size = 6
    if size == 6:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 5 * SZ)) << 40
        size = 5
    if size == 5:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 4 * SZ)) << 32
        size = 4
    if size == 4:
        if direct:
            # Aligned fast path: one 32-bit load covers the last 4 bytes.
            v = _le32toh(r_uint32(llop.raw_load(rffi.UINT, addr_in, index)))
            t |= r_uint64(v)
            size = 0
        else:
            t |= r_uint64(
                llop.raw_load(rffi.UCHAR, addr_in, index + 3 * SZ)) << 24
            size = 3
    if size == 3:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 2 * SZ)) << 16
        size = 2
    if size == 2:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index + 1 * SZ)) << 8
        size = 1
    if size == 1:
        t |= r_uint64(llop.raw_load(rffi.UCHAR, addr_in, index))
        size = 0
    assert size == 0

    # Final block: tail bytes plus (size << 56), then finalization.
    b |= t
    v3 ^= b
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0 ^= b
    v2 ^= 0xff
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = _double_round(v0, v1, v2, v3)
    return (v0 ^ v1) ^ (v2 ^ v3)