def unwrap_u64(vt, val):
    # Every known value type is converted to uint64 the same way; the branch
    # on vt only guards against an unknown value type.
    if vt == ValTypeI32:
        return uint64(val)
    elif vt == ValTypeI64:
        return uint64(val)
    elif vt == ValTypeF32:
        return uint64(val)
    elif vt == ValTypeF64:
        return uint64(val)
    else:
        raise Exception("unreachable")

def __trunc_sat_u(z, n):
    # Saturating float-to-unsigned truncation for an n-bit target:
    # NaN maps to 0, values below 0 saturate to 0, and values at or above
    # the maximum (including +inf) saturate to the maximum.
    if math.isnan(z):
        return 0
    if z == -math.inf:
        return 0
    max_value = (uint64(1) << n) - 1
    if math.isinf(z):
        return max_value
    x = math.trunc(z)
    if x < 0:
        return 0
    elif x >= float64(max_value):
        return max_value
    else:
        return uint64(x)

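# Illustrative sanity check of the saturating behaviour above (not part of the
# interpreter; the helper name below is made up for this example). The expected
# values follow directly from the branches of __trunc_sat_u; n=32 keeps the
# maximum (2**32 - 1) easy to read.
def __check_trunc_sat_u_examples():
    assert __trunc_sat_u(float("nan"), 32) == 0       # NaN saturates to 0
    assert __trunc_sat_u(-math.inf, 32) == 0          # -inf saturates to 0
    assert __trunc_sat_u(math.inf, 32) == 2**32 - 1   # +inf saturates to the maximum
    assert __trunc_sat_u(-0.5, 32) == 0               # negative values clamp to 0
    assert __trunc_sat_u(5e9, 32) == 2**32 - 1        # above the maximum: clamp
    assert __trunc_sat_u(42.9, 32) == 42              # in range: plain truncation
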
def i64_trunc_f64u(vm, _):
    f = vm.pop_f64()
    # Check for NaN/inf before math.trunc(): CPython raises ValueError /
    # OverflowError when truncating non-finite floats, which would bypass the
    # wasm trap errors below.
    if math.isnan(f):
        raise ErrConvertToInt
    if math.isinf(f):
        raise ErrIntOverflow
    f = math.trunc(f)
    if f >= __MaxUint64 or f < 0:
        raise ErrIntOverflow
    vm.push_u64(uint64(f))

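# Why the NaN/inf guards above come before math.trunc(): CPython refuses to
# truncate non-finite floats, so without the guards the handler would escape
# with the wrong exception instead of raising the wasm trap errors. A small
# illustration (the demo function name is made up, not part of the interpreter):
def __demo_trunc_non_finite():
    for bad in (float("nan"), math.inf, -math.inf):
        try:
            math.trunc(bad)
        except (ValueError, OverflowError) as err:
            print(f"math.trunc({bad!r}) -> {type(err).__name__}: {err}")
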
def i64_clz(vm, _):
    # i64.clz: count leading zero bits.
    vm.push_u64(uint64(__leading_zeros64(vm.pop_u64())))

def i64_pop_cnt(vm, _):
    # i64.popcnt: count set bits.
    vm.push_u64(uint64(__ones_count64(vm.pop_u64())))

def i64_extend_i32u(vm, _):
    # i64.extend_i32_u: zero-extend a 32-bit value to 64 bits.
    vm.push_u64(uint64(vm.pop_u32()))

def push_u32(self, val):
    # Store a 32-bit value in a uint64 slot via push_numeric.
    self.push_numeric(uint64(val))

def i64_ctz(vm, _):
    # i64.ctz: count trailing zero bits.
    vm.push_u64(uint64(__trailing_zeros64(vm.pop_u64())))

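# The bit-counting helpers used by i64_clz/i64_ctz/i64_pop_cnt are not shown in
# this section. A minimal pure-Python sketch of what they could look like,
# assuming the argument is a non-negative int already masked to 64 bits (the
# real implementations may differ):
def __leading_zeros64(x):
    # zero bits above the most significant set bit (64 when x == 0)
    return 64 - x.bit_length()

def __trailing_zeros64(x):
    # zero bits below the least significant set bit (64 when x == 0)
    if x == 0:
        return 64
    return (x & -x).bit_length() - 1

def __ones_count64(x):
    # population count: number of set bits
    return bin(x).count("1")
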
def push_u64(self, val):
    self.slots.append(uint64(val))

def pop_u64(self) -> uint64:
    return uint64(self.slots.pop())

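# push_numeric and the slots container are not defined in this section. For
# context, a minimal stand-in operand stack that the methods above would fit
# into, assuming slots is a plain Python list of raw uint64 values (the class
# name and the push_numeric behaviour here are assumptions, not the real API):
class _OperandStackSketch:
    def __init__(self):
        self.slots = []

    def push_numeric(self, val):
        self.slots.append(val)

    def push_u32(self, val):
        self.push_numeric(uint64(val))

    def push_u64(self, val):
        self.slots.append(uint64(val))

    def pop_u64(self):
        return uint64(self.slots.pop())
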
def i64_load_32u(vm, mem_arg):
    # i64.load32_u: load 32 bits and zero-extend to 64.
    val = read_u32(vm, mem_arg)
    vm.push_u64(uint64(val))

def i64_load_16u(vm, mem_arg):
    # i64.load16_u: load 16 bits and zero-extend to 64.
    val = read_u16(vm, mem_arg)
    vm.push_u64(uint64(val))

def i64_load_8u(vm, mem_arg):
    # i64.load8_u: load 8 bits and zero-extend to 64.
    val = read_u8(vm, mem_arg)
    vm.push_u64(uint64(val))

def get_offset(vm, mem_arg):
    # Effective address = 32-bit base address popped from the stack plus the
    # static offset from the instruction immediate, computed in 64 bits so the
    # sum cannot wrap around.
    offset = mem_arg.offset
    return uint64(vm.pop_u32()) + uint64(offset)

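# Worked example of why the sum is done in 64 bits: both the popped base
# address and the static offset are 32-bit values, so the effective address
# can need 33 bits; with 32-bit wrap-around the access below would silently
# alias a low address instead of being caught as out of bounds. (The demo
# function is illustrative only.)
def __demo_effective_address():
    base = 0xFFFF_F000            # popped i32 base address
    offset = 0x0000_2000          # static offset from the memarg immediate
    assert base + offset == 0x1_0000_1000  # does not fit in 32 bits
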
def read_u64(vm, mem_arg):
    # Read 8 bytes at the effective address and decode them as a
    # little-endian unsigned 64-bit integer.
    buf = [0x00] * 8
    offset = get_offset(vm, mem_arg)
    buf = vm.memory.read(offset, buf)
    return uint64(int.from_bytes(bytearray(buf), byteorder='little'))

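# Little-endian decoding example (illustrative only): the byte at the lowest
# address becomes the least significant byte of the result.
def __demo_little_endian():
    buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]
    assert int.from_bytes(bytearray(buf), byteorder='little') == 0x0807060504030201
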