def _check_subclass(self, vtable1, vtable2):
    # checks that vtable1 is a subclass of vtable2
    known_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable1),
        rclass.CLASSTYPE)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable2),
        rclass.CLASSTYPE)
    if (expected_class.subclassrange_min
            <= known_class.subclassrange_min
            <= expected_class.subclassrange_max):
        return True
    return False
def _check_subclass(vtable1, vtable2):
    # checks that vtable1 is a subclass of vtable2
    known_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable1),
        rclass.CLASSTYPE)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable2),
        rclass.CLASSTYPE)
    # note: the test is for a range including 'max', but 'max'
    # should never be used for actual classes.  Including it makes
    # it easier to pass artificial tests.
    return (expected_class.subclassrange_min
                <= known_class.subclassrange_min
                <= expected_class.subclassrange_max)
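The snippets above compare the vtables' 'subclassrange' fields to decide subclass relationships without walking the class hierarchy. Below is a minimal pure-Python sketch (not RPython) of the underlying range-numbering idea: every class gets an interval from a depth-first preorder walk of the class tree, and B is a subclass of A exactly when B's minimum falls inside A's interval. ClassNode, number_classes and is_subclass are names invented for this illustration, not part of the original code.

class ClassNode(object):
    def __init__(self, name, subclasses=()):
        self.name = name
        self.subclasses = list(subclasses)
        self.range_min = self.range_max = -1

def number_classes(node, counter=0):
    # depth-first preorder numbering: a class's interval covers all of
    # its (direct and indirect) subclasses
    node.range_min = counter
    counter += 1
    for sub in node.subclasses:
        counter = number_classes(sub, counter)
    node.range_max = counter - 1      # id of the last class in this subtree
    return counter

def is_subclass(known, expected):
    # same shape as the test in _check_subclass() above
    return (expected.range_min <= known.range_min <= expected.range_max)

if __name__ == '__main__':
    c = ClassNode('C')
    b = ClassNode('B', [c])
    a = ClassNode('A', [b])
    d = ClassNode('D')
    root = ClassNode('object', [a, d])
    number_classes(root)
    assert is_subclass(c, a)          # C -> B -> A
    assert not is_subclass(d, a)      # D is a sibling, not a subclass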
def maybe_on_top_of_llinterp(self, func, args, RESULT):
    ptr = llmemory.cast_int_to_adr(func).ptr
    if hasattr(ptr._obj, 'graph'):
        res = self.llinterp.eval_graph(ptr._obj.graph, args)
    else:
        res = ptr._obj._callable(*args)
    return support.cast_result(RESULT, res)
def test_cast_adr_to_int():
    S = lltype.Struct('S')
    p = lltype.malloc(S, immortal=True)
    def fn(n):
        a = llmemory.cast_ptr_to_adr(p)
        if n == 2:
            return llmemory.cast_adr_to_int(a, "emulated")
        elif n == 4:
            return llmemory.cast_adr_to_int(a, "symbolic")
        else:
            return llmemory.cast_adr_to_int(a, "forced")
    res = interpret(fn, [2])
    assert is_valid_int(res)
    assert res == lltype.cast_ptr_to_int(p)
    #
    res = interpret(fn, [4])
    assert isinstance(res, llmemory.AddressAsInt)
    assert llmemory.cast_int_to_adr(res) == llmemory.cast_ptr_to_adr(p)
    #
    res = interpret(fn, [6])
    assert is_valid_int(res)
    from rpython.rtyper.lltypesystem import rffi
    assert res == rffi.cast(lltype.Signed, p)
def execute_guard_class(self, descr, arg, klass):
    value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, arg)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(klass),
        rclass.CLASSTYPE)
    if value.typeptr != expected_class:
        self.fail_guard(descr)
def execute_call_release_gil(self, descr, saveerr, func, *args):
    if hasattr(descr, '_original_func_'):
        func = descr._original_func_     # see pyjitpl.py
        # we want to call the function that does the aroundstate
        # manipulation here (as a hack, instead of really doing
        # the aroundstate manipulation ourselves)
        return self.execute_call_may_force(descr, func, *args)
    guard_op = self.lltrace.operations[self.current_index + 1]
    assert guard_op.getopnum() == rop.GUARD_NOT_FORCED
    self.force_guard_op = guard_op
    call_args = support.cast_call_args_in_order(descr.ARGS, args)
    #
    func_adr = llmemory.cast_int_to_adr(func)
    if hasattr(func_adr.ptr._obj, '_callable'):
        # this is needed e.g. by test_fficall.test_guard_not_forced_fails,
        # because to actually force the virtualref we need to llinterp the
        # graph, not to directly execute the python function
        result = self.cpu.maybe_on_top_of_llinterp(func, call_args,
                                                   descr.RESULT)
    else:
        FUNC = lltype.FuncType(descr.ARGS, descr.RESULT, descr.ABI)
        func_to_call = rffi.cast(lltype.Ptr(FUNC), func)
        result = func_to_call(*call_args)
    del self.force_guard_op
    return support.cast_result(descr.RESULT, result)
def int2adr(int): """ Cast an int back to an address. Inverse of adr2int(). """ return llmemory.cast_int_to_adr(int)
def walk_stack_root(invoke, arg0, arg1, arg2, start, addr, is_minor):
    skip = 0
    while addr != start:
        addr -= sizeofaddr
        #XXX reintroduce support for tagged values?
        #if gc.points_to_valid_gc_object(addr):
        #    callback(gc, addr)
        if skip & 1 == 0:
            content = addr.address[0]
            n = llmemory.cast_adr_to_int(content)
            if n & 1 == 0:
                if content:   # non-0, non-odd: a regular ptr
                    invoke(arg0, arg1, arg2, addr)
            else:
                # odd number: a skip bitmask
                if n > 0:       # initially, an unmarked value
                    if is_minor:
                        newcontent = llmemory.cast_int_to_adr(-n)
                        addr.address[0] = newcontent   # mark
                    skip = n
                else:
                    # a marked value
                    if is_minor:
                        return
                    skip = -n
        skip >>= 1
def maybe_on_top_of_llinterp(self, func, args, RESULT):
    ptr = llmemory.cast_int_to_adr(func).ptr
    if hasattr(ptr._obj, 'graph'):
        res = self.llinterp.eval_graph(ptr._obj.graph, args)
    else:
        res = ptr._obj._callable(*args)
    if RESULT is lltype.Void:
        return None
    return support.cast_result(RESULT, res)
def belongs_to_current_thread(framedata):
    # xxx obscure: the answer is Yes if, as a pointer, framedata
    # lies between the start of the current stack and the top of it.
    stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL)
    ll_assert(stack_start != llmemory.NULL,
              "current thread not found in gcdata.aid2stack!")
    stack_stop = llmemory.cast_int_to_adr(
        llop.stack_current(lltype.Signed))
    return (stack_start <= framedata <= stack_stop or
            stack_start >= framedata >= stack_stop)
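The final return expression accepts framedata regardless of the direction in which the stack grows. A tiny standalone illustration with plain integers (a sketch, not the RPython code):

def between_either_way(start, ptr, stop):
    # same shape as the 'return' above: ptr must lie between start and
    # stop, whichever of the two happens to be the larger address
    return start <= ptr <= stop or start >= ptr >= stop

assert between_either_way(100, 80, 0)        # stack growing downwards
assert between_either_way(0, 80, 100)        # stack growing upwards
assert not between_either_way(100, 200, 0)   # outside the stack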
def _next_id(self):
    # return an id not currently in use (as an address instead of an int)
    if self.id_free_list.non_empty():
        result = self.id_free_list.pop()    # reuse a dead id
    else:
        # make up a fresh id number
        result = llmemory.cast_int_to_adr(self.next_free_id)
        self.next_free_id += 2    # only odd numbers, to make lltype
                                  # and llmemory happy and to avoid
                                  # clashes with real addresses
    return result
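A pure-Python model of the id-allocation scheme used by _next_id() above: ids are odd integers (so they can never collide with real, aligned addresses) and dead ids go on a free list for reuse. IdAllocator and its starting value are invented for this sketch; the real code additionally stores each id as an address via llmemory.cast_int_to_adr().

class IdAllocator(object):
    def __init__(self):
        self.free_list = []
        self.next_free_id = 1          # start odd (made-up starting value)

    def next_id(self):
        if self.free_list:
            return self.free_list.pop()    # reuse a dead id
        result = self.next_free_id
        self.next_free_id += 2             # stay odd
        return result

    def free_id(self, ident):
        self.free_list.append(ident)

ids = IdAllocator()
a, b = ids.next_id(), ids.next_id()
ids.free_id(a)
assert ids.next_id() == a              # dead ids are recycled first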
def execute_guard_subclass(self, descr, arg, klass):
    value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, arg)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(klass),
        rclass.CLASSTYPE)
    if (expected_class.subclassrange_min
            <= value.typeptr.subclassrange_min
            <= expected_class.subclassrange_max):
        pass
    else:
        self.fail_guard(descr)
def cast_from_int(TYPE, x):
    if isinstance(TYPE, lltype.Ptr):
        if isinstance(x, (int, long, llmemory.AddressAsInt)):
            x = llmemory.cast_int_to_adr(x)
        #if repr(x.ptr).startswith('<* <C object '):    # pom pom pom
        #
        # assume that we want a "C-style" cast, without typechecking the value
        return rffi.cast(TYPE, x)
        #return llmemory.cast_adr_to_ptr(x, TYPE)
    elif TYPE == llmemory.Address:
        if isinstance(x, (int, long, llmemory.AddressAsInt)):
            x = llmemory.cast_int_to_adr(x)
        assert lltype.typeOf(x) == llmemory.Address
        return x
    elif TYPE is lltype.SingleFloat:
        assert lltype.typeOf(x) is lltype.Signed
        return longlong.int2singlefloat(x)
    else:
        if lltype.typeOf(x) == llmemory.Address:
            x = heaptracker.adr2int(x)
        return lltype.cast_primitive(TYPE, x)
def cast_from_int(TYPE, x):
    if isinstance(TYPE, lltype.Ptr):
        if isinstance(x, (int, long, llmemory.AddressAsInt)):
            x = llmemory.cast_int_to_adr(x)
        try:    # pom pom pom
            return llmemory.cast_adr_to_ptr(x, TYPE)
        except Exception:
            # assume that we want a "C-style" cast, without typechecking the value
            return rffi.cast(TYPE, x)
    elif TYPE == llmemory.Address:
        if isinstance(x, (int, long, llmemory.AddressAsInt)):
            x = llmemory.cast_int_to_adr(x)
        assert lltype.typeOf(x) == llmemory.Address
        return x
    elif TYPE is lltype.SingleFloat:
        assert lltype.typeOf(x) is lltype.Signed
        return longlong.int2singlefloat(x)
    else:
        if lltype.typeOf(x) == llmemory.Address:
            x = heaptracker.adr2int(x)
        return lltype.cast_primitive(TYPE, x)
def execute_guard_exception(self, descr, excklass):
    lle = self.last_exception
    if lle is None:
        gotklass = lltype.nullptr(rclass.CLASSTYPE.TO)
    else:
        gotklass = lle.args[0]
    excklass = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(excklass),
        rclass.CLASSTYPE)
    if gotklass != excklass:
        self.fail_guard(descr)
    #
    res = lle.args[1]
    self.last_exception = None
    return support.cast_to_ptr(res)
def execute_call(self, calldescr, func, *args):
    effectinfo = calldescr.get_extra_info()
    if effectinfo is not None and hasattr(effectinfo, 'oopspecindex'):
        oopspecindex = effectinfo.oopspecindex
        if oopspecindex == EffectInfo.OS_MATH_SQRT:
            return self._do_math_sqrt(args[0])
    TP = llmemory.cast_int_to_adr(func).ptr._obj._TYPE
    call_args = support.cast_call_args_in_order(TP.ARGS, args)
    try:
        res = self.cpu.maybe_on_top_of_llinterp(func, call_args, TP.RESULT)
        self.last_exception = None
    except LLException, lle:
        self.last_exception = lle
        res = _example_res[getkind(TP.RESULT)[0]]
    return support.cast_result(TP.RESULT, res)
def must_compile(self, deadframe, metainterp_sd, jitdriver_sd):
    jitcounter = metainterp_sd.warmrunnerdesc.jitcounter
    #
    if self.status & (self.ST_BUSY_FLAG | self.ST_TYPE_MASK) == 0:
        # common case: this is not a guard_value, and we are not
        # already busy tracing.  The rest of self.status stores a
        # valid per-guard index in the jitcounter.
        hash = self.status
        assert hash == (self.status & self.ST_SHIFT_MASK)
    #
    # do we have the BUSY flag?  If so, we're tracing right now, e.g. in an
    # outer invocation of the same function, so don't trace again for now.
    elif self.status & self.ST_BUSY_FLAG:
        return False
    #
    else:    # we have a GUARD_VALUE that fails.
        from rpython.rlib.objectmodel import current_object_addr_as_int
        index = intmask(self.status >> self.ST_SHIFT)
        typetag = intmask(self.status & self.ST_TYPE_MASK)
        # fetch the actual value of the guard_value, possibly turning
        # it to an integer
        if typetag == self.TY_INT:
            intval = metainterp_sd.cpu.get_value_direct(
                deadframe, 'i', index)
        elif typetag == self.TY_REF:
            refval = metainterp_sd.cpu.get_value_direct(
                deadframe, 'r', index)
            intval = lltype.cast_ptr_to_int(refval)
        elif typetag == self.TY_FLOAT:
            floatval = metainterp_sd.cpu.get_value_direct(
                deadframe, 'f', index)
            intval = longlong.gethash_fast(floatval)
        else:
            assert 0, typetag
        if not we_are_translated():
            if isinstance(intval, llmemory.AddressAsInt):
                intval = llmemory.cast_adr_to_int(
                    llmemory.cast_int_to_adr(intval), "forced")
        hash = r_uint(current_object_addr_as_int(self) * 777767777 +
                      intval * 1442968193)
    #
    increment = jitdriver_sd.warmstate.increment_trace_eagerness
    return jitcounter.tick(hash, increment)
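A rough pure-Python analogue of the final jitcounter.tick(hash, increment) call, to show what the computed hash is used for: accumulate a per-key score and report True once it crosses a threshold, at which point a bridge gets traced and compiled. ToyCounter and the 0.25 increment are invented for this sketch; the real JitCounter is a fixed-size, probabilistic structure.

class ToyCounter(object):
    def __init__(self):
        self.scores = {}

    def tick(self, key, increment):
        score = self.scores.get(key, 0.0) + increment
        if score >= 1.0:
            self.scores[key] = 0.0     # reset once the guard is hot enough
            return True
        self.scores[key] = score
        return False

counter = ToyCounter()
assert not counter.tick(12345, 0.25)
assert not counter.tick(12345, 0.25)
assert not counter.tick(12345, 0.25)
assert counter.tick(12345, 0.25)       # fourth failure of this guard: compile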
def _do_call(self, func, args_i, args_r, args_f, calldescr):
    TP = llmemory.cast_int_to_adr(func).ptr._obj._TYPE
    args = support.cast_call_args(TP.ARGS, args_i, args_r, args_f)
    return self.maybe_on_top_of_llinterp(func, args, TP.RESULT)
def thread_start():
    value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed))
    gcdata.aid2stack.setitem(get_aid(), value)
def get_aid(): """Return the thread identifier, cast to an (opaque) address.""" return llmemory.cast_int_to_adr(rthread.get_ident())
def cast_int_to_ptr(x, TYPE):
    x = llmemory.cast_int_to_adr(x)
    return llmemory.cast_adr_to_ptr(x, TYPE)
def get_aid(): """Return the thread identifier, cast to an (opaque) address.""" return llmemory.cast_int_to_adr(rthread.get_ident())
def op_cast_int_to_adr(int):
    return llmemory.cast_int_to_adr(int)
def discard_translations(data, size):
    if we_are_translated() and VALGRIND_DISCARD_TRANSLATIONS is not None:
        VALGRIND_DISCARD_TRANSLATIONS(llmemory.cast_int_to_adr(data), size)
def discard_translations(data, size):
    if we_are_translated():
        VALGRIND_DISCARD_TRANSLATIONS(llmemory.cast_int_to_adr(data), size)