def _record_constptrs(self, op, gcrefs_output_list,
                      ops_with_movable_const_ptr,
                      changeable_const_pointers):
    # Scan 'op' for non-NULL ConstPtr arguments.  Pointers whose target
    # can be made non-moving are appended to 'gcrefs_output_list' so the
    # GC keeps them alive; for the others (e.g. pinned objects) the
    # argument index is collected lazily in 'l' and the ConstPtr itself
    # is added to 'changeable_const_pointers' for later rewriting.
    l = None
    for i in range(op.numargs()):
        v = op.getarg(i)
        if isinstance(v, ConstPtr) and bool(v.value):
            p = v.value
            if rgc._make_sure_does_not_move(p):
                # object is now guaranteed non-moving: just keep it alive
                gcrefs_output_list.append(p)
            else:
                # object may still move: remember which argument index
                # needs to be rewritten later
                if l is None:
                    l = [i]
                else:
                    l.append(i)
                if v not in changeable_const_pointers:
                    changeable_const_pointers.append(v)
    #
    if op.is_guard() or op.getopnum() == rop.FINISH:
        # the descr of a guard/FINISH is also embedded in the assembler
        llref = cast_instance_to_gcref(op.getdescr())
        assert rgc._make_sure_does_not_move(llref)
        gcrefs_output_list.append(llref)
    #
    if l:
        ops_with_movable_const_ptr[op] = l
def _record_constptrs(self, op, gcrefs_output_list):
    """Record every non-NULL ConstPtr argument of 'op' (and the descr of
    guards/FINISH) in 'gcrefs_output_list', making sure each object does
    not move while its address is embedded in the assembler."""
    for idx in range(op.numargs()):
        arg = op.getarg(idx)
        if not isinstance(arg, ConstPtr):
            continue
        if not bool(arg.value):
            continue            # skip NULL pointers
        gcref = arg.value
        rgc._make_sure_does_not_move(gcref)
        gcrefs_output_list.append(gcref)
    if op.getopnum() == rop.FINISH or op.is_guard():
        # guard/FINISH descrs are baked into the machine code as well
        descr_ref = cast_instance_to_gcref(op.getdescr())
        rgc._make_sure_does_not_move(descr_ref)
        gcrefs_output_list.append(descr_ref)
def prepare_finish(self, op):
    """Build the location list for FINISH: the optional single argument
    location followed by the fail descr encoded as an immediate."""
    fail_descr_ref = cast_instance_to_gcref(op.getdescr())
    # prebuilt descrs never move, but state it explicitly anyway
    rgc._make_sure_does_not_move(fail_descr_ref)
    descr_imm = imm(rffi.cast(lltype.Signed, fail_descr_ref))
    locs = []
    if op.numargs() > 0:
        locs.append(self.ensure_reg(op.getarg(0)))
    locs.append(descr_imm)
    return locs
def prepare_finish(self, op):
    # Prepare the locations for a FINISH operation: [arg_loc, descr_imm]
    # when FINISH carries a return value, else just [descr_imm].
    descr = op.getdescr()
    fail_descr = cast_instance_to_gcref(descr)
    # we know it does not move, but well
    rgc._make_sure_does_not_move(fail_descr)
    # encode the descr address as a plain machine word
    fail_descr = rffi.cast(lltype.Signed, fail_descr)
    if op.numargs() > 0:
        loc = self.ensure_reg(op.getarg(0))
        locs = [loc, imm(fail_descr)]
    else:
        locs = [imm(fail_descr)]
    return locs
def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc):
    # Emit CALL_ASSEMBLER: call another piece of compiled assembler,
    # then either take the fast path (frame finished with the expected
    # 'done_with_this_frame' descr) or call the assembler helper.
    self._store_force_index(guard_op)
    descr = op.getdescr()
    assert isinstance(descr, JitCellToken)
    #
    # Write a call to the target assembler
    # we need to allocate the frame, keep in sync with runner's
    # execute_token
    jd = descr.outermost_jitdriver_sd
    self._call_assembler_emit_call(self.imm(descr._ll_function_addr),
                                   argloc, tmploc)
    if op.result is None:
        assert result_loc is None
        value = self.cpu.done_with_this_frame_descr_void
    else:
        kind = op.result.type
        if kind == INT:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_int
        elif kind == REF:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_ref
        elif kind == FLOAT:
            value = self.cpu.done_with_this_frame_descr_float
        else:
            raise AssertionError(kind)
    gcref = cast_instance_to_gcref(value)
    # FIX: guard against a NULL gcref before _make_sure_does_not_move(),
    # consistently with the other call_assembler variants in this file;
    # a NULL reference must not be handed to the GC here.
    if gcref:
        rgc._make_sure_does_not_move(gcref)
    value = rffi.cast(lltype.Signed, gcref)
    je_location = self._call_assembler_check_descr(value, tmploc)
    #
    # Path A: use assembler_helper_adr
    assert jd is not None
    asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr)
    self._call_assembler_emit_helper_call(self.imm(asm_helper_adr),
                                          [tmploc, vloc], result_loc)
    jmp_location = self._call_assembler_patch_je(result_loc, je_location)

    # Path B: fast path. Must load the return value, and reset the token

    # Reset the vable token --- XXX really too much special logic here:-(
    if jd.index_of_virtualizable >= 0:
        self._call_assembler_reset_vtoken(jd, vloc)
    #
    self._call_assembler_load_result(op, result_loc)
    #
    # Here we join Path A and Path B again
    self._call_assembler_patch_jmp(jmp_location)
def set(value):
    # Store 'value' (an instance of Cls, or None) into the slot.
    # NOTE(review): 'Cls', 'setraw' and 'self' are free variables —
    # presumably captured from an enclosing scope not visible here;
    # confirm against the surrounding definition.
    assert isinstance(value, Cls) or value is None
    if we_are_translated():
        from rpython.rtyper.annlowlevel import cast_instance_to_gcref
        from rpython.rlib.rgc import _make_sure_does_not_move
        from rpython.rlib.objectmodel import running_on_llinterp
        gcref = cast_instance_to_gcref(value)
        if not running_on_llinterp:
            if gcref:
                # the raw slot is invisible to the GC: the object must
                # not move while its address is stored there
                _make_sure_does_not_move(gcref)
        # store the address as a plain integer via setraw()
        value = lltype.cast_ptr_to_int(gcref)
        setraw(value)
    else:
        # untranslated: keep the object in a normal Python attribute
        self.local.value = value
def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc):
    # Emit CALL_ASSEMBLER: call another piece of compiled assembler,
    # then either take the fast path (frame finished with the expected
    # 'done_with_this_frame' descr) or call the assembler helper.
    self._store_force_index(guard_op)
    descr = op.getdescr()
    assert isinstance(descr, JitCellToken)
    #
    # Write a call to the target assembler
    # we need to allocate the frame, keep in sync with runner's
    # execute_token
    jd = descr.outermost_jitdriver_sd
    self._call_assembler_emit_call(self.imm(descr._ll_function_addr),
                                   argloc, tmploc)
    if op.result is None:
        assert result_loc is None
        value = self.cpu.done_with_this_frame_descr_void
    else:
        kind = op.result.type
        if kind == INT:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_int
        elif kind == REF:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_ref
        elif kind == FLOAT:
            value = self.cpu.done_with_this_frame_descr_float
        else:
            raise AssertionError(kind)
    # the descr object must not move while its address is compared
    # against in the generated code
    gcref = cast_instance_to_gcref(value)
    if gcref:
        rgc._make_sure_does_not_move(gcref)
    value = rffi.cast(lltype.Signed, gcref)
    je_location = self._call_assembler_check_descr(value, tmploc)
    #
    # Path A: use assembler_helper_adr
    assert jd is not None
    asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr)
    self._call_assembler_emit_helper_call(self.imm(asm_helper_adr),
                                          [tmploc, vloc], result_loc)
    jmp_location = self._call_assembler_patch_je(result_loc, je_location)

    # Path B: fast path. Must load the return value
    #
    self._call_assembler_load_result(op, result_loc)
    #
    # Here we join Path A and Path B again
    self._call_assembler_patch_jmp(jmp_location)
def prepare_op_finish(self, op, fcond):
    """Return the locations for FINISH: the (optional) single argument
    location followed by the fail descr as an immediate.  The frame is
    in fp; we only have to say where inside it the argument lives."""
    descr_gcref = cast_instance_to_gcref(op.getdescr())
    # prebuilt descrs do not move, but state it explicitly anyway
    rgc._make_sure_does_not_move(descr_gcref)
    descr_word = rffi.cast(lltype.Signed, descr_gcref)
    if op.numargs() == 1:
        arg_loc = self.make_sure_var_in_reg(op.getarg(0))
        return [arg_loc, imm(descr_word)]
    return [imm(descr_word)]
def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
    # Rewrite 'operations' for this backend and record every constant
    # GCREF in 'gcrefs_output_list' so the GC keeps it alive.
    rewriter = GcRewriterAssembler(self, cpu)
    newops = rewriter.rewrite(operations)

    # the key is an operation that contains a ConstPtr as an argument and
    # this ConstPtrs pointer might change as it points to an object that
    # can't be made non-moving (e.g. the object is pinned).
    ops_with_movable_const_ptr = {}
    #
    # a list of such not really constant ConstPtrs.
    changeable_const_pointers = []
    for op in newops:
        # record all GCREFs, because the GC (or Boehm) cannot see them and
        # keep them alive if they end up as constants in the assembler.
        # If such a GCREF can change and we can't make the object it points
        # to non-movable, we have to handle it seperatly. Such GCREF's are
        # returned as ConstPtrs in 'changeable_const_pointers' and the
        # affected operation is returned in 'op_with_movable_const_ptr'.
        # For this special case see 'rewrite_changeable_constptrs'.
        self._record_constptrs(op, gcrefs_output_list,
                               ops_with_movable_const_ptr,
                               changeable_const_pointers)
    #
    # handle pointers that are not guaranteed to stay the same
    if len(ops_with_movable_const_ptr) > 0:
        moving_obj_tracker = MovableObjectTracker(
            cpu, changeable_const_pointers)
        #
        if not we_are_translated():
            # used for testing
            self.last_moving_obj_tracker = moving_obj_tracker
        # make sure the array containing the pointers is not collected by
        # the GC (or Boehm)
        gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref)
        rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref)

        # rewrite every affected operation to read its pointer(s) out of
        # the tracker's array instead of embedding them directly
        ops = newops
        newops = []
        for op in ops:
            if op in ops_with_movable_const_ptr:
                rewritten_ops = self._rewrite_changeable_constptrs(
                    op, ops_with_movable_const_ptr, moving_obj_tracker)
                newops.extend(rewritten_ops)
            else:
                newops.append(op)
    #
    return newops
def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
    """Rewrite 'operations' for this backend, recording all constant
    GCREFs in 'gcrefs_output_list' so the GC (or Boehm) keeps them
    alive.  ConstPtrs whose target cannot be made non-moving (e.g.
    pinned objects) are routed through a MovableObjectTracker and the
    operations using them are rewritten accordingly (see
    'rewrite_changeable_constptrs')."""
    newops = GcRewriterAssembler(self, cpu).rewrite(operations)
    # maps an operation to the argument indices holding a ConstPtr
    # whose pointer might change
    ops_with_movable_const_ptr = {}
    # the not-really-constant ConstPtrs themselves
    changeable_const_pointers = []
    for op in newops:
        # record all GCREFs; movable ones are collected into the two
        # containers above for the special rewriting below
        self._record_constptrs(op, gcrefs_output_list,
                               ops_with_movable_const_ptr,
                               changeable_const_pointers)
    if not ops_with_movable_const_ptr:
        return newops
    # handle pointers that are not guaranteed to stay the same
    tracker = MovableObjectTracker(cpu, changeable_const_pointers)
    if not we_are_translated():
        self.last_moving_obj_tracker = tracker   # used for testing
    # the array holding the pointers must itself stay alive and
    # non-moving
    gcrefs_output_list.append(tracker.ptr_array_gcref)
    rgc._make_sure_does_not_move(tracker.ptr_array_gcref)
    rewritten = []
    for op in newops:
        if op in ops_with_movable_const_ptr:
            rewritten.extend(self._rewrite_changeable_constptrs(
                op, ops_with_movable_const_ptr, tracker))
        else:
            rewritten.append(op)
    return rewritten
def _record_constptrs(self, op, gcrefs_output_list,
                      ops_with_movable_const_ptr,
                      changeable_const_pointers):
    """Record the non-NULL ConstPtr arguments of 'op' (and the descr of
    guards/FINISH) in 'gcrefs_output_list'.  Arguments whose target
    cannot be made non-moving are instead noted by index in
    'ops_with_movable_const_ptr[op]' and collected in
    'changeable_const_pointers'."""
    # provisional entry; removed again below if nothing movable is found
    movable_indices = ops_with_movable_const_ptr[op] = []
    for idx in range(op.numargs()):
        arg = op.getarg(idx)
        if not (isinstance(arg, ConstPtr) and bool(arg.value)):
            continue
        gcref = arg.value
        if rgc._make_sure_does_not_move(gcref):
            # now non-moving: just keep it alive
            gcrefs_output_list.append(gcref)
        else:
            # may still move: remember the argument index for rewriting
            movable_indices.append(idx)
            if arg not in changeable_const_pointers:
                changeable_const_pointers.append(arg)
    #
    if op.is_guard() or op.getopnum() == rop.FINISH:
        # guard/FINISH descrs are embedded in the assembler as well
        descr_ref = cast_instance_to_gcref(op.getdescr())
        assert rgc._make_sure_does_not_move(descr_ref)
        gcrefs_output_list.append(descr_ref)
    #
    if not movable_indices:
        del ops_with_movable_const_ptr[op]
def call_assembler(self, op, argloc, vloc, result_loc, tmploc):
    """
    * argloc: location of the frame argument that we're passing to
              the called assembler (this is the first return value
              of locs_for_call_assembler())

    * vloc: location of the virtualizable (not in a register;
            this is the optional second return value of
            locs_for_call_assembler(), or imm(0) if none returned)

    * result_loc: location of op.result (which is not be
                  confused with the next one)

    * tmploc: location where the actual call to the other piece
              of assembler will return its jitframe result
              (which is always a REF), before the helper may be
              called
    """
    descr = op.getdescr()
    assert isinstance(descr, JitCellToken)
    #
    # Write a call to the target assembler
    # we need to allocate the frame, keep in sync with runner's
    # execute_token
    jd = descr.outermost_jitdriver_sd
    self._call_assembler_emit_call(self.imm(descr._ll_function_addr),
                                   argloc, tmploc)
    # pick the 'done_with_this_frame' descr matching the result kind
    if op.type == 'v':
        assert result_loc is None
        value = self.cpu.done_with_this_frame_descr_void
    else:
        kind = op.type
        if kind == INT:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_int
        elif kind == REF:
            assert result_loc is tmploc
            value = self.cpu.done_with_this_frame_descr_ref
        elif kind == FLOAT:
            value = self.cpu.done_with_this_frame_descr_float
        else:
            raise AssertionError(kind)
    # the descr's address is compared against in the generated code,
    # so the object must not move
    gcref = cast_instance_to_gcref(value)
    if gcref:
        rgc._make_sure_does_not_move(gcref)
    value = rffi.cast(lltype.Signed, gcref)
    je_location = self._call_assembler_check_descr(value, tmploc)
    #
    # Path A: use assembler_helper_adr
    assert jd is not None
    asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr)
    self._call_assembler_emit_helper_call(self.imm(asm_helper_adr),
                                          [tmploc, vloc], result_loc)
    jmp_location = self._call_assembler_patch_je(result_loc, je_location)

    # Path B: fast path. Must load the return value
    #
    self._call_assembler_load_result(op, result_loc)
    #
    # Here we join Path A and Path B again
    self._call_assembler_patch_jmp(jmp_location)
def call_assembler(self, op, argloc, vloc, result_loc, tmploc):
    """Emit a CALL_ASSEMBLER.

    * argloc: location of the frame argument passed to the called
      assembler (first return value of locs_for_call_assembler())

    * vloc: location of the virtualizable (not in a register; the
      optional second return value of locs_for_call_assembler(), or
      imm(0) if none returned)

    * result_loc: location of op.result (not to be confused with
      'tmploc')

    * tmploc: location where the actual call to the other piece of
      assembler returns its jitframe result (always a REF), before
      the helper may be called
    """
    descr = op.getdescr()
    assert isinstance(descr, JitCellToken)
    jd = descr.outermost_jitdriver_sd
    # Write a call to the target assembler; the frame allocation must
    # stay in sync with the runner's execute_token.
    self._call_assembler_emit_call(self.imm(descr._ll_function_addr),
                                   argloc, tmploc)
    # Pick the 'done_with_this_frame' descr matching the result kind.
    kind = op.type
    if kind == 'v':
        assert result_loc is None
        done_descr = self.cpu.done_with_this_frame_descr_void
    elif kind == INT:
        assert result_loc is tmploc
        done_descr = self.cpu.done_with_this_frame_descr_int
    elif kind == REF:
        assert result_loc is tmploc
        done_descr = self.cpu.done_with_this_frame_descr_ref
    elif kind == FLOAT:
        done_descr = self.cpu.done_with_this_frame_descr_float
    else:
        raise AssertionError(kind)
    gcref = cast_instance_to_gcref(done_descr)
    if gcref:
        rgc._make_sure_does_not_move(gcref)   # but should be prebuilt
    value = rffi.cast(lltype.Signed, gcref)
    je_location = self._call_assembler_check_descr(value, tmploc)
    #
    # Path A: the frame finished with some other descr --- call the
    # assembler helper
    assert jd is not None
    helper_addr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr)
    self._call_assembler_emit_helper_call(self.imm(helper_addr),
                                          [tmploc, vloc], result_loc)
    jmp_location = self._call_assembler_patch_je(result_loc, je_location)
    #
    # Path B: fast path --- just load the return value out of the frame
    self._call_assembler_load_result(op, result_loc)
    #
    # Here Path A and Path B join again
    self._call_assembler_patch_jmp(jmp_location)