def _prepare_call(self, op, save_all_regs=False, first_arg_index=1):
    """Build the argument-location list for a call operation.

    Returned layout: args[0] is the result location (filled in by
    self._call), args[1] is the result size as an immediate, args[2]
    is a 0/1 immediate signedness flag, and args[3:] are the
    locations of the call's own arguments.
    """
    args = [None] * (op.numargs() + 3)
    calldescr = op.getdescr()
    assert isinstance(calldescr, CallDescr)
    assert len(calldescr.arg_classes) == op.numargs() - first_arg_index
    # slots 3.. hold the locations of the operation's own arguments
    for i in range(op.numargs()):
        args[i + 3] = self.loc(op.getarg(i))
    size = calldescr.get_result_size()
    sign = calldescr.is_result_signed()
    # encode signedness as an explicit 0/1 immediate
    if sign:
        sign_loc = imm(1)
    else:
        sign_loc = imm(0)
    args[1] = imm(size)
    args[2] = sign_loc
    effectinfo = calldescr.get_extra_info()
    # gc_level: 2 = save all registers, 1 = call may trigger a GC
    # collection, 0 = call cannot collect
    if save_all_regs:
        gc_level = 2
    elif effectinfo is None or effectinfo.check_can_collect():
        gc_level = 1
    else:
        gc_level = 0
    # _call picks the result location and handles register saving
    args[0] = self._call(op, args, gc_level)
    return args
def _prepare_threadlocalref_get(self, op, fcond):
    """Prepare locations for THREADLOCALREF_GET.

    Returns [offset imm, result-size imm, signedness imm (0/1),
    result register].  The signedness flag is normalized to an
    explicit 0/1 integer immediate, matching the convention used by
    _prepare_call, instead of passing the raw bool from
    is_result_signed() into imm().
    """
    ofs_loc = imm(op.getarg(1).getint())
    calldescr = op.getdescr()
    size_loc = imm(calldescr.get_result_size())
    if calldescr.is_result_signed():
        sign_loc = imm(1)
    else:
        sign_loc = imm(0)
    res_loc = self.force_allocate_reg(op)
    return [ofs_loc, size_loc, sign_loc, res_loc]
def prepare_op_gc_store_indexed(self, op, fcond):
    """Prepare locations for GC_STORE_INDEXED.

    Returns [value, base, index, size imm, offset imm].  The base
    offset must be encodable as an ARM immediate because the emitter
    folds it into the index with an immediate ADD; the sibling
    implementation already asserts this, so do it here too.
    """
    boxes = op.getarglist()
    base_loc = self.make_sure_var_in_reg(boxes[0], boxes)
    value_loc = self.make_sure_var_in_reg(boxes[2], boxes)
    index_loc = self.make_sure_var_in_reg(boxes[1], boxes)
    assert boxes[3].getint() == 1    # scale: only unit scale supported
    ofs = boxes[4].getint()
    size = boxes[5].getint()
    # the assembler adds 'ofs' with an immediate-operand ADD
    assert check_imm_arg(ofs)
    return [value_loc, base_loc, index_loc, imm(size), imm(ofs)]
def prepare_op_gc_store_indexed(self, op, fcond):
    """Allocate locations for a GC_STORE_INDEXED operation.

    Returns [value, base, index, size imm, offset imm].
    """
    args = op.getarglist()
    base_loc = self.make_sure_var_in_reg(args[0], args)
    value_loc = self.make_sure_var_in_reg(args[2], args)
    index_loc = self.make_sure_var_in_reg(args[1], args)
    # only a unit scale is supported by this backend
    assert args[3].getint() == 1
    offset = args[4].getint()
    itemsize = args[5].getint()
    # the base offset is folded in with an immediate ADD, so it must
    # be encodable as an ARM immediate
    assert check_imm_arg(offset)
    return [value_loc, base_loc, index_loc, imm(itemsize), imm(offset)]
def _prepare_op_gc_load_indexed(self, op, fcond):
    """Prepare locations for GC_LOAD_INDEXED.

    Returns [result, base, index, nsize imm, offset imm]; nsize is
    negative when the load is signed.  The base offset must be
    encodable as an ARM immediate because the emitter folds it into
    the index with an immediate ADD; the sibling implementation
    already asserts this, so do it here too.
    """
    boxes = op.getarglist()
    base_loc = self.make_sure_var_in_reg(boxes[0], boxes)
    index_loc = self.make_sure_var_in_reg(boxes[1], boxes)
    assert boxes[2].getint() == 1    # scale: only unit scale supported
    ofs = boxes[3].getint()
    nsize = boxes[4].getint()
    # the assembler adds 'ofs' with an immediate-operand ADD
    assert check_imm_arg(ofs)
    self.possibly_free_vars_for_op(op)
    self.free_temp_vars()
    res_loc = self.force_allocate_reg(op)
    return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)]
def _prepare_op_gc_load_indexed(self, op, fcond):
    """Allocate locations for GC_LOAD_INDEXED.

    Returns [result, base, index, nsize imm, offset imm]; a negative
    nsize marks a signed load.
    """
    args = op.getarglist()
    base_loc = self.make_sure_var_in_reg(args[0], args)
    index_loc = self.make_sure_var_in_reg(args[1], args)
    # only a unit scale is supported
    assert args[2].getint() == 1
    offset = args[3].getint()
    nsize = args[4].getint()
    # the offset is later folded in with an immediate ADD
    assert check_imm_arg(offset)
    self.possibly_free_vars_for_op(op)
    self.free_temp_vars()
    result_loc = self.force_allocate_reg(op)
    return [result_loc, base_loc, index_loc, imm(nsize), imm(offset)]
def prepare_op_gc_store(self, op, fcond):
    """Allocate locations for GC_STORE: [value, base, offset, size imm]."""
    args = op.getarglist()
    base_loc = self.make_sure_var_in_reg(args[0], args)
    offset = args[1].getint()
    value_loc = self.make_sure_var_in_reg(args[2], args)
    size = args[3].getint()
    # 8-byte stores go through VSTR, which accepts a narrower range
    # of immediate offsets than the core STR instructions
    if size < 8:
        limit = default_imm_size
    else:
        limit = VMEM_imm_size
    if check_imm_arg(offset, size=limit):
        ofs_loc = imm(offset)
    else:
        # too large to encode: materialize it in a scratch register
        ofs_loc = self.get_scratch_reg(INT, args)
        self.assembler.load(ofs_loc, imm(offset))
    return [value_loc, base_loc, ofs_loc, imm(size)]
def prepare_op_finish(self, op, fcond):
    """Prepare locations for FINISH.

    The frame already lives in fp; we only need the fail descr as a
    signed-address immediate and, when present, the location of the
    single return value.
    """
    descr = op.getdescr()
    descr_gcref = cast_instance_to_gcref(descr)
    # pin the descr: its raw address is embedded in the machine code
    rgc._make_sure_does_not_move(descr_gcref)
    descr_addr = rffi.cast(lltype.Signed, descr_gcref)
    locs = []
    if op.numargs() == 1:
        locs.append(self.make_sure_var_in_reg(op.getarg(0)))
    locs.append(imm(descr_addr))
    return locs
def prepare_op_guard_exception(self, op, fcond):
    """Prepare GUARD_EXCEPTION: expected exc-type register, a scratch
    register, an optional result register, and the addresses of the
    CPU's exception slots as immediates."""
    arglist = op.getarglist()
    expected = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint()))
    loc = self.make_sure_var_in_reg(expected)
    loc1 = self.get_scratch_reg(INT, arglist)
    resloc = None
    if op in self.longevity:
        resloc = self.force_allocate_reg(op, arglist)
        self.possibly_free_var(op)
    arglocs = self._prepare_guard(op, [loc, loc1, resloc,
                                       imm(self.cpu.pos_exc_value()),
                                       imm(self.cpu.pos_exception())])
    return arglocs
def prepare_op_guard_exception(self, op, fcond):
    """Prepare GUARD_EXCEPTION by comparing the expected exception
    type against the one stored in the CPU's exception slot."""
    boxes = op.getarglist()
    expected_type = ConstInt(rffi.cast(lltype.Signed,
                                       op.getarg(0).getint()))
    type_loc = self.make_sure_var_in_reg(expected_type)
    scratch_loc = self.get_scratch_reg(INT, boxes)
    if op in self.longevity:
        res_loc = self.force_allocate_reg(op, boxes)
        self.possibly_free_var(op)
    else:
        res_loc = None
    locs = [type_loc, scratch_loc, res_loc,
            imm(self.cpu.pos_exc_value()),
            imm(self.cpu.pos_exception())]
    return self._prepare_guard(op, locs)
def prepare_op_guard_class(self, op, fcond):
    """Prepare GUARD_CLASS: the object's register and the expected
    class as a signed immediate."""
    assert not isinstance(op.getarg(0), Const)
    arglist = op.getarglist()
    obj_loc = self.make_sure_var_in_reg(arglist[0], arglist)
    expected_class = rffi.cast(lltype.Signed, arglist[1].getint())
    return self._prepare_guard(op, [obj_loc, imm(expected_class)])
def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL):
    """Store value_loc at 'base_loc + ofs_loc'.

    'scale' selects the store width only (it does NOT scale the
    offset): 3 = VFP register store via VSTR, 2 = word, 1 = halfword,
    0 = byte.
    """
    if scale.value == 3:
        assert value_loc.is_vfp_reg()
        # vstr only supports imm offsets
        # so if the ofset is too large we add it to the base and use an
        # offset of 0
        if ofs_loc.is_core_reg():
            tmploc, save = self.get_tmp_reg([value_loc, base_loc, ofs_loc])
            assert not save
            self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value)
            base_loc = tmploc
            ofs_loc = imm(0)
        else:
            assert ofs_loc.is_imm()
            # VSTR requires a word-aligned immediate offset
            assert ofs_loc.value % 4 == 0
        self.mc.VSTR(value_loc.value, base_loc.value, ofs_loc.value)
    elif scale.value == 2:
        # word store: immediate or register offset form
        if ofs_loc.is_imm():
            self.mc.STR_ri(value_loc.value, base_loc.value,
                           ofs_loc.value, cond=fcond)
        else:
            self.mc.STR_rr(value_loc.value, base_loc.value,
                           ofs_loc.value, cond=fcond)
    elif scale.value == 1:
        # halfword store
        if ofs_loc.is_imm():
            self.mc.STRH_ri(value_loc.value, base_loc.value,
                            ofs_loc.value, cond=fcond)
        else:
            self.mc.STRH_rr(value_loc.value, base_loc.value,
                            ofs_loc.value, cond=fcond)
    elif scale.value == 0:
        # byte store
        if ofs_loc.is_imm():
            self.mc.STRB_ri(value_loc.value, base_loc.value,
                            ofs_loc.value, cond=fcond)
        else:
            self.mc.STRB_rr(value_loc.value, base_loc.value,
                            ofs_loc.value, cond=fcond)
    else:
        assert 0
def _prepare_op_gc_load(self, op, fcond):
    """Allocate locations for GC_LOAD: [base, offset, result, nsize imm].

    nsize is negative when the load is signed.
    """
    arg0 = op.getarg(0)
    offset = op.getarg(1).getint()
    nsize = op.getarg(2).getint()
    base_loc = self.make_sure_var_in_reg(arg0)
    immofs = imm(offset)
    # 8-byte loads go through VLDR, whose immediate range is narrower
    if abs(nsize) < 8:
        limit = default_imm_size
    else:
        limit = VMEM_imm_size
    if check_imm_arg(offset, size=limit):
        ofs_loc = immofs
    else:
        # offset too large to encode: load it into a scratch register
        ofs_loc = self.get_scratch_reg(INT, [arg0])
        self.assembler.load(ofs_loc, immofs)
    self.possibly_free_vars_for_op(op)
    self.free_temp_vars()
    result_loc = self.force_allocate_reg(op)
    return [base_loc, ofs_loc, result_loc, imm(nsize)]
def _emit_op_gc_load(self, op, arglocs, regalloc, fcond):
    """Emit the load for GC_LOAD; a negative nsize marks a signed load."""
    base_loc, ofs_loc, res_loc, nsize_loc = arglocs
    nsize = nsize_loc.value
    self._load_from_mem(res_loc, base_loc, ofs_loc,
                        imm(get_scale(abs(nsize))), nsize < 0, fcond)
    return fcond
def test_mov_imm_to_big_stacklock(self):
    """An immediate moved to a far stack slot needs both values loaded
    into registers before the register-offset store."""
    source = imm(100)
    target = stack(8191)
    self.mov(source, target, [
        mi('gen_load_int', lr.value, 100, cond=AL),
        mi('gen_load_int', ip.value, target.value, cond=AL),
        mi('STR_rr', lr.value, fp.value, ip.value, cond=AL),
    ])
def test_mov_big_imm_to_stacklock(self):
    """A large immediate to a near stack slot: load the constant, then
    store with an immediate offset."""
    value = imm(65536)
    slot = stack(7)
    trace = [
        mi('gen_load_int', lr.value, 65536, cond=AL),
        mi('STR_ri', lr.value, fp.value, imm=slot.value, cond=AL),
    ]
    self.mov(value, slot, trace)
def _genop_call_assembler(self, op, arglocs, regalloc, fcond):
    """Emit CALL_ASSEMBLER; vloc defaults to imm(0) when absent."""
    vloc = imm(0)
    if len(arglocs) == 4:
        argloc, vloc, result_loc, tmploc = arglocs
    else:
        argloc, result_loc, tmploc = arglocs
    self._store_force_index(self._find_nearby_operation(+1))
    self.call_assembler(op, argloc, vloc, result_loc, tmploc)
    return fcond
def emit_guard_call_assembler(self, op, guard_op, arglocs, regalloc,
                              fcond):
    """Emit CALL_ASSEMBLER followed by its may-force guard; vloc
    defaults to imm(0) when absent."""
    vloc = imm(0)
    if len(arglocs) == 4:
        argloc, vloc, result_loc, tmploc = arglocs
    else:
        argloc, result_loc, tmploc = arglocs
    self.call_assembler(op, guard_op, argloc, vloc, result_loc, tmploc)
    self._emit_guard_may_force(guard_op,
                               regalloc._prepare_guard(guard_op))
    return fcond
def test_from_imm(self):
    """Immediate to raw stack: both the constant and the slot address
    are loaded into registers, then stored with a register offset."""
    slot = raw_stack(1024)
    value = imm(999)
    self.mov(value, slot, [
        mi('gen_load_int', lr.value, value.value, cond=AL),
        mi('gen_load_int', ip.value, slot.value, cond=AL),
        mi('STR_rr', lr.value, sp.value, ip.value, cond=AL),
    ])
def _prepare_guard(self, op, args=None):
    """Append the current frame depth and the guard's fail-arg
    locations to *args* (mutated in place when provided) and return
    it.  Missing fail args are recorded as None."""
    if args is None:
        args = []
    args.append(imm(self.frame_manager.get_frame_depth()))
    for failarg in op.getfailargs():
        args.append(self.loc(failarg) if failarg else None)
    return args
def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond):
    """Emit GC_STORE_INDEXED: fold the constant base offset into the
    index (via ip) and delegate to _write_to_mem."""
    value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    if ofs_loc.value > 0:
        # ip = index + offset
        self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value)
        index_loc = r.ip
    self._write_to_mem(value_loc, base_loc, index_loc,
                       imm(get_scale(size_loc.value)), fcond)
    return fcond
def emit_opx_threadlocalref_get(self, op, arglocs, regalloc, fcond):
    """Load a field out of the thread-local struct into res_loc."""
    ofs_loc, size_loc, sign_loc, res_loc = arglocs
    assert ofs_loc.is_imm()
    assert size_loc.is_imm()
    assert sign_loc.is_imm()
    # fetch the threadlocal base address saved on the machine stack
    self.load_reg(self.mc, res_loc, r.sp, self.saved_threadlocal_addr)
    # then load the requested field relative to that base
    self._load_from_mem(res_loc, res_loc, ofs_loc,
                        imm(get_scale(size_loc.value)),
                        sign_loc.value != 0, fcond)
    return fcond
def _prepare_call(self, op, force_store=[], save_all_regs=False,
                  first_arg_index=1):
    """Build the argument-location list for a call operation.

    Returned layout: args[0] is the result location (filled in by
    self._call), args[1] is the result size as an immediate, args[2]
    is a 0/1 immediate signedness flag, and args[3:] are the
    locations of the call's own arguments.

    NOTE(review): 'force_store=[]' is a mutable default argument; it
    looks like it is only passed through to self._call, but confirm
    that _call never mutates it.
    """
    args = [None] * (op.numargs() + 3)
    calldescr = op.getdescr()
    assert isinstance(calldescr, CallDescr)
    assert len(calldescr.arg_classes) == op.numargs() - first_arg_index
    # slots 3.. hold the locations of the operation's own arguments
    for i in range(op.numargs()):
        args[i + 3] = self.loc(op.getarg(i))
    size = calldescr.get_result_size()
    sign = calldescr.is_result_signed()
    # encode signedness as an explicit 0/1 immediate
    if sign:
        sign_loc = imm(1)
    else:
        sign_loc = imm(0)
    args[1] = imm(size)
    args[2] = sign_loc
    # _call picks the result location and handles register saving
    args[0] = self._call(op, args, force_store, save_all_regs)
    return args
def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond):
    """Emit GC_LOAD_INDEXED: fold the constant base offset into the
    index (via ip) and delegate to _load_from_mem.  A negative nsize
    marks a signed load."""
    res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    nsize = nsize_loc.value
    if ofs_loc.value > 0:
        # ip = index + offset
        self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value)
        index_loc = r.ip
    self._load_from_mem(res_loc, base_loc, index_loc,
                        imm(get_scale(abs(nsize))), nsize < 0, fcond)
    return fcond
def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond):
    """Emit SETINTERIORFIELD_GC: compute index*itemsize (+ofs) into a
    temp register, then store the value at base+temp."""
    (base_loc, index_loc, value_loc,
     ofs_loc, ofs, itemsize, fieldsize) = arglocs
    scratch, save = self.get_tmp_reg([base_loc, index_loc,
                                      value_loc, ofs_loc])
    assert not save
    # scratch = index * itemsize
    self.mc.gen_load_int(scratch.value, itemsize.value)
    self.mc.MUL(scratch.value, index_loc.value, scratch.value)
    if ofs.value > 0:
        # scratch += field offset
        if ofs_loc.is_imm():
            self.mc.ADD_ri(scratch.value, scratch.value, ofs_loc.value)
        else:
            self.mc.ADD_rr(scratch.value, scratch.value, ofs_loc.value)
    field_scale = get_scale(fieldsize.value)
    self._write_to_mem(value_loc, base_loc, scratch,
                       imm(field_scale), fcond)
    return fcond
def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale,
                   signed=False, fcond=c.AL):
    # Load a value of '1 << scale' bytes, from the memory location
    # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale
    # the offset!
    #
    # scale 3 = VFP register load (VLDR), 2 = word, 1 = halfword,
    # 0 = byte; 'signed' selects the sign-extending variants for the
    # halfword/byte cases.
    if scale.value == 3:
        assert res_loc.is_vfp_reg()
        # vldr only supports imm offsets
        # if the offset is in a register we add it to the base and use a
        # tmp reg
        if ofs_loc.is_core_reg():
            tmploc, save = self.get_tmp_reg([base_loc, ofs_loc])
            assert not save
            self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value)
            base_loc = tmploc
            ofs_loc = imm(0)
        else:
            assert ofs_loc.is_imm()
            # VLDR requires a word-aligned immediate offset
            assert ofs_loc.value % 4 == 0
        self.mc.VLDR(res_loc.value, base_loc.value,
                     ofs_loc.value, cond=fcond)
    elif scale.value == 2:
        # word load: immediate or register offset form
        if ofs_loc.is_imm():
            self.mc.LDR_ri(res_loc.value, base_loc.value,
                           ofs_loc.value, cond=fcond)
        else:
            self.mc.LDR_rr(res_loc.value, base_loc.value,
                           ofs_loc.value, cond=fcond)
    elif scale.value == 1:
        # halfword load, sign- or zero-extending
        if ofs_loc.is_imm():
            if signed:
                self.mc.LDRSH_ri(res_loc.value, base_loc.value,
                                 ofs_loc.value, cond=fcond)
            else:
                self.mc.LDRH_ri(res_loc.value, base_loc.value,
                                ofs_loc.value, cond=fcond)
        else:
            if signed:
                self.mc.LDRSH_rr(res_loc.value, base_loc.value,
                                 ofs_loc.value, cond=fcond)
            else:
                self.mc.LDRH_rr(res_loc.value, base_loc.value,
                                ofs_loc.value, cond=fcond)
    elif scale.value == 0:
        # byte load, sign- or zero-extending
        if ofs_loc.is_imm():
            if signed:
                self.mc.LDRSB_ri(res_loc.value, base_loc.value,
                                 ofs_loc.value, cond=fcond)
            else:
                self.mc.LDRB_ri(res_loc.value, base_loc.value,
                                ofs_loc.value, cond=fcond)
        else:
            if signed:
                self.mc.LDRSB_rr(res_loc.value, base_loc.value,
                                 ofs_loc.value, cond=fcond)
            else:
                self.mc.LDRB_rr(res_loc.value, base_loc.value,
                                ofs_loc.value, cond=fcond)
    else:
        assert 0
def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond):
    """Emit GETINTERIORFIELD_GC: compute index*itemsize (+ofs) into a
    temp register, then load the field from base+temp."""
    (base_loc, index_loc, res_loc,
     ofs_loc, ofs, itemsize, fieldsize) = arglocs
    descr = op.getdescr()
    assert isinstance(descr, InteriorFieldDescr)
    signed = descr.fielddescr.is_field_signed()
    scratch, save = self.get_tmp_reg([base_loc, ofs_loc])
    assert not save
    # scratch = index * itemsize
    self.mc.gen_load_int(scratch.value, itemsize.value)
    self.mc.MUL(scratch.value, index_loc.value, scratch.value)
    if ofs.value > 0:
        # scratch += field offset
        if ofs_loc.is_imm():
            self.mc.ADD_ri(scratch.value, scratch.value, ofs_loc.value)
        else:
            self.mc.ADD_rr(scratch.value, scratch.value, ofs_loc.value)
    self._load_from_mem(res_loc, base_loc, scratch,
                        imm(get_scale(fieldsize.value)), signed, fcond)
    return fcond
def _emit_copystrcontent(self, op, regalloc, fcond, is_unicode):
    """Emit COPYSTRCONTENT/COPYUNICODECONTENT as a memcpy() call.

    op args: [src, dst, srcstart, dststart, length].  Computes the
    raw source and destination addresses into temp registers, scales
    the length to bytes for unicode, then calls memcpy.
    """
    # compute the source address
    args = op.getarglist()
    base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args)
    ofs_loc = regalloc.rm.make_sure_var_in_reg(args[2], args)
    assert args[0] is not args[1]    # forbidden case of aliasing
    srcaddr_box = TempBox()
    forbidden_vars = [args[1], args[3], args[4], srcaddr_box]
    srcaddr_loc = regalloc.rm.force_allocate_reg(srcaddr_box,
                                                 forbidden_vars)
    self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc,
                                    is_unicode=is_unicode)
    # compute the destination address
    base_loc = regalloc.rm.make_sure_var_in_reg(args[1], forbidden_vars)
    ofs_loc = regalloc.rm.make_sure_var_in_reg(args[3], forbidden_vars)
    forbidden_vars = [args[4], srcaddr_box]
    dstaddr_box = TempBox()
    dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box,
                                                 forbidden_vars)
    self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc,
                                    is_unicode=is_unicode)
    # compute the length in bytes
    length_box = args[4]
    length_loc = regalloc.loc(length_box)
    if is_unicode:
        # unicode items are wider than one byte: multiply the item
        # count by the item size (1 << scale) into a fresh register
        forbidden_vars = [srcaddr_box, dstaddr_box]
        bytes_box = TempBox()
        bytes_loc = regalloc.rm.force_allocate_reg(bytes_box,
                                                   forbidden_vars)
        scale = self._get_unicode_item_scale()
        if not length_loc.is_core_reg():
            self.regalloc_mov(length_loc, bytes_loc)
            length_loc = bytes_loc
        assert length_loc.is_core_reg()
        self.mc.MOV_ri(r.ip.value, 1 << scale)
        self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value)
        length_box = bytes_box
        length_loc = bytes_loc
    # call memcpy()
    regalloc.before_call()
    self.simple_call_no_collect(imm(self.memcpy_addr),
                                [dstaddr_loc, srcaddr_loc, length_loc])
    regalloc.rm.possibly_free_var(length_box)
    regalloc.rm.possibly_free_var(dstaddr_box)
    regalloc.rm.possibly_free_var(srcaddr_box)
def test_mov_large_imm_loc_to_loc(self):
    """A large immediate moved into r0 must survive the epilogue."""
    expected = 2478
    self.a.gen_func_prolog()
    self.a.mov_loc_loc(imm(expected), r.r0)
    self.a.gen_func_epilog()
    assert run_asm(self.a) == expected
def test_mov_imm_to_reg(self):
    """A small immediate into a register is a single gen_load_int."""
    target = r(7)
    self.mov(imm(123), target,
             [mi('gen_load_int', 7, 123, cond=AL)])
def test_push_imm(self):
    """Pushing an immediate loads it into ip first, then pushes ip."""
    value = imm(12)
    expected = [
        mi('gen_load_int', ip.value, 12, cond=AL),
        mi('PUSH', [ip.value], cond=AL),
    ]
    self.push(value, expected)
def prepare_op_int_signext(self, op, fcond):
    """Prepare INT_SIGNEXT: [source reg, byte-count imm, result reg]."""
    arg_loc = self.make_sure_var_in_reg(op.getarg(0))
    nbytes = op.getarg(1).getint()
    res_loc = self.force_allocate_reg(op)
    return [arg_loc, imm(nbytes), res_loc]
def test_mov_small_imm_loc_to_loc(self):
    """A small immediate moved into r0 must survive the epilogue."""
    expected = 12
    self.a.gen_func_prolog()
    self.a.mov_loc_loc(imm(expected), r.r0)
    self.a.gen_func_epilog()
    assert run_asm(self.a) == expected
def test_mov_large_imm_to_reg(self):
    """A large immediate into a register is still one gen_load_int."""
    target = r(7)
    self.mov(imm(65536), target,
             [mi('gen_load_int', 7, 65536, cond=AL)])