def prepare_vec_int_signext(self, op):
    """Allocate locations for an integer sign-extension vector op."""
    assert isinstance(op, VectorOp)
    src = op.getarg(0)
    assert isinstance(src, VectorOp)
    srcloc = self.ensure_vector_reg(src)
    resloc = self.force_allocate_vector_reg(op)
    # the assembler needs both the source and the result element sizes
    return [resloc, srcloc, imm(src.bytesize), imm(op.bytesize)]
def prepare_unicodelen(self, op):
    """Allocate locations for UNICODELEN: read the length field."""
    _, _, len_ofs = symbolic.get_array_token(
        rstr.UNICODE, self.cpu.translate_support_code)
    baseloc = self.ensure_reg(op.getarg(0))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    # the length is one full word, read unsigned (sign flag 0)
    return [baseloc, imm(len_ofs), resloc, imm(WORD), imm(0)]
def _prepare_getfield(self, op):
    """Common location preparation for getfield-style operations."""
    offset, fieldsize, signed = unpack_fielddescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(offset))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [baseloc, ofsloc, resloc, imm(fieldsize), imm(signed)]
def prepare_vec_int_signext(self, op):
    """Locations for sign-extending the elements of an integer vector."""
    assert isinstance(op, VectorOp)
    argbox = op.getarg(0)
    assert isinstance(argbox, VectorOp)
    argloc = self.ensure_vector_reg(argbox)
    resloc = self.force_allocate_vector_reg(op)
    # pass source element size and target element size as immediates
    return [resloc, argloc, imm(argbox.bytesize), imm(op.bytesize)]
def test_immediate_to_reg(self):
    """Both small and large immediates are materialized via load_imm."""
    small = 5
    self.asm.regalloc_mov(imm(small), r10)
    large = 2 << 28
    self.asm.regalloc_mov(imm(large), r0)
    expected = [MI("load_imm", r10, small),
                MI("load_imm", r0, large)]
    assert self.asm.mc.instrs == expected
def prepare_vec_unpack_f(self, op):
    """Locations for extracting float elements out of a vector register."""
    idx_box = op.getarg(1)
    cnt_box = op.getarg(2)
    assert isinstance(idx_box, ConstInt)
    assert isinstance(cnt_box, ConstInt)
    srcloc = self.ensure_vector_reg(op.getarg(0))
    resloc = self.force_allocate_reg(op)
    return [resloc, srcloc, imm(idx_box.value), imm(cnt_box.value)]
def prepare_vec_unpack_f(self, op):
    """Locations for unpacking floats from a vector register."""
    index_box, count_box = op.getarg(1), op.getarg(2)
    assert isinstance(index_box, ConstInt)
    assert isinstance(count_box, ConstInt)
    vecloc = self.ensure_vector_reg(op.getarg(0))
    resloc = self.force_allocate_reg(op)
    return [resloc, vecloc, imm(index_box.value), imm(count_box.value)]
def prepare_raw_store(self, op):
    """Locations for a raw array store; the scale factor is fixed at 1."""
    itemsize, arrayofs, _ = unpack_arraydescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    valueloc = self.ensure_reg(op.getarg(2))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(arrayofs))
    return [baseloc, indexloc, valueloc, ofsloc, imm(1), imm(itemsize)]
def test_immediate_to_reg(self):
    """regalloc_mov of an immediate into a register emits load_imm."""
    self.asm.regalloc_mov(imm(5), r10)
    big_value = 2 << 28
    self.asm.regalloc_mov(imm(big_value), r0)
    assert self.asm.mc.instrs == [MI("load_imm", r10, 5),
                                  MI("load_imm", r0, big_value)]
def prepare_setinteriorfield_gc(self, op):
    """Locations for storing one field of an array-of-structs item."""
    ofs, itemsize, fieldsize, _ = unpack_interiorfielddescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    valueloc = self.ensure_reg(op.getarg(2))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
    return [baseloc, indexloc, valueloc, ofsloc,
            imm(itemsize), imm(fieldsize)]
def _prepare_raw_load(self, op):
    """Common preparation for raw load operations (scale fixed at 1)."""
    itemsize, arrayofs, signed = unpack_arraydescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(arrayofs))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [baseloc, indexloc, resloc, ofsloc,
            imm(1), imm(itemsize), imm(signed)]
def _prepare_getinteriorfield(self, op):
    """Common preparation for reading a field of an array-of-structs item."""
    ofs, itemsize, fieldsize, signed = \
        unpack_interiorfielddescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [baseloc, indexloc, resloc, ofsloc,
            imm(itemsize), imm(fieldsize), imm(signed)]
def prepare_unicodegetitem(self, op):
    """Locations for reading one character of a unicode string."""
    basesize, itemsize, _ = symbolic.get_array_token(
        rstr.UNICODE, self.cpu.translate_support_code)
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(basesize))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    sizeloc = imm(itemsize)
    # scale equals the item size; characters are read unsigned (0)
    return [baseloc, indexloc, resloc, ofsloc, sizeloc, sizeloc, imm(0)]
def prepare_finish(self, op):
    """Locations for FINISH: the fail descr plus an optional result arg."""
    fail_descr = cast_instance_to_gcref(op.getdescr())
    # we know it does not move, but well
    rgc._make_sure_does_not_move(fail_descr)
    descr_loc = imm(rffi.cast(lltype.Signed, fail_descr))
    if op.numargs() > 0:
        return [self.ensure_reg(op.getarg(0)), descr_loc]
    return [descr_loc]
def prepare_finish(self, op):
    """Locations for FINISH; the last entry is always the fail descr."""
    gcref = cast_instance_to_gcref(op.getdescr())
    # we know it does not move, but well
    rgc._make_sure_does_not_move(gcref)
    addr = rffi.cast(lltype.Signed, gcref)
    locs = []
    if op.numargs() > 0:
        locs.append(self.ensure_reg(op.getarg(0)))
    locs.append(imm(addr))
    return locs
def prepare_vec_pack_i(self, op):
    """Locations for new_res = vec_pack_i(res, src, index, count)."""
    assert isinstance(op, VectorOp)
    srcbox = op.getarg(1)
    idx_box = op.getarg(2)
    cnt_box = op.getarg(3)
    assert isinstance(idx_box, ConstInt)
    assert isinstance(cnt_box, ConstInt)
    vecloc = self.ensure_vector_reg(op.getarg(0))
    srcloc = self.ensure_reg(srcbox)
    resloc = self.force_allocate_vector_reg(op)
    # elements are taken from src index 0 and placed at idx in the result
    return [resloc, vecloc, srcloc,
            imm(idx_box.value), imm(0), imm(cnt_box.value)]
def _prepare_gc_load(self, op):
    """Common preparation for GC_LOAD; a negative size means signed."""
    baseloc = self.ensure_reg(op.getarg(0))
    ofsloc = self.ensure_reg_or_16bit_imm(op.getarg(1))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    sizebox = op.getarg(2)
    assert isinstance(sizebox, ConstInt)
    signed_size = sizebox.value      # negative for "signed"
    sign = 1 if signed_size < 0 else 0
    return [baseloc, ofsloc, resloc, imm(abs(signed_size)), imm(sign)]
def _prepare_gc_load(self, op):
    """Prepare GC_LOAD locations; signedness is encoded as negative size."""
    base_loc = self.ensure_reg(op.getarg(0))
    offset_loc = self.ensure_reg_or_16bit_imm(op.getarg(1))
    self.free_op_vars()
    result_loc = self.force_allocate_reg(op)
    size_box = op.getarg(2)
    assert isinstance(size_box, ConstInt)
    nsize = size_box.value           # negative for "signed"
    if nsize < 0:
        sign_flag, nsize = 1, -nsize
    else:
        sign_flag = 0
    return [base_loc, offset_loc, result_loc, imm(nsize), imm(sign_flag)]
def _prepare_load(self, op):
    """Prepare locations for a vector load from a primitive array.

    Only arrays of primitives are supported (no pointers, no structs);
    `integer` tells the assembler to use integer rather than float lanes.
    """
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, ofs, _ = unpack_arraydescr(descr)
    integer = not (descr.is_array_of_floats() or
                   descr.getconcrete_type() == FLOAT)
    base_loc = self.ensure_reg(op.getarg(0))
    ofs_loc = self.ensure_reg(op.getarg(1))
    result_loc = self.force_allocate_vector_reg(op)
    return [result_loc, base_loc, ofs_loc,
            imm(itemsize), imm(ofs), imm(integer)]
def test_immediate_to_mem(self):
    """Immediate-to-stack moves go through the scratch register."""
    self.asm.regalloc_mov(imm(5), stack(6))
    big = 2 << 28
    self.asm.regalloc_mov(imm(big), stack(7))
    expected = []
    for value, slot in [(5, 6), (big, 7)]:
        expected += [MI("alloc_scratch_reg"),
                     MI("load_imm", r0, value),
                     MI("store", r0.value, SPP.value, get_spp_offset(slot)),
                     MI("free_scratch_reg")]
    assert self.asm.mc.instrs == expected
def prepare_vec_arith_unary(self, op):
    """Locations for a one-operand vector arithmetic operation."""
    assert isinstance(op, VectorOp)
    srcloc = self.ensure_vector_reg(op.getarg(0))
    resloc = self.force_allocate_vector_reg(op)
    return [resloc, srcloc, imm(op.bytesize)]
def ensure_reg_or_16bit_imm(self, box):
    """Return a location for `box`: an immediate if it fits in 16 bits,
    otherwise a register.  Floats always get a float register."""
    if box.type == FLOAT:
        return self.fprm.ensure_reg(box)
    if check_imm_box(box):
        return imm(box.getint())
    return self.rm.ensure_reg(box)
def prepare_vec_int_is_true(self, op):
    """Locations for testing each integer vector element for non-zero."""
    assert isinstance(op, VectorOp)
    src = op.getarg(0)
    assert isinstance(src, VectorOp)
    srcloc = self.ensure_vector_reg(src)
    resloc = self.force_allocate_vector_reg_or_cc(op)
    return [resloc, srcloc, imm(src.bytesize)]
def ensure_reg_or_any_imm(self, box):
    """Return a location for `box`: an immediate for any constant,
    otherwise a register.  Floats always get a float register."""
    if box.type == FLOAT:
        return self.fprm.ensure_reg(box)
    if isinstance(box, Const):
        return imm(box.getint())
    return self.rm.ensure_reg(box)
def prepare_vec_arith_unary(self, op):
    """Locations for a unary vector arithmetic op: [res, arg, bytesize]."""
    assert isinstance(op, VectorOp)
    argloc = self.ensure_vector_reg(op.getarg(0))
    resloc = self.force_allocate_vector_reg(op)
    sizeloc = imm(op.bytesize)
    return [resloc, argloc, sizeloc]
def _prepare_guard(self, op, args=None):
    """Append the frame depth and the failarg locations to `args`.

    For vectorized loops (vecopt), also records the current location of
    each accumulator in its AccumInfo and substitutes the location of
    the original (pre-accumulation) value into the arg list.
    """
    if args is None:
        args = []
    # args[0] is always the current frame depth
    args.append(imm(self.fm.get_frame_depth()))
    for arg in op.getfailargs():
        if arg:
            args.append(self.loc(arg))
        else:
            args.append(None)
    self.possibly_free_vars(op.getfailargs())
    #
    # generate_quick_failure() produces up to 14 instructions per guard
    self.limit_loop_break -= 14 * 4
    # specifically for vecopt
    descr = op.getdescr()
    if not descr:
        return args
    assert isinstance(descr, AbstractFailDescr)
    if descr.rd_vector_info:
        accuminfo = descr.rd_vector_info
        while accuminfo:
            # +1 skips the frame-depth entry prepended above
            i = accuminfo.getpos_in_failargs() + 1
            accuminfo.location = args[i]
            loc = self.loc(accuminfo.getoriginal())
            args[i] = loc
            accuminfo = accuminfo.next()
    return args
def ensure_reg_or_16bit_imm(self, box):
    """Location for `box`; small integer constants become immediates."""
    if box.type == FLOAT:
        return self.fprm.ensure_reg(box)
    else:
        # non-float: use an immediate when the constant fits in 16 bits
        if check_imm_box(box):
            return imm(box.getint())
        return self.rm.ensure_reg(box)
def prepare_zero_array(self, op):
    """Locations for zeroing a slice of an array."""
    itemsize, arrayofs, _ = unpack_arraydescr(op.getdescr())
    baseloc = self.ensure_reg(op.getarg(0))
    startloc = self.ensure_reg_or_16bit_imm(op.getarg(1))
    lengthloc = self.ensure_reg_or_16bit_imm(op.getarg(2))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(arrayofs))
    return [baseloc, startloc, lengthloc, ofsloc, imm(itemsize)]
def test_immediate_to_mem(self):
    """Each immediate-to-stack move allocates and frees the scratch reg."""
    self.asm.regalloc_mov(imm(5), stack(6))
    big_value = 2 << 28
    self.asm.regalloc_mov(imm(big_value), stack(7))
    expected = [
        MI("alloc_scratch_reg"),
        MI("load_imm", r0, 5),
        MI("store", r0.value, SPP.value, get_spp_offset(6)),
        MI("free_scratch_reg"),
        MI("alloc_scratch_reg"),
        MI("load_imm", r0, big_value),
        MI("store", r0.value, SPP.value, get_spp_offset(7)),
        MI("free_scratch_reg"),
    ]
    assert self.asm.mc.instrs == expected
def prepare_vec_int_is_true(self, op):
    """Locations for VEC_INT_IS_TRUE: per-element non-zero test."""
    assert isinstance(op, VectorOp)
    argbox = op.getarg(0)
    assert isinstance(argbox, VectorOp)
    argloc = self.ensure_vector_reg(argbox)
    resloc = self.force_allocate_vector_reg_or_cc(op)
    # element size comes from the argument, not the (boolean) result
    return [resloc, argloc, imm(argbox.bytesize)]
def _prepare_guard(self, op, args=None):
    """Append the frame depth and the failarg locations to `args`.

    For vectorized loops, each AccumInfo records its accumulator's
    current location, and the arg list entry is replaced with the
    location of the original (pre-accumulation) value.
    """
    if args is None:
        args = []
    # args[0] is always the current frame depth
    args.append(imm(self.fm.get_frame_depth()))
    for arg in op.getfailargs():
        if arg:
            args.append(self.loc(arg))
        else:
            args.append(None)
    self.possibly_free_vars(op.getfailargs())
    #
    # generate_quick_failure() produces up to 14 instructions per guard
    self.limit_loop_break -= 14 * 4
    # specifically for vecopt
    descr = op.getdescr()
    if not descr:
        return args
    assert isinstance(descr, AbstractFailDescr)
    if descr.rd_vector_info:
        accuminfo = descr.rd_vector_info
        while accuminfo:
            # +1 skips the frame-depth entry prepended above
            i = accuminfo.getpos_in_failargs()+1
            accuminfo.location = args[i]
            loc = self.loc(accuminfo.getoriginal())
            args[i] = loc
            accuminfo = accuminfo.next()
    return args
def prepare_vec_store(self, op):
    """Locations for storing a vector register into a primitive array."""
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, ofs, _ = unpack_arraydescr(descr)
    baseloc = self.ensure_reg(op.getarg(0))
    ofsloc = self.ensure_reg(op.getarg(1))
    valueloc = self.ensure_vector_reg(op.getarg(2))
    # integer lanes unless the array (or its concrete type) is float
    integer = not (descr.is_array_of_floats() or
                   descr.getconcrete_type() == FLOAT)
    return [baseloc, ofsloc, valueloc,
            imm(itemsize), imm(ofs), imm(integer)]
def ensure_reg_or_any_imm(self, box):
    """Location for `box`; any non-float constant becomes an immediate."""
    if box.type == FLOAT:
        return self.fprm.ensure_reg(box)
    else:
        # non-float: constants of any magnitude may be immediates here
        if isinstance(box, Const):
            return imm(box.getint())
        return self.rm.ensure_reg(box)
def _prepare_gc_load_indexed(self, op):
    """Common preparation for GC_LOAD_INDEXED (scale must be 1)."""
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    assert op.getarg(2).getint() == 1     # scale
    ofsloc = self.ensure_reg_or_16bit_imm(op.getarg(3))
    assert ofsloc.is_imm()  # the arg(3) should always be a small constant
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    sizebox = op.getarg(4)
    assert isinstance(sizebox, ConstInt)
    signed_size = sizebox.value           # negative for "signed"
    sign = 1 if signed_size < 0 else 0
    return [baseloc, indexloc, resloc, ofsloc,
            imm(abs(signed_size)), imm(sign)]
def _prepare_gc_load_indexed(self, op):
    """Prepare GC_LOAD_INDEXED; only a unit scale factor is supported."""
    base_loc = self.ensure_reg(op.getarg(0))
    index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
    assert op.getarg(2).getint() == 1  # scale
    ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3))
    assert ofs_loc.is_imm()  # the arg(3) should always be a small constant
    self.free_op_vars()
    res_loc = self.force_allocate_reg(op)
    size_box = op.getarg(4)
    assert isinstance(size_box, ConstInt)
    nsize = size_box.value             # negative for "signed"
    if nsize < 0:
        sign_flag, nsize = 1, -nsize
    else:
        sign_flag = 0
    return [base_loc, index_loc, res_loc, ofs_loc,
            imm(nsize), imm(sign_flag)]
def prepare_arraylen_gc(self, op):
    """Locations for reading the length field of a GC array."""
    arraydescr = op.getdescr()
    assert isinstance(arraydescr, ArrayDescr)
    lenofs = arraydescr.lendescr.offset
    # the length offset must be encodable as an immediate
    assert _check_imm_arg(lenofs)
    baseloc = self.ensure_reg(op.getarg(0))
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [resloc, baseloc, imm(lenofs)]
def prepare_unicodesetitem(self, op):
    """Locations for writing one character into a unicode string."""
    basesize, itemsize, _ = symbolic.get_array_token(
        rstr.UNICODE, self.cpu.translate_support_code)
    baseloc = self.ensure_reg(op.getarg(0))
    indexloc = self.ensure_reg_or_any_imm(op.getarg(1))
    valueloc = self.ensure_reg(op.getarg(2))
    ofsloc = self.ensure_reg_or_16bit_imm(ConstInt(basesize))
    sizeloc = imm(itemsize)
    # scale equals the item size for unicode items
    return [baseloc, indexloc, valueloc, ofsloc, sizeloc, sizeloc]
def prepare_vec_bool(self, op):
    """Prepare locations for a two-operand vector comparison/boolean op.

    The result may land in the condition-code register, hence
    force_allocate_vector_reg_or_cc.
    """
    assert isinstance(op, VectorOp)
    loc0 = self.ensure_vector_reg(op.getarg(0))
    loc1 = self.ensure_vector_reg(op.getarg(1))
    resloc = self.force_allocate_vector_reg_or_cc(op)
    return [resloc, loc0, loc1, imm(op.bytesize)]
def prepare_vec_bool(self, op):
    """Prepare locations for a binary vector boolean operation.

    Both operands are forced into vector registers; the result may be
    materialized in the condition-code register.
    """
    assert isinstance(op, VectorOp)
    lhs_loc = self.ensure_vector_reg(op.getarg(0))
    rhs_loc = self.ensure_vector_reg(op.getarg(1))
    resloc = self.force_allocate_vector_reg_or_cc(op)
    return [resloc, lhs_loc, rhs_loc, imm(op.bytesize)]
def prepare_int_sub(self, op):
    """Locations for INT_SUB; the second operand may be an immediate.

    NOTE(review): the asymmetric range [-2**15+1, 2**15] presumably
    matches an instruction that negates the immediate — verify against
    the assembler.
    """
    loc0 = self.ensure_reg(op.getarg(0))
    arg1 = op.getarg(1)
    if check_imm_box(arg1, -2**15 + 1, 2**15):
        loc1 = imm(arg1.getint())
    else:
        loc1 = self.ensure_reg(arg1)
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [loc0, loc1, resloc]
def prepare_vec_pack_i(self, op):
    """Locations for new_res = vec_pack_i(res, src, index, count)."""
    assert isinstance(op, VectorOp)
    src_box = op.getarg(1)
    index_box = op.getarg(2)
    count_box = op.getarg(3)
    assert isinstance(index_box, ConstInt)
    assert isinstance(count_box, ConstInt)
    vec_loc = self.ensure_vector_reg(op.getarg(0))
    src_loc = self.ensure_reg(src_box)
    res_loc = self.force_allocate_vector_reg(op)
    res_index = index_box.value  # where to put it in result?
    src_index = 0
    return [res_loc, vec_loc, src_loc,
            imm(res_index), imm(src_index), imm(count_box.value)]
def prepare_int_sub(self, op):
    """Locations for INT_SUB with an optional 16-bit immediate subtrahend.

    NOTE(review): the shifted immediate range presumably reflects the
    negation of the constant in the emitted instruction — confirm in
    the assembler before changing it.
    """
    lhs_loc = self.ensure_reg(op.getarg(0))
    rhs = op.getarg(1)
    rhs_loc = (imm(rhs.getint())
               if check_imm_box(rhs, -2**15 + 1, 2**15)
               else self.ensure_reg(rhs))
    self.free_op_vars()
    res_loc = self.force_allocate_reg(op)
    return [lhs_loc, rhs_loc, res_loc]
def f(self, op):
    """Locations for a binary op whose second operand may be an
    immediate within [lower_bound, upper_bound] (closed over)."""
    lhs_loc = self.ensure_reg(op.getarg(0))
    rhs = op.getarg(1)
    if check_imm_box(rhs, lower_bound, upper_bound):
        rhs_loc = imm(rhs.getint())
    else:
        rhs_loc = self.ensure_reg(rhs)
    self.free_op_vars()
    res_loc = self.force_allocate_reg_or_cc(op)
    return [lhs_loc, rhs_loc, res_loc]
def f(self, op):
    """Prepare [arg0, arg1, result] where arg1 becomes an immediate if
    it lies within the closed-over [lower_bound, upper_bound]."""
    first = self.ensure_reg(op.getarg(0))
    second_box = op.getarg(1)
    second = (imm(second_box.getint())
              if check_imm_box(second_box, lower_bound, upper_bound)
              else self.ensure_reg(second_box))
    self.free_op_vars()
    result = self.force_allocate_reg_or_cc(op)
    return [first, second, result]
def prepare_vec_unpack_i(self, op):
    """Locations for extracting integer elements from a vector.

    The source argument may be a scalar instead of a vector, in which
    case it lives in a general purpose register and the element size
    is a full WORD.  Likewise the result may be scalar or vector.
    """
    assert isinstance(op, VectorOp)
    index = op.getarg(1)
    count = op.getarg(2)
    assert isinstance(index, ConstInt)
    assert isinstance(count, ConstInt)
    arg = op.getarg(0)
    if arg.is_vector():
        srcloc = self.ensure_vector_reg(arg)
        assert isinstance(arg, VectorOp)
        size = arg.bytesize
    else:
        # unpack
        srcloc = self.ensure_reg(arg)
        size = WORD
    if op.is_vector():
        resloc = self.force_allocate_vector_reg(op)
    else:
        resloc = self.force_allocate_reg(op)
    return [resloc, srcloc, imm(index.value), imm(count.value), imm(size)]
def _prepare_load(self, op):
    """Prepare locations for a vector load from a primitive array.

    Pointer and struct arrays are rejected; `integer` selects integer
    versus float lanes in the assembler.
    """
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, ofs, _ = unpack_arraydescr(descr)
    integer = not (descr.is_array_of_floats() or
                   descr.getconcrete_type() == FLOAT)
    base_loc = self.ensure_reg(op.getarg(0))
    ofs_loc = self.ensure_reg(op.getarg(1))
    result_loc = self.force_allocate_vector_reg(op)
    return [
        result_loc, base_loc, ofs_loc, imm(itemsize), imm(ofs), imm(integer)
    ]
def prepare_vec_unpack_i(self, op):
    """Locations for extracting integer elements from a vector.

    Both the source and the result may independently be scalar or
    vector; a scalar source uses a general purpose register and a
    WORD-sized element.
    """
    assert isinstance(op, VectorOp)
    index = op.getarg(1)
    count = op.getarg(2)
    assert isinstance(index, ConstInt)
    assert isinstance(count, ConstInt)
    arg = op.getarg(0)
    if arg.is_vector():
        srcloc = self.ensure_vector_reg(arg)
        assert isinstance(arg, VectorOp)
        size = arg.bytesize
    else:
        # unpack
        srcloc = self.ensure_reg(arg)
        size = WORD
    if op.is_vector():
        resloc = self.force_allocate_vector_reg(op)
    else:
        resloc = self.force_allocate_reg(op)
    return [resloc, srcloc, imm(index.value), imm(count.value), imm(size)]
def prepare_vec_expand_i(self, op):
    """Locations for broadcasting a scalar integer into every vector lane.

    A constant argument is loaded into a scratch register first.
    """
    assert isinstance(op, VectorOp)
    arg = op.getarg(0)
    mc = self.assembler.mc
    if arg.is_constant():
        assert isinstance(arg, ConstInt)
        scalarloc = self.rm.get_scratch_reg()
        mc.load_imm(scalarloc, arg.value)
    else:
        scalarloc = self.ensure_reg(arg)
    resloc = self.force_allocate_vector_reg(op)
    return [resloc, scalarloc, imm(PARAM_SAVE_AREA_OFFSET)]
def prepare_vec_store(self, op):
    """Locations for a vector store into an array of primitives."""
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, ofs, _ = unpack_arraydescr(descr)
    base_loc = self.ensure_reg(op.getarg(0))
    ofs_loc = self.ensure_reg(op.getarg(1))
    value_loc = self.ensure_vector_reg(op.getarg(2))
    # float arrays use float lanes; everything else integer lanes
    is_integer = not (descr.is_array_of_floats() or
                      descr.getconcrete_type() == FLOAT)
    return [
        base_loc, ofs_loc, value_loc,
        imm(itemsize), imm(ofs), imm(is_integer)
    ]
def prepare_vec_expand_i(self, op):
    """Broadcast a scalar integer to all lanes of a vector register."""
    assert isinstance(op, VectorOp)
    argbox = op.getarg(0)
    if argbox.is_constant():
        # constants are materialized into a scratch register
        assert isinstance(argbox, ConstInt)
        argloc = self.rm.get_scratch_reg()
        self.assembler.mc.load_imm(argloc, argbox.value)
    else:
        argloc = self.ensure_reg(argbox)
    resloc = self.force_allocate_vector_reg(op)
    return [resloc, argloc, imm(PARAM_SAVE_AREA_OFFSET)]
def prepare_int_add_or_mul(self, op):
    """Locations for commutative int add/mul.

    A constant operand is swapped into second position so it can be
    encoded as an immediate.
    """
    arg0 = op.getarg(0)
    arg1 = op.getarg(1)
    if check_imm_box(arg0):
        arg0, arg1 = arg1, arg0
    loc0 = self.ensure_reg(arg0)
    if check_imm_box(arg1):
        loc1 = imm(arg1.getint())
    else:
        loc1 = self.ensure_reg(arg1)
    self.free_op_vars()
    resloc = self.force_allocate_reg(op)
    return [loc0, loc1, resloc]
def _prepare_guard(self, op, args=None):
    """Append the frame depth and the failarg locations to `args`.

    Returns `args` extended with imm(frame_depth) followed by one
    location (or None for holes) per failarg.
    """
    if args is None:
        args = []
    # args[0] is always the current frame depth
    args.append(imm(self.fm.get_frame_depth()))
    for arg in op.getfailargs():
        if arg:
            args.append(self.loc(arg))
        else:
            args.append(None)
    self.possibly_free_vars(op.getfailargs())
    #
    # generate_quick_failure() produces up to 14 instructions per guard
    self.limit_loop_break -= 14 * 4
    #
    return args
def prepare_guard_class(self, op):
    """Locations for GUARD_CLASS: object register plus class immediate."""
    objloc = self.ensure_reg(op.getarg(0))
    class_const = force_int(op.getarg(1).getint())
    return self._prepare_guard(op, [objloc, imm(class_const)])
def prepare_unary_cmp(self, op):
    """Locations for comparing a single operand against zero."""
    argloc = self.ensure_reg(op.getarg(0))
    zeroloc = imm(0)
    self.free_op_vars()
    resloc = self.force_allocate_reg_or_cc(op)
    return [argloc, zeroloc, resloc]