def _genop_vec_getarrayitem(self, op, arglocs, resloc):
    # considers item scale (raw_load does not)
    base_loc, ofs_loc, size_loc, ofs, integer_loc, aligned_loc = arglocs
    scale = get_scale(size_loc.value)
    src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale)
    self._vec_load(resloc, src_addr, integer_loc.value,
                   size_loc.value, aligned_loc.value)

def _genop_discard_vec_setarrayitem(self, op, arglocs):
    # considers item scale (raw_store does not)
    base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
    scale = get_scale(size_loc.value)
    dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, scale)
    self._vec_store(dest_loc, value_loc, integer_loc.value,
                    size_loc.value, aligned_loc.value)

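# Every snippet in this section funnels an item size through get_scale(),
# which maps a power-of-two byte size onto the log2 shift amount ("scale")
# consumed by the addressing mode. A minimal sketch of such a helper --
# an assumption for illustration, not necessarily the backend's exact
# implementation:
def get_scale(size):
    # 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3
    scale = 0
    while (1 << scale) < size:
        scale += 1
    assert (1 << scale) == size, "item size must be a power of two"
    return scale
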
def _emit_op_gc_load(self, op, arglocs, regalloc, fcond):
    base_loc, ofs_loc, res_loc, nsize_loc = arglocs
    nsize = nsize_loc.value
    signed = nsize < 0
    scale = get_scale(abs(nsize))
    self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale),
                        signed, fcond)
    return fcond

def emit_op_gc_store_indexed(self, op, arglocs):
    value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    # add the base offset
    if ofs_loc.value > 0:
        self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
        index_loc = r.ip0
    scale = get_scale(size_loc.value)
    self._write_to_mem(value_loc, base_loc, index_loc, scale)

def threadlocalref_get(self, op, arglocs):
    res_loc, = arglocs
    ofs_loc = self.imm(op.getarg(1).getint())
    calldescr = op.getdescr()
    ofs = self.saved_threadlocal_addr
    self.load_reg(self.mc, res_loc, r.sp, ofs)
    scale = get_scale(calldescr.get_result_size())
    signed = (calldescr.is_result_signed() != 0)
    self._load_from_mem(res_loc, res_loc, ofs_loc, scale, signed)

def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond):
    value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    # add the base offset
    if ofs_loc.value > 0:
        self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value)
        index_loc = r.ip
    scale = get_scale(size_loc.value)
    self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond)
    return fcond

def emit_opx_threadlocalref_get(self, op, arglocs, regalloc, fcond):
    ofs_loc, size_loc, sign_loc, res_loc = arglocs
    assert ofs_loc.is_imm()
    assert size_loc.is_imm()
    assert sign_loc.is_imm()
    ofs = self.saved_threadlocal_addr
    self.load_reg(self.mc, res_loc, r.sp, ofs)
    scale = get_scale(size_loc.value)
    signed = sign_loc.value != 0
    self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale),
                        signed, fcond)
    return fcond

def _emit_op_gc_load_indexed(self, op, arglocs):
    res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    nsize = nsize_loc.value
    signed = (nsize < 0)
    # add the base offset
    if ofs_loc.value > 0:
        self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
        index_loc = r.ip0
    #
    scale = get_scale(abs(nsize))
    self._load_from_mem(res_loc, base_loc, index_loc, scale, signed)

def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond):
    res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    nsize = nsize_loc.value
    signed = nsize < 0
    # add the base offset
    if ofs_loc.value > 0:
        self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value)
        index_loc = r.ip
    #
    scale = get_scale(abs(nsize))
    self._load_from_mem(res_loc, base_loc, index_loc, imm(scale),
                        signed, fcond)
    return fcond

def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond):
    (base_loc, index_loc, value_loc,
        ofs_loc, ofs, itemsize, fieldsize) = arglocs
    scale = get_scale(fieldsize.value)
    tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc])
    assert not save
    self.mc.gen_load_int(tmploc.value, itemsize.value)
    self.mc.MUL(tmploc.value, index_loc.value, tmploc.value)
    if ofs.value > 0:
        if ofs_loc.is_imm():
            self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value)
        else:
            self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value)
    self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond)
    return fcond

def emit_op_gc_store_indexed(self, op, arglocs):
    value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    # add the base offset
    if ofs_loc.value != 0:
        if check_imm_arg(ofs_loc.value):
            self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
        else:
            # ofs_loc.value is too large for an ADD_ri
            self.load(r.ip0, ofs_loc)
            self.mc.ADD_rr(r.ip0.value, r.ip0.value, index_loc.value)
        index_loc = r.ip0
    scale = get_scale(size_loc.value)
    self._write_to_mem(value_loc, base_loc, index_loc, scale)

def _consider_vec_load(self, op):
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, _, _ = unpack_arraydescr(descr)
    integer = not (descr.is_array_of_floats() or
                   descr.getconcrete_type() == FLOAT)
    args = op.getarglist()
    scale = get_scale(op.getarg(2).getint())
    ofs = op.getarg(3).getint()
    base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
    ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
    result_loc = self.force_allocate_reg(op)
    self.perform(op, [base_loc, ofs_loc, imm(itemsize), imm(scale),
                      imm(ofs), imm(integer)], result_loc)

def flush_vec_cc(self, rev_cond, lhsloc, resloc, size):
    # After emitting an instruction that leaves a boolean result in
    # a condition code (cc), call this.  In the common case, result_loc
    # will be set to SPP by the regalloc, which in this case means
    # "propagate it between this operation and the next guard by keeping
    # it in the cc".  In the uncommon case, result_loc is another
    # register, and we emit a load from the cc into this register.
    if resloc is ebp:
        self.guard_success_cc = rev_cond
    else:
        assert lhsloc is xmm0
        maskloc = X86_64_XMM_SCRATCH_REG
        self.mc.MOVAPD(maskloc, heap(self.element_ones[get_scale(size)]))
        self.mc.PXOR(resloc, resloc)
        # note that resloc contains true/false for each element set by
        # the last compare operation
        self.mc.PBLENDVB_xx(resloc.value, maskloc.value)

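# flush_vec_cc() indexes self.element_ones with get_scale(size): one
# 16-byte constant per element width, every lane holding integer 1, so
# that PBLENDVB turns the all-ones/all-zeros compare mask into 1/0 per
# lane. A hypothetical sketch of building such constants (the name
# build_element_ones and the packing scheme are assumptions):
import struct

def build_element_ones(xmm_bytes=16):
    fmts = {0: 'B', 1: 'H', 2: 'I', 3: 'Q'}   # scale -> element format
    table = []
    for scale in sorted(fmts):
        count = xmm_bytes >> scale            # lanes per 16-byte register
        table.append(struct.pack('<%d%s' % (count, fmts[scale]),
                                 *([1] * count)))
    return table
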
def _emit_op_gc_load_indexed(self, op, arglocs):
    res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs
    assert index_loc.is_core_reg()
    nsize = nsize_loc.value
    signed = (nsize < 0)
    # add the base offset
    if ofs_loc.value != 0:
        if check_imm_arg(ofs_loc.value):
            self.mc.ADD_ri(r.ip0.value, index_loc.value, ofs_loc.value)
        else:
            # ofs_loc.value is too large for an ADD_ri
            self.load(r.ip0, ofs_loc)
            self.mc.ADD_rr(r.ip0.value, r.ip0.value, index_loc.value)
        index_loc = r.ip0
    #
    scale = get_scale(abs(nsize))
    self._load_from_mem(res_loc, base_loc, index_loc, scale, signed)

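# The two variants above guard the offset with check_imm_arg() before
# folding it into ADD_ri; offsets that do not fit are first loaded into
# the scratch register and added register-to-register. A plausible
# sketch, assuming the 12-bit unsigned immediate field of the AArch64
# ADD/SUB (immediate) encoding:
def check_imm_arg(imm):
    # ADD_ri can encode 0..4095 directly (the shifted LSL #12 form is
    # ignored in this sketch).
    return 0 <= imm <= 0xfff
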
def consider_vec_store(self, op):
    # TODO
    descr = op.getdescr()
    assert isinstance(descr, ArrayDescr)
    assert not descr.is_array_of_pointers() and \
           not descr.is_array_of_structs()
    itemsize, _, _ = unpack_arraydescr(descr)
    args = op.getarglist()
    base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
    value_loc = self.make_sure_var_in_reg(op.getarg(2), args)
    ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
    scale = get_scale(op.getarg(3).getint())
    ofs = op.getarg(4).getint()
    integer = not (descr.is_array_of_floats() or \
                   descr.getconcrete_type() == FLOAT)
    self.perform_discard(op, [base_loc, ofs_loc, value_loc,
                              imm(itemsize), imm(scale), imm(ofs),
                              imm(integer)])

def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond):
    (base_loc, index_loc, res_loc,
        ofs_loc, ofs, itemsize, fieldsize) = arglocs
    scale = get_scale(fieldsize.value)
    tmploc, save = self.get_tmp_reg([base_loc, ofs_loc])
    assert not save
    self.mc.gen_load_int(tmploc.value, itemsize.value)
    self.mc.MUL(tmploc.value, index_loc.value, tmploc.value)
    descr = op.getdescr()
    assert isinstance(descr, InteriorFieldDescr)
    signed = descr.fielddescr.is_field_signed()
    if ofs.value > 0:
        if ofs_loc.is_imm():
            self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value)
        else:
            self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value)
    ofs_loc = tmploc
    self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale),
                        signed, fcond)
    return fcond

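# Both interiorfield snippets compute the element address as
# base + index * itemsize + offset, doing the multiply in a temporary
# register because itemsize need not be a power of two; only the field
# access itself goes through get_scale(fieldsize). A worked illustration
# with hypothetical values:
base, index, itemsize, fieldofs = 0x1000, 3, 12, 8
addr = base + index * itemsize + fieldofs
assert addr == 0x102c   # 0x1000 + 36 + 8
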
def emit_op_gc_store(self, op, arglocs, regalloc, fcond):
    value_loc, base_loc, ofs_loc, size_loc = arglocs
    scale = get_scale(size_loc.value)
    self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond)
    return fcond

def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond):
    base_loc, ofs, res, size = arglocs
    signed = op.getdescr().is_field_signed()
    scale = get_scale(size.value)
    self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond)
    return fcond

def emit_op_gc_store(self, op, arglocs):
    value_loc, base_loc, ofs_loc, size_loc = arglocs
    scale = get_scale(size_loc.value)
    self._write_to_mem(value_loc, base_loc, ofs_loc, scale)

def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond):
    value_loc, base_loc, ofs, size = arglocs
    scale = get_scale(size.value)
    self._write_to_mem(value_loc, base_loc, ofs, imm(scale), fcond)
    return fcond

def _emit_op_gc_load(self, op, arglocs):
    base_loc, ofs_loc, res_loc, nsize_loc = arglocs
    nsize = nsize_loc.value
    signed = (nsize < 0)
    scale = get_scale(abs(nsize))
    self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed)
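
# Across the gc_load variants, the size operand doubles as a sign flag:
# its magnitude is the item size and a negative value requests a
# sign-extending load. A small illustration of the convention
# (decode_nsize is a hypothetical helper, reusing the get_scale sketch):
def decode_nsize(nsize):
    signed = nsize < 0
    scale = get_scale(abs(nsize))
    return scale, signed

assert decode_nsize(-8) == (3, True)    # 8-byte signed load
assert decode_nsize(2) == (1, False)    # 2-byte zero-extending load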