def _call_packet_copy(i, **kwargs):
    """Translate a ``packet_copy`` call into a list of eBPF instructions.

    Emits a run-time bounds check comparing ``skb->data + offset +
    num_bytes`` against ``skb->data_end``; only when the access is in
    bounds does control fall through to the emitted memcpy.  When the
    access would overrun, execution jumps to a label placed *after* the
    memcpy, so the copy is skipped entirely.

    Args:
        i: the bytecode instruction being translated.  ``i.src_vars``
            must unpack as ``(fn, skb, dst_ptr, offset, num_bytes)``.

    Returns:
        A list of ``bi`` instruction objects.

    Raises:
        TranslationError: if ``skb`` is not an ``_mem.ArgVar`` or
            ``num_bytes`` is not a compile-time constant.
    """
    # If you're thinking 'wow, this function looks complicated,' just go
    # take a look at kernel/bpf/verifier.c:find_good_pkt_pointers to see
    # the hoops that we're jumping through here.
    ret = []
    fn, skb, dst_ptr, offset, num_bytes = i.src_vars
    # TODO: also support ptr to context?
    if not isinstance(skb, _mem.ArgVar):
        raise TranslationError(
            i.starts_line,
            'First argument to packet_copy must be SkBuffContext argument')
    elif not isinstance(num_bytes, _mem.ConstVar):
        raise TranslationError(
            i.starts_line,
            'Num bytes must not be dynamically defined for packet_copy')
    # Memory operands for the skb's data / data_end fields, addressed
    # relative to the register holding the skb argument.
    skb_data_mem = bi.Mem(_get_var_reg(skb), skb.var_type.data.offset,
                          bi.Size.Word)
    skb_data_end_mem = bi.Mem(_get_var_reg(skb), skb.var_type.data_end.offset,
                              bi.Size.Word)
    out_of_bounds = _make_tmp_label()
    # ret = _mov(dst_ptr, bi.Reg.R1)
    dst_ptr = _convert_var(dst_ptr)
    # %r2 = skb->data
    ret.append(bi.Mov(skb_data_mem, bi.Reg.R2))
    # %r2 += offset
    if isinstance(offset, _mem.ConstVar):
        # A const offset may be a raw int or a ctypes-style object that
        # carries the real number in its .value attribute.
        off_val = offset.val
        if hasattr(off_val, 'value'):
            off_val = off_val.value
        if off_val != 0:
            ret.append(bi.Add(bi.Imm(off_val), bi.Reg.R2))
    else:
        # Dynamic offset: stage it in %r3 (clobbered again below).
        ret.extend(_mov(offset, bi.Reg.R3))
        ret.append(bi.Add(bi.Reg.R3, bi.Reg.R2))
    # %r3 = %r2  (keep the start-of-copy pointer for the memcpy)
    ret.append(bi.Mov(bi.Reg.R2, bi.Reg.R3))
    # %r2 += num_bytes
    ret.append(bi.Add(bi.Imm(num_bytes.val), bi.Reg.R2))
    # %r4 = skb->data_end
    ret.append(bi.Mov(skb_data_end_mem, bi.Reg.R4))
    # if skb->data + offset + num_bytes > skb->data_end: goto out_of_bounds
    ret.append(bi.JumpIfGreaterThan(bi.Reg.R4, bi.Reg.R2, out_of_bounds))
    ret.extend(_memcpy_packet(i, dst_ptr, bi.Reg.R3, num_bytes.val))
    # Landing pad for the bounds-check failure: the memcpy is skipped.
    ret.append(bi.Label(out_of_bounds))
    return ret
def _load_attr_addr(i, **kwargs):
    """Emit instructions storing the address of attribute ``i.argval``.

    The source variable is either a pointer to a struct (dereference its
    pointee's field offset) or a struct held directly in a stack slot or
    argument register (field offset relative to that base).  The computed
    address is staged in %r0 and then moved into the destination var.

    Returns:
        A list of ``bi`` instruction objects.
    """
    src, dst = i.src_vars[0], i.dst_vars[0]
    if _is_ptr(src.var_type):
        # Pointer: field offset comes from the pointed-to type, and the
        # base address is the pointer's own value.
        attr_off = getattr(src.var_type.var_type, i.argval).offset
        base_insns = _mov(src, bi.Reg.R0)
    else:
        attr_off = getattr(src.var_type, i.argval).offset
        assert isinstance(src, (_stack.StackVar, _mem.ArgVar))
        if isinstance(src, _stack.StackVar):
            # Stack-resident structs fold the slot offset into the total.
            attr_off += src.offset
        base_insns = _mov(_get_var_reg(src), bi.Reg.R0)
    insns = list(base_insns)
    if attr_off != 0:
        insns.append(bi.Add(bi.Imm(attr_off), bi.Reg.R0))
    insns.extend(_mov(bi.Reg.R0, dst))
    return insns
def _lea(i, src, dst, stack, **kwargs):
    """Emit instructions loading the effective address of ``src`` into ``dst``.

    File-descriptor-backed datastructures are special-cased: their
    "address" is a map-fd immediate.  Constant sources are first spilled
    to a freshly allocated stack slot so they have an address at all.

    Raises:
        TranslationError: for a non-const file descriptor datastructure.
    """
    if issubclass(src.var_type, FileDescriptorDatastructure):
        if not isinstance(src, _mem.ConstVar):
            raise TranslationError(
                i.starts_line,
                'Cannot handle non-const file descriptor datastructures')
        return _mov(bi.MapFdImm(src.val.fd), dst)

    prologue = []
    if isinstance(src, _mem.ConstVar):
        # Constants have no memory location; lay one down on the stack.
        spill = stack.alloc(src.var_type)
        prologue = _mov_const(src.var_type, src.val,
                              _get_var_reg(spill), spill.offset)
        src = spill

    # TODO: fix types.  Right now, dst's type may be a ulong for addrof,
    # because we can't plug in real return types yet, so no ptr/pointee
    # assertion is made here.
    base_reg = _get_var_reg(src)
    if src.offset == 0:
        return prologue + _mov(base_reg, dst)
    return (prologue +
            [bi.Mov(base_reg, bi.Reg.R0),
             bi.Add(bi.Imm(src.offset), bi.Reg.R0)] +
            _mov(bi.Reg.R0, dst))
def _load_arr_element_addr(i, arr, index, dst_reg):
    """Emit instructions loading the address of ``arr[index]`` into ``dst_reg``.

    Args:
        i: the bytecode instruction being translated (for error reporting).
        arr: the array variable — either a pointer to an array or an array
            held directly in a register-addressed location.
        index: must be an ``_mem.ConstVar``; dynamic indices are rejected.
        dst_reg: destination register for the element address.

    Returns:
        A list of ``bi`` instruction objects.

    Raises:
        TranslationError: if ``index`` is not a compile-time constant.
    """
    if not isinstance(index, _mem.ConstVar):
        raise TranslationError(i.starts_line,
                               'Illegal to use dynamic index to array')
    # Bug fix: a const's .val may be a raw int rather than a ctypes-style
    # object carrying .value — normalize the same way _call_packet_copy
    # normalizes const offsets, instead of unconditionally reading .value.
    idx = index.val
    if hasattr(idx, 'value'):
        idx = idx.value
    if _is_ptr(arr.var_type):
        # Pointer to array: base address is the pointer's value.
        el_off = idx * ctypes.sizeof(arr.var_type.var_type._type_)
        ret = _mov(arr, dst_reg)
    else:
        # In-place array: base is the holding register plus the var's offset.
        el_off = idx * ctypes.sizeof(arr.var_type._type_) + arr.offset
        ret = [bi.Mov(_get_var_reg(arr), dst_reg)]
    if el_off != 0:
        ret.append(bi.Add(bi.Imm(el_off), dst_reg))
    return ret
def _inplace_add(i, **kwargs):
    """Emit instructions for an in-place add: dst_vars[0] = src0 + src1.

    Stages the two operands in %r0 and %r1, adds %r1 into %r0, and moves
    the result into the destination variable.

    Returns:
        A list of ``bi`` instruction objects.
    """
    insns = []
    insns.extend(_mov(i.src_vars[0], bi.Reg.R0))
    insns.extend(_mov(i.src_vars[1], bi.Reg.R1))
    insns.append(bi.Add(bi.Reg.R1, bi.Reg.R0))
    insns.extend(_mov(bi.Reg.R0, i.dst_vars[0]))
    return insns