def _ail_handle_Convert(self, expr):
    """
    Simplify an AIL Convert expression.

    Propagates Top, folds a Convert stacked on another Convert, and evaluates a
    Convert of a Const immediately; otherwise rebuilds the Convert around the
    simplified operand.

    :param expr:    The Expr.Convert expression to simplify.
    :return:        The simplified expression (a Top, the unwrapped operand, a
                    Const truncated to the target width, or a new Convert).
    """
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Top:
        # the operand is unknown; the result is an unknown of the target size (in bytes)
        return Top(expr.to_bits // 8)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        # collapse the two stacked Converts into a single one
        # (bugfix: propagate expr.tags so instruction metadata is not lost,
        # matching the fallback Convert below)
        return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                            operand_expr.operand, **expr.tags)

    if type(operand_expr) is Expr.Const:
        # do the conversion right away by masking the value to the target width
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        # (bugfix: propagate expr.tags so instruction metadata is not lost)
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr, **expr.tags)
    return converted
def _ail_handle_Convert(self, expr: Expr.Convert):
    """
    Simplify an AIL Convert expression.

    Folds a Convert stacked on another Convert, evaluates a Convert of a Const
    immediately, and pushes the Convert into selected binary operations when it
    is sound to do so; otherwise rebuilds the Convert around the simplified
    operand.

    :param expr:    The Expr.Convert expression to simplify.
    :return:        The simplified expression.
    """
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        else:
            # collapse the two stacked Converts into one
            return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                                operand_expr.operand, **expr.tags)
    elif type(operand_expr) is Expr.Const:
        # do the conversion right away
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)
    elif type(operand_expr) is Expr.BinaryOp \
            and operand_expr.op in {'Mul', 'Shl', 'Div', 'DivMod', 'Add', 'Sub'}:
        if isinstance(operand_expr.operands[1], Expr.Const):
            if isinstance(operand_expr.operands[0], Expr.Register) and \
                    expr.from_bits == operand_expr.operands[0].bits:
                # push the Convert onto the register operand; the Const operand stays as-is
                # (bugfix: propagate expr.tags onto the inner Convert, matching every
                # other Convert created in this method)
                converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed,
                                         operand_expr.operands[0], **expr.tags)
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [converted, operand_expr.operands[1]],
                                     operand_expr.signed,
                                     **expr.tags)
            # NOTE: an optimization that stripped a matching inner Convert off operands[0]
            # used to live here; it was removed because it was unsound:
            # Conv(32->64, (Conv(64->32, r14<8>) + 0x1<32>)) became Add(r14<8>, 0x1<32>),
            # while ideally it should become Conv(32->64, Conv(64->32, r14<8> + 0x1<64>))
            # and then the double convert can be pretty-printed away.
        elif isinstance(operand_expr.operands[0], Expr.Convert) \
                and isinstance(operand_expr.operands[1], Expr.Convert) \
                and operand_expr.operands[0].from_bits == operand_expr.operands[1].from_bits:
            if operand_expr.operands[0].to_bits == operand_expr.operands[1].to_bits \
                    and expr.from_bits == operand_expr.operands[0].to_bits \
                    and expr.to_bits == operand_expr.operands[1].from_bits:
                # both operands are truncations from the same width that this Convert
                # restores; drop all three Converts and operate at full width
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [operand_expr.operands[0].operand,
                                      operand_expr.operands[1].operand],
                                     expr.is_signed,
                                     **operand_expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr, **expr.tags)
    return converted
def _ail_handle_Convert(self, expr):
    """
    Simplify an AIL Convert expression.

    Folds a Convert stacked on another Convert, evaluates a Convert of a Const
    immediately, and pushes the Convert into selected binary operations when it
    is sound to do so; otherwise rebuilds the Convert around the simplified
    operand.

    :param expr:    The Expr.Convert expression to simplify.
    :return:        The simplified expression.
    """
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        else:
            # collapse the two stacked Converts into one
            return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                                operand_expr.operand, **expr.tags)
    elif type(operand_expr) is Expr.Const:
        # do the conversion right away
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)
    elif type(operand_expr) is Expr.BinaryOp \
            and operand_expr.op in {'Mul', 'Shl', 'Div', 'DivMod', 'Add', 'Sub'}:
        if isinstance(operand_expr.operands[1], Expr.Const):
            if isinstance(operand_expr.operands[0], Expr.Register) and \
                    expr.from_bits == operand_expr.operands[0].bits:
                # push the Convert onto the register operand; the Const operand stays as-is
                converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed,
                                         operand_expr.operands[0])
                # bugfix: BinaryOp takes its signedness flag positionally before **tags;
                # it was missing here (every other BinaryOp in this file passes it)
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [converted, operand_expr.operands[1]],
                                     operand_expr.signed,
                                     **expr.tags)
            # NOTE: a branch that stripped a matching inner Convert off operands[0] was
            # removed here: it rewrote Conv(32->64, (Conv(64->32, r14<8>) + 0x1<32>)) into
            # Add(r14<8>, 0x1<32>), silently dropping the truncation (unsound).
        elif isinstance(operand_expr.operands[0], Expr.Convert) \
                and isinstance(operand_expr.operands[1], Expr.Convert) \
                and operand_expr.operands[0].from_bits == operand_expr.operands[1].from_bits:
            if operand_expr.operands[0].to_bits == operand_expr.operands[1].to_bits \
                    and expr.from_bits == operand_expr.operands[0].to_bits \
                    and expr.to_bits == operand_expr.operands[1].from_bits:
                # both operands are truncations from the same width that this Convert
                # restores; drop all three Converts and operate at full width
                # (bugfix: pass the signedness flag BinaryOp requires)
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [operand_expr.operands[0].operand,
                                      operand_expr.operands[1].operand],
                                     expr.is_signed,
                                     **operand_expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr, **expr.tags)
    return converted
def _test_concatenation(pv: PropValue):
    """
    Try to fold a two-piece PropValue into one expression.

    When *pv* holds exactly two details (one at offset 0) defined at the same
    location, the pieces are combined: if the high piece is the constant 0 and
    the sizes match a known widening pattern, the result is an unsigned Convert
    of the low piece; otherwise the pieces are concatenated.

    :param pv:  The PropValue to inspect.
    :return:    A (success, expression) pair; (False, None) when folding does
                not apply.
    """
    details = pv.offset_and_details
    if details is None or len(details) != 2 or 0 not in details:
        return False, None

    low = details[0]
    high_offset = next(k for k in details if k != 0)
    high = details[high_offset]
    if low.def_at != high.def_at:
        # pieces come from different definitions; do not combine them
        return False, None

    # it's the same value! we can apply concatenation here
    if isinstance(high.expr, Expr.Const) and high.expr.value == 0:
        # a zeroed high part is probably an up-cast; map (low size, high size)
        # in bytes onto (from_bits, to_bits) of the corresponding zero-extension
        zext_widths = {
            (1, 1): (8, 16),    # char to short
            (1, 3): (8, 32),    # char to int
            (1, 7): (8, 64),    # char to int64
            (2, 2): (16, 32),   # short to int
            (2, 6): (16, 64),   # short to int64
            (4, 4): (32, 64),   # int to int64
        }
        widths = zext_widths.get((low.size, high.size))
        if widths is not None:
            from_bits, to_bits = widths
            return True, Expr.Convert(None, from_bits, to_bits, False, low.expr)

    return True, Expr.BinaryOp(None, "Concat", [high.expr, low.expr], False)
def _ail_handle_Convert(self, expr: Expr.Convert) -> PropValue:
    """
    Handle an AIL Convert expression and produce a PropValue.

    Computes the converted claripy value (sign/zero-extension or truncation),
    then simplifies the expression side: single-expression operands get the
    Convert folded or rebuilt, multi-piece operands have their per-offset
    details trimmed/extended to the target width.

    :param expr:    The Expr.Convert expression being handled.
    :return:        A PropValue carrying the converted value and per-offset
                    expression details.
    """
    o_value = self._expr(expr.operand)
    if o_value is None or self.state.is_top(o_value.value):
        # no usable value; model the result as Top of the target width
        new_value = self.state.top(expr.to_bits)
    else:
        if expr.from_bits < expr.to_bits:
            # widening: extend by the width difference, honoring signedness
            if expr.is_signed:
                new_value = claripy.SignExt(expr.to_bits - expr.from_bits, o_value.value)
            else:
                new_value = claripy.ZeroExt(expr.to_bits - expr.from_bits, o_value.value)
        elif expr.from_bits > expr.to_bits:
            # narrowing: keep the low to_bits bits
            new_value = claripy.Extract(expr.to_bits - 1, 0, o_value.value)
        else:
            # same width: pass the value through
            new_value = o_value.value
    # NOTE(review): if o_value is None these attribute reads would raise;
    # presumably _expr never returns None on the paths that reach here — confirm
    o_expr = o_value.one_expr
    o_defat = o_value.one_defat
    if o_expr is not None:
        # easy cases
        if type(o_expr) is Expr.Convert:
            if expr.from_bits == o_expr.to_bits and expr.to_bits == o_expr.from_bits:
                # eliminate the redundant Convert
                new_expr = o_expr.operand
            else:
                # collapse the two stacked Converts into one
                new_expr = Expr.Convert(expr.idx, o_expr.from_bits, expr.to_bits, expr.is_signed, o_expr.operand)
        elif type(o_expr) is Expr.Const:
            # do the conversion right away: mask the constant to the target width
            value = o_expr.value
            mask = (2 ** expr.to_bits) - 1
            value &= mask
            new_expr = Expr.Const(expr.idx, o_expr.variable, value, expr.to_bits)
        else:
            # cannot simplify; rebuild the Convert around the simplified operand
            new_expr = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, o_expr, **expr.tags)
        if isinstance(new_expr, Expr.Convert) and not new_expr.is_signed \
                and new_expr.to_bits > new_expr.from_bits and new_expr.from_bits % self.arch.byte_width == 0:
            # special handling for zero-extension: it simplifies the code if we explicitly model zeros
            # low piece: the original operand; high piece: an explicit zero constant
            new_size = new_expr.from_bits // self.arch.byte_width
            offset_and_details = {
                0: Detail(new_size, new_expr.operand, o_defat),
                new_size: Detail(
                    new_expr.size - new_size,
                    Expr.Const(expr.idx, None, 0, new_expr.to_bits - new_expr.from_bits),
                    self._codeloc()),
            }
        else:
            # a single detail covering the whole converted expression
            offset_and_details = {0: Detail(expr.size, new_expr, self._codeloc())}
        return PropValue(new_value, offset_and_details=offset_and_details)
    elif o_value.offset_and_details:
        # hard cases... we will keep certain labels and eliminate other labels
        # walk the operand's per-offset pieces and keep only the bytes inside
        # [start_offset, end_offset), re-basing offsets and slicing expressions
        start_offset = 0
        end_offset = expr.to_bits // self.arch.byte_width  # end_offset is exclusive
        offset_and_details = {}
        max_offset = max(o_value.offset_and_details.keys())
        for offset_, detail_ in o_value.offset_and_details.items():
            if offset_ < start_offset < offset_ + detail_.size:
                # we start here: the piece straddles start_offset; take its tail
                off = 0
                siz = min(end_offset, offset_ + detail_.size) - start_offset
                expr_ = PropValue.extract_ail_expression(
                    (start_offset - offset_) * self.arch.byte_width, siz * self.arch.byte_width, detail_.expr
                )
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
            elif offset_ >= start_offset and offset_ + detail_.size <= end_offset:
                # we include the whole thing
                off = offset_ - start_offset
                siz = detail_.size
                if off == max_offset and off + siz < end_offset:
                    # extend the expr: last piece falls short of the target width
                    expr_ = PropValue.extend_ail_expression(
                        (end_offset - (off + siz)) * self.arch.byte_width, detail_.expr
                    )
                    siz = end_offset - off
                else:
                    expr_ = detail_.expr
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
            elif offset_ < end_offset <= offset_ + detail_.size:
                # we include all the way until end_offset; truncate the piece
                if offset_ < start_offset:
                    off = 0
                    siz = end_offset - start_offset
                else:
                    off = offset_ - start_offset
                    siz = end_offset - offset_
                expr_ = PropValue.extract_ail_expression(0, siz * self.arch.byte_width, detail_.expr)
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
        return PropValue(
            new_value, offset_and_details=offset_and_details
        )
    else:
        # it's empty... no expression is available for whatever reason
        return PropValue.from_value_and_details(new_value, expr.size, expr, self._codeloc())
def _rewrite(self, ccall: Expr.VEXCCallExpression) -> Optional[Expr.Expression]:
    """
    Rewrite selected amd64 flag-computation ccalls into plain AIL expressions.

    Supports ``amd64g_calculate_condition`` for LE/Z/L/NBE over SUB-style
    opcodes, and ``amd64g_calculate_rflags_c`` (carry flag) for ADD-, SUB- and
    DEC-style opcodes.

    :param ccall:   The VEX ccall expression to rewrite.
    :return:        The rewritten AIL expression, or None if unsupported.
    """
    sub_ops = {
        AMD64_OpTypes['G_CC_OP_SUBB'], AMD64_OpTypes['G_CC_OP_SUBW'],
        AMD64_OpTypes['G_CC_OP_SUBL'], AMD64_OpTypes['G_CC_OP_SUBQ'],
    }

    if ccall.cee_name == "amd64g_calculate_condition":
        cond, op, dep_1, dep_2 = ccall.operands[0], ccall.operands[1], ccall.operands[2], ccall.operands[3]
        if isinstance(cond, Expr.Const) and isinstance(op, Expr.Const):
            # condition code -> (AIL comparison op, signedness)
            cmp_by_cond = {
                AMD64_CondTypes['CondLE']: ("CmpLE", True),    # dep_1 <=s dep_2
                AMD64_CondTypes['CondZ']: ("CmpEQ", False),    # dep_1 - dep_2 == 0
                AMD64_CondTypes['CondL']: ("CmpLT", True),     # dep_1 - dep_2 <s 0
                AMD64_CondTypes['CondNBE']: ("CmpGT", False),  # dep_1 - dep_2 > 0
            }
            mapped = cmp_by_cond.get(cond.value)
            if mapped is not None and op.value in sub_ops:
                cmp_op, is_signed = mapped
                r = Expr.BinaryOp(ccall.idx, cmp_op, (dep_1, dep_2), is_signed, **ccall.tags)
                return Expr.Convert(None, r.bits, ccall.bits, False, r, **ccall.tags)

    elif ccall.cee_name == "amd64g_calculate_rflags_c":
        # calculate the carry flag
        op, dep_1, dep_2, ndep = ccall.operands[0], ccall.operands[1], ccall.operands[2], ccall.operands[3]
        if isinstance(op, Expr.Const):
            op_v = op.value

            if op_v in {AMD64_OpTypes['G_CC_OP_ADDB'], AMD64_OpTypes['G_CC_OP_ADDW'],
                        AMD64_OpTypes['G_CC_OP_ADDL'], AMD64_OpTypes['G_CC_OP_ADDQ']}:
                # pc_actions_ADD: compare dep_1 + dep_2 against dep_1 to detect wrap-around
                return Expr.ITE(
                    None,
                    Expr.BinaryOp(
                        None, "CmpLE",
                        [
                            Expr.BinaryOp(None, "Add", [dep_1, dep_2], False),
                            dep_1,
                        ],
                        False,
                    ),
                    Expr.Const(None, None, 0, ccall.bits),
                    Expr.Const(None, None, 1, ccall.bits),
                    **ccall.tags)

            if op_v in sub_ops:
                # pc_actions_SUB: unsigned dep_1 < dep_2
                cf = Expr.BinaryOp(None, "CmpLT", [dep_1, dep_2], False)
                if cf.bits == ccall.bits:
                    return cf
                return Expr.Convert(None, cf.bits, ccall.bits, False, cf, **ccall.tags)

            if op_v in {AMD64_OpTypes['G_CC_OP_DECB'], AMD64_OpTypes['G_CC_OP_DECW'],
                        AMD64_OpTypes['G_CC_OP_DECL'], AMD64_OpTypes['G_CC_OP_DECQ']}:
                # pc_actions_DEC: recover the carry bit from ndep via mask and shift
                return Expr.BinaryOp(None, "Shr", [
                    Expr.BinaryOp(None, "And", [
                        ndep,
                        Expr.Const(None, None, AMD64_CondBitMasks['G_CC_MASK_C'], 64)
                    ], False),
                    Expr.Const(None, None, AMD64_CondBitOffsets['G_CC_SHIFT_C'], 64),
                ], False, **ccall.tags)

    return None