def _ail_handle_Sub(self, expr):
    """Handle an AIL Sub: constant-fold, adjust base-pointer offsets, or rebuild.

    Fix: the original performed the exact same Top check twice; the second
    occurrence (returning ``Top(expr.bits // 8)``) was unreachable dead code
    and has been removed.
    """
    operand_0 = self._expr(expr.operands[0])
    operand_1 = self._expr(expr.operands[1])

    # An unknown operand makes the whole subtraction unknown.
    if type(operand_0) is Top or type(operand_1) is Top:
        return Top(operand_0.size)

    if isinstance(operand_0, Expr.Const) and isinstance(operand_1, Expr.Const):
        # both constants: fold right away
        return Expr.Const(expr.idx, None, operand_0.value - operand_1.value, expr.bits)
    elif isinstance(operand_0, Expr.BasePointerOffset) and isinstance(operand_1, Expr.Const):
        # (base + off) - c  ==>  base + (off - c), on a copy
        r = operand_0.copy()
        r.offset -= operand_1.value
        return r

    return Expr.BinaryOp(expr.idx, 'Sub',
                         [operand_0 if operand_0 is not None else expr.operands[0],
                          operand_1 if operand_1 is not None else expr.operands[1]],
                         expr.signed,
                         **expr.tags)
def _ail_handle_And(self, expr):
    """Handle an AIL And; collapses stack-pointer alignment masks."""
    lhs = self._expr(expr.operands[0])
    rhs = self._expr(expr.operands[1])

    if type(lhs) is Top or type(rhs) is Top:
        return Top(lhs.size)

    # Special logic for SP alignment: (sp & alignment_mask) is treated as sp.
    sp_is_aligned = (
        type(lhs) is Expr.StackBaseOffset
        and type(rhs) is Expr.Const
        and is_alignment_mask(rhs.value)
    )
    if sp_is_aligned:
        return lhs

    return Expr.BinaryOp(expr.idx, 'And', [lhs, rhs], expr.signed, **expr.tags)
def _ail_handle_Not(self, expr):
    """Handle an AIL Not (unary); the value is Top, but the expression is
    rebuilt from the propagated operand expression.

    Fix: the fallback operand was ``expr.operands[0]``, but ``Expr.UnaryOp``
    exposes a single ``operand`` attribute (the rest of this handler already
    uses ``expr.operand``); the old fallback would raise AttributeError.
    """
    o_value = self._expr(expr.operand)
    value = self.state.top(expr.bits)
    if o_value is None:
        new_expr = expr
    else:
        o_expr = o_value.one_expr
        new_expr = Expr.UnaryOp(expr.idx, 'Not',
                                o_expr if o_expr is not None else expr.operand,
                                **expr.tags)
    return PropValue.from_value_and_details(value, expr.size, new_expr, self._codeloc())
def _ail_handle_Concat(self, expr):
    """Handle an AIL Concat of two operands."""
    high = self._expr(expr.operands[0])
    low = self._expr(expr.operands[1])

    # either side unknown -> the whole concatenation is unknown
    if self.state.is_top(high) or self.state.is_top(low):
        return self.state.top(expr.bits)

    return Expr.BinaryOp(expr.idx, 'Concat', [high, low], expr.signed, **expr.tags)
def _ail_handle_Reinterpret(self, expr: Expr.Reinterpret) -> Optional[PropValue]:
    """Handle an AIL Reinterpret (bit-pattern cast); when the value is unknown,
    rebuild the Reinterpret around the propagated operand expression."""
    arg = self._expr(expr.operand)
    if self.state.is_top(arg.value):
        inner = arg.one_expr
        if inner is not None:
            # keep the cast but wrap the simplified operand
            expr = Expr.Reinterpret(expr.idx, expr.from_bits, expr.from_type,
                                    expr.to_bits, expr.to_type, inner, **expr.tags)
    return PropValue.from_value_and_details(arg.value, expr.size, expr, self._codeloc())
def _ail_handle_Convert(self, expr):
    """Handle an AIL Convert: cancel inverse conversions, fold constants, or
    rebuild the Convert around the simplified operand.

    Fix: ``**expr.tags`` is now propagated into every rebuilt expression; the
    original dropped tags (and with them ins_addr information) on all three
    paths, inconsistent with the other Convert handlers in this code base.
    """
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        else:
            # merge the two conversions into one
            return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                                operand_expr.operand, **expr.tags)
    elif type(operand_expr) is Expr.Const:
        # do the conversion right away by masking to the target width
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr,
                             **expr.tags)
    return converted
def _ail_handle_Register(self, expr):
    """Handle a register read; SP/BP reads become StackBaseOffset expressions
    when the stack-pointer tracker knows the offset, and known replacements
    from the state are applied."""
    # special handling for SP and BP
    if self._stack_pointer_tracker is not None and \
            expr.reg_offset in (self.arch.sp_offset, self.arch.bp_offset):
        sb_offset = self._stack_pointer_tracker.offset_before(self.ins_addr, expr.reg_offset)
        if sb_offset is not None:
            new_expr = Expr.StackBaseOffset(None, self.arch.bits, sb_offset)
            self.state.add_replacement(self._codeloc(), expr, new_expr)
            return new_expr

    replacement = self.state.get_variable(expr)
    if replacement is not None:
        l.debug("Add a replacement: %s with %s", expr, replacement)
        self.state.add_replacement(self._codeloc(), expr, replacement)
        expr = replacement
    return expr
def _ail_handle_Sub(self, expr: Expr.BinaryOp):
    """Handle an AIL Sub: constant folding and base-pointer offset adjustment."""
    minuend = self._expr(expr.operands[0])
    subtrahend = self._expr(expr.operands[1])

    if self.state.is_top(minuend) or self.state.is_top(subtrahend):
        return self.state.top(expr.bits)

    if isinstance(minuend, Expr.Const) and isinstance(subtrahend, Expr.Const):
        # fold the subtraction of two constants
        return Expr.Const(expr.idx, None, minuend.value - subtrahend.value, expr.bits)
    if isinstance(minuend, Expr.BasePointerOffset) and isinstance(subtrahend, Expr.Const):
        # (base + off) - c  ==>  base + (off - c)
        adjusted = minuend.copy()
        adjusted.offset -= subtrahend.value
        return adjusted

    return Expr.BinaryOp(
        expr.idx,
        'Sub',
        [minuend if minuend is not None else expr.operands[0],
         subtrahend if subtrahend is not None else expr.operands[1]],
        expr.signed,
        **expr.tags,
    )
def _rewrite(self, ccall: Expr.VEXCCallExpression) -> Optional[Expr.Expression]:
    """Rewrite an amd64g_calculate_condition ccall into a plain comparison
    when both the condition and the operation codes are constants.

    Returns the rewritten expression, or None when no rewrite applies.
    """
    if ccall.cee_name != "amd64g_calculate_condition":
        return None

    cond, op, dep_1, dep_2 = ccall.operands[0], ccall.operands[1], ccall.operands[2], ccall.operands[3]
    if isinstance(cond, Expr.Const) and isinstance(op, Expr.Const):
        # CondLE over a dword (SUBL) or byte (SUBB) subtraction: dep_1 <=s dep_2
        if cond.value == AMD64_CondTypes['CondLE'] and \
                op.value in (AMD64_OpTypes['G_CC_OP_SUBL'], AMD64_OpTypes['G_CC_OP_SUBB']):
            return Expr.BinaryOp(ccall.idx, "CmpLE", (dep_1, dep_2), True, **ccall.tags)
    return None
def _ail_handle_Load(self, expr):
    """Handle a memory Load; resolves reads of known stack variables."""
    addr = self._expr(expr.addr)

    if type(addr) is Top:
        return Top(expr.size)

    if isinstance(addr, Expr.StackBaseOffset):
        # the stack slot may already hold a tracked value
        var = self.state.get_stack_variable(addr, expr.size, endness=expr.endness)
        if var is not None:
            return var

    if addr == expr.addr:
        # address unchanged - reuse the original expression
        return expr
    return Expr.Load(expr.idx, addr, expr.size, expr.endness, **expr.tags)
def _ail_handle_Cmp(self, expr: Expr.BinaryOp):
    """Handle comparison operators; a Top operand falls back to the original
    operand expression instead of polluting the comparison."""
    lhs = self._expr(expr.operands[0])
    rhs = self._expr(expr.operands[1])

    if self.state.is_top(lhs):
        lhs = expr.operands[0]
    if self.state.is_top(rhs):
        rhs = expr.operands[1]

    if lhs is expr.operands[0] and rhs is expr.operands[1]:
        # nothing changed
        return expr

    return Expr.BinaryOp(expr.idx, expr.op, [lhs, rhs], expr.signed, **expr.tags)
def _ail_handle_Register(self, expr: Expr.Register) -> Optional[PropValue]: self.state: 'PropagatorAILState' # Special handling for SP and BP if self._stack_pointer_tracker is not None: if expr.reg_offset == self.arch.sp_offset: sb_offset = self._stack_pointer_tracker.offset_before(self.ins_addr, self.arch.sp_offset) if sb_offset is not None: new_expr = Expr.StackBaseOffset(None, self.arch.bits, sb_offset) self.state.add_replacement(self._codeloc(), expr, new_expr) return PropValue.from_value_and_details( self.sp_offset(sb_offset), expr.size, new_expr, self._codeloc() ) elif expr.reg_offset == self.arch.bp_offset: sb_offset = self._stack_pointer_tracker.offset_before(self.ins_addr, self.arch.bp_offset) if sb_offset is not None: new_expr = Expr.StackBaseOffset(None, self.arch.bits, sb_offset) self.state.add_replacement(self._codeloc(), expr, new_expr) return PropValue.from_value_and_details( self.sp_offset(sb_offset), expr.size, new_expr, self._codeloc() ) new_expr = self.state.load_register(expr) if new_expr is not None: # check if this new_expr uses any expression that has been overwritten all_subexprs = list(new_expr.all_exprs()) if not any(self.is_using_outdated_def(subexpr) for subexpr in all_subexprs) and \ len(all_subexprs) == 1: subexpr = all_subexprs[0] if subexpr.size == expr.size: l.debug("Add a replacement: %s with %s", expr, subexpr) self.state.add_replacement(self._codeloc(), expr, subexpr) return new_expr return PropValue.from_value_and_details(self.state.top(expr.bits), expr.size, expr, self._codeloc())
def _ail_handle_Convert(self, expr):
    """Recognize the compiled form of division by a "magic number".

    Compilers lower ``x / d`` into a 128-bit multiplication by a magic
    constant C, keeping the upper 64 bits, i.e. Conv(128->64, (x >> Y) * C)
    (the shift may appear as a DivMod by a power of two, from which Y is
    recovered via log2). When ``_check_divisor`` confirms that C and Y encode
    a divisor, the whole pattern is rewritten back into DivMod(x, divisor).
    Everything else is delegated to the parent Convert handler.
    """
    if expr.from_bits == 128 and expr.to_bits == 64:
        operand_expr = self._expr(expr.operand)
        if isinstance(operand_expr, Expr.BinaryOp) \
                and operand_expr.op == 'Mul' \
                and isinstance(operand_expr.operands[1], Expr.Const) \
                and isinstance(operand_expr.operands[0], Expr.BinaryOp):
            if operand_expr.operands[0].op in {'Shr', 'DivMod'} \
                    and isinstance(operand_expr.operands[0].operands[1], Expr.Const):
                if operand_expr.operands[0].op == 'Shr':
                    # x >> Y: the shift amount is Y directly
                    Y = operand_expr.operands[0].operands[1].value
                else:
                    # x / 2**Y spelled as DivMod: recover Y from the divisor
                    Y = int(math.log2(operand_expr.operands[0].operands[1].value))
                C = operand_expr.operands[1].value
                divisor = self._check_divisor(pow(2, 64 + Y), C)
                if divisor:
                    X = operand_expr.operands[0].operands[0]
                    new_const = Expr.Const(expr.idx, None, divisor, 64)
                    return Expr.BinaryOp(expr.idx, 'DivMod', [X, new_const], **expr.tags)
    return super()._ail_handle_Convert(expr)
def _ail_handle_Convert(self, expr):
    """Handle an AIL Convert: cancel inverse conversions, fold constants, and
    push conversions into binary operations where it is sound.

    Fix: removed the rewrite of Conv(A->B, (Conv(B->A, x) op c)) into
    (x op c). It was unsound — e.g. Conv(32->64, (Conv(64->32, r14) + 0x1<32>))
    must not become Add(r14<8>, 0x1<32>), because the inner truncation changes
    the arithmetic width (the sibling Convert handler in this code base
    documents the same removal). Also dropped a commented-out ipdb debug line.
    """
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        else:
            # merge the two conversions into a single one
            return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                                operand_expr.operand, **expr.tags)
    elif type(operand_expr) is Expr.Const:
        # do the conversion right away
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)
    elif type(operand_expr) is Expr.BinaryOp \
            and operand_expr.op in {'Mul', 'Shl', 'Div', 'DivMod', 'Add', 'Sub'}:
        if isinstance(operand_expr.operands[1], Expr.Const):
            if isinstance(operand_expr.operands[0], Expr.Register) and \
                    expr.from_bits == operand_expr.operands[0].bits:
                # push the conversion onto the full-width register operand
                converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed,
                                         operand_expr.operands[0])
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [converted, operand_expr.operands[1]],
                                     **expr.tags)
        elif isinstance(operand_expr.operands[0], Expr.Convert) \
                and isinstance(operand_expr.operands[1], Expr.Convert) \
                and operand_expr.operands[0].from_bits == operand_expr.operands[1].from_bits:
            if operand_expr.operands[0].to_bits == operand_expr.operands[1].to_bits \
                    and expr.from_bits == operand_expr.operands[0].to_bits \
                    and expr.to_bits == operand_expr.operands[1].from_bits:
                # both operands were narrowed from the same width and this
                # Convert widens back: drop all three conversions
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [operand_expr.operands[0].operand,
                                      operand_expr.operands[1].operand],
                                     **operand_expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr,
                             **expr.tags)
    return converted
def _ail_handle_Convert(self, expr: Expr.Convert):
    """Handle an AIL Convert: cancel inverse conversions, fold constants, and
    push the conversion into binary operations where it is sound to do so."""
    operand_expr = self._expr(expr.operand)

    if type(operand_expr) is Expr.Convert:
        if expr.from_bits == operand_expr.to_bits and expr.to_bits == operand_expr.from_bits:
            # eliminate the redundant Convert
            return operand_expr.operand
        else:
            # merge the two conversions into a single one
            return Expr.Convert(expr.idx, operand_expr.from_bits, expr.to_bits, expr.is_signed,
                                operand_expr.operand, **expr.tags)
    elif type(operand_expr) is Expr.Const:
        # do the conversion right away by masking to the target width
        value = operand_expr.value
        mask = (2 ** expr.to_bits) - 1
        value &= mask
        return Expr.Const(expr.idx, operand_expr.variable, value, expr.to_bits, **expr.tags)
    elif type(operand_expr) is Expr.BinaryOp \
            and operand_expr.op in {'Mul', 'Shl', 'Div', 'DivMod', 'Add', 'Sub'}:
        if isinstance(operand_expr.operands[1], Expr.Const):
            if isinstance(operand_expr.operands[0], Expr.Register) and \
                    expr.from_bits == operand_expr.operands[0].bits:
                # push the conversion onto the full-width register operand
                converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed,
                                         operand_expr.operands[0])
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [converted, operand_expr.operands[1]],
                                     operand_expr.signed,
                                     **expr.tags)
            # TODO: the below optimization was unsound
            # Conv(32->64, (Conv(64->32, r14<8>) + 0x1<32>)) became Add(r14<8>, 0x1<32>)
            # ideally it should become Conv(32->64, Conv(64->32, r14<8> + 0x1<64>))
            # and then the double convert can be pretty-printed away
            #elif isinstance(operand_expr.operands[0], Expr.Convert) and \
            #        expr.from_bits == operand_expr.operands[0].to_bits and \
            #        expr.to_bits == operand_expr.operands[0].from_bits:
            #    return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
            #                         [operand_expr.operands[0].operand, operand_expr.operands[1]],
            #                         operand_expr.signed,
            #                         **operand_expr.tags)
        elif isinstance(operand_expr.operands[0], Expr.Convert) \
                and isinstance(operand_expr.operands[1], Expr.Convert) \
                and operand_expr.operands[0].from_bits == operand_expr.operands[1].from_bits:
            if operand_expr.operands[0].to_bits == operand_expr.operands[1].to_bits \
                    and expr.from_bits == operand_expr.operands[0].to_bits \
                    and expr.to_bits == operand_expr.operands[1].from_bits:
                # both operands were narrowed from the same width and this
                # Convert widens back: drop all three conversions
                return Expr.BinaryOp(operand_expr.idx, operand_expr.op,
                                     [operand_expr.operands[0].operand,
                                      operand_expr.operands[1].operand],
                                     expr.is_signed,
                                     **operand_expr.tags)

    converted = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits, expr.is_signed, operand_expr,
                             **expr.tags)
    return converted
def _ail_handle_Concat(self, expr):
    """Handle an AIL Concat; the value is always Top, but the expression is
    rebuilt from the propagated operand expressions when available."""
    hi_value = self._expr(expr.operands[0])
    lo_value = self._expr(expr.operands[1])
    top_value = self.state.top(expr.bits)

    if hi_value is None or lo_value is None:
        rebuilt = expr
    else:
        hi_expr = hi_value.one_expr
        lo_expr = lo_value.one_expr
        rebuilt = Expr.BinaryOp(
            expr.idx,
            'Concat',
            [
                hi_expr if hi_expr is not None else expr.operands[0],
                lo_expr if lo_expr is not None else expr.operands[1],
            ],
            expr.signed,
            **expr.tags,
        )
    return PropValue.from_value_and_details(top_value, expr.size, rebuilt, self._codeloc())
def _ail_handle_And(self, expr: Expr.BinaryOp):
    """Handle an AIL And; collapses stack-pointer alignment masks."""
    self.state: 'PropagatorAILState'
    lhs = self._expr(expr.operands[0])
    rhs = self._expr(expr.operands[1])

    if self.state.is_top(lhs) or self.state.is_top(rhs):
        return self.state.top(expr.bits)

    # Special logic for stack pointer alignment:
    # (sp-based expression) & alignment_mask is treated as the expression itself.
    if type(rhs) is Expr.Const and is_alignment_mask(rhs.value) \
            and self.extract_offset_to_sp(lhs) is not None:
        return lhs

    return Expr.BinaryOp(expr.idx, 'And', [lhs, rhs], expr.signed, **expr.tags)
def _ail_handle_Cmp(self, expr: Expr.BinaryOp) -> PropValue:
    """Handle comparison operators; the value is always Top, but the
    comparison expression is rebuilt from the propagated operands."""
    lhs_value = self._expr(expr.operands[0])
    rhs_value = self._expr(expr.operands[1])

    if lhs_value is None or rhs_value is None:
        rebuilt = expr
    else:
        lhs_one = lhs_value.one_expr
        rhs_one = rhs_value.one_expr
        if lhs_one is expr.operands[0] and rhs_one is expr.operands[1]:
            # nothing changed - keep the original expression
            rebuilt = expr
        else:
            rebuilt = Expr.BinaryOp(
                expr.idx,
                expr.op,
                [lhs_one if lhs_one is not None else expr.operands[0],
                 rhs_one if rhs_one is not None else expr.operands[1]],
                expr.signed,
                **expr.tags,
            )

    return PropValue.from_value_and_details(
        self.state.top(expr.bits), expr.size, rebuilt, self._codeloc()
    )
def _ail_handle_Call(self, expr_stmt: Stmt.Call):
    """Handle a Call statement: evaluate the target and all arguments, then
    model the return value by storing a Top value into the return register.

    The ``_inside_call_stmt`` flag is set around argument/return handling;
    other handlers (e.g. Load) behave differently while it is set.
    """
    if isinstance(expr_stmt.target, Expr.Expression):
        _ = self._expr(expr_stmt.target)

    self.state._inside_call_stmt = True

    if expr_stmt.args:
        for arg in expr_stmt.args:
            _ = self._expr(arg)

    if expr_stmt.ret_expr is not None:
        if isinstance(expr_stmt.ret_expr, Expr.Register):
            # it has a return expression. awesome - treat it as an assignment
            # assume the return value always uses a full-width register
            # FIXME: Expose it as a configuration option
            return_value_use_full_width_reg = True
            if return_value_use_full_width_reg:
                # clobber the full-width register so stale upper bits are not kept
                v = PropValue.from_value_and_details(
                    self.state.top(self.arch.bits), self.arch.bytes, expr_stmt.ret_expr, self._codeloc()
                )
                self.state.store_register(
                    Expr.Register(None, expr_stmt.ret_expr.variable, expr_stmt.ret_expr.reg_offset,
                                  self.arch.bits),
                    v
                )
            else:
                # clobber only the register slice named by ret_expr
                v = PropValue.from_value_and_details(
                    self.state.top(expr_stmt.ret_expr.size * self.arch.byte_width), expr_stmt.ret_expr.size,
                    expr_stmt.ret_expr, self._codeloc()
                )
                self.state.store_register(expr_stmt.ret_expr, v)
            # set equivalence
            self.state.add_equivalence(self._codeloc(), expr_stmt.ret_expr, expr_stmt)
        else:
            l.warning("Unsupported ret_expr type %s.", expr_stmt.ret_expr.__class__)

    self.state._inside_call_stmt = False
def _ail_handle_Load(self, expr: Expr.Load) -> Optional[PropValue]:
    """Handle a memory Load.

    When the address resolves to a known stack offset, the tracked stack
    variable value is returned; a replacement is only recorded while inside a
    call statement (stack-variable replacement is otherwise forbidden by the
    AIL simplifiers). Failing that, a Top value is returned together with a
    Load expression rebuilt from the propagated address.
    """
    self.state: 'PropagatorAILState'
    addr = self._expr(expr.addr)

    addr_expr = addr.one_expr
    if addr_expr is not None:
        sp_offset = self.extract_offset_to_sp(addr_expr)
        if sp_offset is not None:
            # Stack variable.
            var = self.state.load_stack_variable(sp_offset, expr.size, endness=expr.endness)
            if var is not None:
                # We do not add replacements here since in AIL function and block simplifiers we explicitly forbid
                # replacing stack variables, unless this is in the middle of a call statement.
                if self.state._inside_call_stmt and var.one_expr is not None:
                    if not self.is_using_outdated_def(var.one_expr, avoid=expr.addr):
                        l.debug("Add a replacement: %s with %s", expr, var.one_expr)
                        self.state.add_replacement(self._codeloc(), expr, var.one_expr)
                if not self.state.is_top(var.value):
                    return var

    if addr_expr is not None and addr_expr is not expr.addr:
        # the address was simplified - rebuild the Load around it
        new_expr = Expr.Load(expr.idx, addr_expr, expr.size, expr.endness, **expr.tags)
    else:
        new_expr = expr
    prop_value = PropValue.from_value_and_details(
        self.state.top(expr.size * self.arch.byte_width), expr.size, new_expr, self._codeloc())
    return prop_value
def _analyze(self):
    """Rebuild the block's final Call statement with resolved calling
    convention, prototype, arguments, and return-value register.

    Also strips statements made redundant by the explicit call: the store of
    the return address (or the lr/ra assignment on link-register
    architectures) and the Store statements that place arguments on the stack.
    """
    if not self.block.statements:
        return
    last_stmt = self.block.statements[-1]
    if not type(last_stmt) is Stmt.Call:
        self.result_block = self.block
        return

    target = self._get_call_target(last_stmt)
    if target is None:
        return
    if target not in self.kb.functions:
        return
    func = self.kb.functions[target]
    if func.prototype is None:
        func.find_declaration()

    args = []
    arg_locs = None
    if func.calling_convention is None:
        l.warning('%s has an unknown calling convention.', repr(func))
    else:
        if func.prototype is not None:
            # Make arguments
            arg_locs = func.calling_convention.arg_locs()
            if func.prototype.variadic:
                # determine the number of variadic arguments
                variadic_args = self._determine_variadic_arguments(func, func.calling_convention, last_stmt)
                if variadic_args:
                    arg_sizes = [arg.size // self.project.arch.byte_width for arg in func.prototype.args] + \
                                ([self.project.arch.bytes] * variadic_args)
                    is_fp = [False] * len(arg_sizes)
                    arg_locs = func.calling_convention.arg_locs(is_fp=is_fp, sizes=arg_sizes)
        else:
            if func.calling_convention.args is not None:
                arg_locs = func.calling_convention.arg_locs()

    stack_arg_locs: List[SimStackArg] = []
    if arg_locs is not None:
        for arg_loc in arg_locs:
            if type(arg_loc) is SimRegArg:
                size = arg_loc.size
                offset = arg_loc._fix_offset(None, size, arch=self.project.arch)
                _, the_arg = self._resolve_register_argument(last_stmt, arg_loc)
                if the_arg is not None:
                    args.append(the_arg)
                else:
                    # Reaching definitions are not available. Create a register expression instead.
                    args.append(Expr.Register(None, None, offset, size * 8, reg_name=arg_loc.reg_name))
            elif type(arg_loc) is SimStackArg:
                stack_arg_locs.append(arg_loc)
                _, the_arg = self._resolve_stack_argument(last_stmt, arg_loc)
                if the_arg is not None:
                    args.append(the_arg)
                else:
                    args.append(None)
            else:
                raise NotImplementedError('Not implemented yet.')

    new_stmts = self.block.statements[:-1]

    # remove the statement that stores the return address
    if self.project.arch.call_pushes_ret:
        # check if the last statement is storing the return address onto the top of the stack
        if len(new_stmts) >= 1:
            the_stmt = new_stmts[-1]
            if isinstance(the_stmt, Stmt.Store) and isinstance(the_stmt.data, Expr.Const):
                if isinstance(the_stmt.addr, Expr.StackBaseOffset) and \
                        the_stmt.data.value == self.block.addr + self.block.original_size:
                    # yes it is!
                    new_stmts = new_stmts[:-1]
    else:
        # if there is an lr register...
        lr_offset = None
        if archinfo.arch_arm.is_arm_arch(self.project.arch) or self.project.arch.name in {'PPC32', 'PPC64'}:
            lr_offset = self.project.arch.registers['lr'][0]
        elif self.project.arch.name in {'MIPS32', 'MIPS64'}:
            lr_offset = self.project.arch.registers['ra'][0]
        if lr_offset is not None:
            # remove the assignment to the lr register
            if len(new_stmts) >= 1:
                the_stmt = new_stmts[-1]
                if (isinstance(the_stmt, Stmt.Assignment)
                        and isinstance(the_stmt.dst, Expr.Register)
                        and the_stmt.dst.reg_offset == lr_offset):
                    # found it
                    new_stmts = new_stmts[:-1]

    # remove statements that stores arguments on the stack
    if stack_arg_locs:
        sp_offset = self._stack_pointer_tracker.offset_before(last_stmt.ins_addr, self.project.arch.sp_offset)
        if sp_offset is None:
            l.warning("Failed to calculate the stack pointer offset at pc %#x. You may find redundant Store "
                      "statements.", last_stmt.ins_addr)
        else:
            stack_arg_offsets = set((arg.stack_offset + sp_offset) for arg in stack_arg_locs)
            old_stmts = new_stmts
            new_stmts = []
            for stmt in old_stmts:
                if isinstance(stmt, Stmt.Store) and isinstance(stmt.addr, Expr.StackBaseOffset):
                    offset = stmt.addr.offset
                    if offset < 0:
                        # normalize negative offsets to their unsigned form
                        offset &= (1 << self.project.arch.bits) - 1
                    if offset in stack_arg_offsets:
                        continue
                new_stmts.append(stmt)

    ret_expr = last_stmt.ret_expr
    if ret_expr is None:
        ret_expr = None  # NOTE(review): redundant re-assignment, kept as-is
        if func.prototype is not None:
            if func.prototype.returnty is not None and not isinstance(func.prototype.returnty, SimTypeBottom):
                # it has a return value: synthesize the return-value register
                if func.calling_convention is not None:
                    ret_expr_size = func.prototype.returnty._with_arch(self.project.arch).size
                    reg_offset = func.calling_convention.RETURN_VAL._fix_offset(
                        None,
                        ret_expr_size,
                        arch=self.project.arch,
                    )
                    ret_expr = Expr.Register(None, None, reg_offset, ret_expr_size * 8)

    new_stmts.append(
        Stmt.Call(
            last_stmt,
            last_stmt.target,
            calling_convention=func.calling_convention,
            prototype=func.prototype,
            args=args,
            ret_expr=ret_expr,
            **last_stmt.tags,
        ))

    new_block = self.block.copy()
    new_block.statements = new_stmts
    self.result_block = new_block
def _ail_handle_Sub(self, expr):
    """Simplify subtractions that collapse into multiplications:
    x - x, n*x - x (and the mirrored form), and n*x - m*x."""
    operand_0 = self._expr(expr.operands[0])
    operand_1 = self._expr(expr.operands[1])

    # x - x = 0 * x
    # NOTE(review): the original comment here read "x + x = 2*x", which does
    # not describe this Sub handler - the multiplier built below is 0.
    if type(operand_0) in [Expr.Convert, Expr.Register]:
        if isinstance(operand_1, (Expr.Convert, Expr.Register)):
            if operand_0 == operand_1:
                # NOTE(review): the zero constant is created 8 bits wide
                # regardless of expr.bits - presumably widened elsewhere;
                # TODO confirm
                count = Expr.Const(expr.idx, None, 0, 8)
                new_expr = Expr.BinaryOp(expr.idx, 'Mul', [operand_1, count], expr.signed, **expr.tags)
                return new_expr
    # 2*x - x = x
    if Expr.BinaryOp in [type(operand_0), type(operand_1)]:
        if isinstance(operand_1, Expr.BinaryOp) and operand_1.op == 'Mul' and \
                (not isinstance(operand_0, Expr.BinaryOp) or \
                 (isinstance(operand_0, Expr.BinaryOp) and operand_0.op != 'Mul')):
            # x0 - (x1 * c) with x0 == x1
            x0 = operand_0
            x1_index = 0 if isinstance(operand_1.operands[1], Expr.Const) else 1
            x1 = operand_1.operands[x1_index]
            const_x1 = operand_1.operands[1 - x1_index]
            if x0 == x1:
                # NOTE(review): x - n*x equals (1-n)*x, but this builds
                # (n-1)*x - possible sign issue; TODO confirm intent
                new_const = Expr.Const(const_x1.idx, None, const_x1.value - 1, const_x1.bits)
                new_expr = Expr.BinaryOp(expr.idx, 'Mul', [x0, new_const], expr.signed, **expr.tags)
                return new_expr
        elif isinstance(operand_0, Expr.BinaryOp) and operand_0.op == 'Mul' and \
                (not isinstance(operand_1, Expr.BinaryOp) or \
                 (isinstance(operand_1, Expr.BinaryOp) and operand_1.op != 'Mul')):
            # (x0 * c) - x1 with x0 == x1  ->  (c-1) * x1
            x1 = operand_1
            x0_index = 0 if isinstance(operand_0.operands[1], Expr.Const) else 1
            x0 = operand_0.operands[x0_index]
            const_x0 = operand_0.operands[1 - x0_index]
            if x0 == x1:
                new_const = Expr.Const(const_x0.idx, None, const_x0.value - 1, const_x0.bits)
                new_expr = Expr.BinaryOp(expr.idx, 'Mul', [x1, new_const], expr.signed, **expr.tags)
                return new_expr
        # 3*x - 2*x = x
        elif isinstance(operand_0, Expr.BinaryOp) and isinstance(operand_1, Expr.BinaryOp) and \
                operand_0.op == 'Mul' and operand_1.op == 'Mul':
            if Expr.Const in [type(operand_0.operands[0]), type(operand_0.operands[1])] \
                    and Expr.Const in [type(operand_1.operands[0]), type(operand_1.operands[1])]:
                x0_index = 0 if isinstance(operand_0.operands[1], Expr.Const) else 1
                x0 = operand_0.operands[x0_index]
                const_x0 = operand_0.operands[1 - x0_index]
                x1_index = 0 if isinstance(operand_1.operands[1], Expr.Const) else 1
                x1 = operand_1.operands[x1_index]
                const_x1 = operand_1.operands[1 - x1_index]
                if x0 == x1:
                    # n*x - m*x -> (n-m)*x
                    new_const = Expr.Const(const_x1.idx, None, const_x0.value - const_x1.value, const_x1.bits)
                    new_expr = Expr.BinaryOp(expr.idx, 'Mul', [x0, new_const], expr.signed, **expr.tags)
                    return new_expr

    # rebuild only when an operand actually changed
    if (operand_0, operand_1) != (expr.operands[0], expr.operands[1]):
        return Expr.BinaryOp(expr.idx, 'Sub', [operand_0, operand_1], expr.signed, **expr.tags)
    return expr
def _ail_handle_Xor(self, expr):
    """Handle an AIL Xor.

    Fix: propagate the signedness flag and ``**expr.tags`` into the rebuilt
    expression; the original dropped both (losing ins_addr and other tag
    information), inconsistent with the sibling binary-op handlers here.
    """
    operand_0 = self._expr(expr.operands[0])
    operand_1 = self._expr(expr.operands[1])

    return Expr.BinaryOp(expr.idx, 'Xor', [operand_0, operand_1], expr.signed, **expr.tags)
def _analyze(self):
    """Rebuild the block's final Call statement with resolved calling
    convention, prototype, arguments, and return-value register; also removes
    the return-address store on architectures where calls push it."""
    if not self.block.statements:
        return
    last_stmt = self.block.statements[-1]
    if not type(last_stmt) is Stmt.Call:
        self.result_block = self.block
        return

    target = self._get_call_target(last_stmt)
    if target is None:
        return
    if target not in self.kb.functions:
        return
    func = self.kb.functions[target]
    if func.prototype is None:
        func.find_declaration()

    args = []
    arg_locs = None
    if func.calling_convention is None:
        l.warning('%s has an unknown calling convention.', repr(func))
    else:
        if func.prototype is not None:
            # Make arguments
            arg_locs = func.calling_convention.arg_locs()
        else:
            if func.calling_convention.args is not None:
                arg_locs = func.calling_convention.arg_locs()

    if arg_locs is not None:
        for arg_loc in arg_locs:
            if type(arg_loc) is SimRegArg:
                size = arg_loc.size
                offset = arg_loc._fix_offset(None, size, arch=self.project.arch)
                the_arg = self._resolve_register_argument(last_stmt, arg_loc)
                if the_arg is not None:
                    args.append(the_arg)
                else:
                    # Reaching definitions are not available. Create a register expression instead.
                    args.append(Expr.Register(None, None, offset, size * 8, reg_name=arg_loc.reg_name))
            elif type(arg_loc) is SimStackArg:
                the_arg = self._resolve_stack_argument(last_stmt, arg_loc)
                if the_arg is not None:
                    args.append(the_arg)
                else:
                    args.append(None)
            else:
                raise NotImplementedError('Not implemented yet.')

    new_stmts = self.block.statements[:-1]
    if self.project.arch.call_pushes_ret:
        # check if the last statement is storing the return address onto the top of the stack
        if len(new_stmts) >= 1:
            the_stmt = new_stmts[-1]
            if isinstance(the_stmt, Stmt.Store) and isinstance(the_stmt.data, Expr.Const):
                # NOTE(review): this variant tests the Store's .variable
                # against SimStackVariable (newer variants test .addr against
                # Expr.StackBaseOffset) - presumably relies on variable
                # recovery having run; TODO confirm
                if isinstance(the_stmt.variable, SimStackVariable) and \
                        the_stmt.data.value == self.block.addr + self.block.original_size:
                    # yes it is!
                    new_stmts = new_stmts[:-1]

    ret_expr = last_stmt.ret_expr
    if ret_expr is None:
        ret_expr = None  # NOTE(review): redundant re-assignment, kept as-is
        if func.prototype is not None:
            if func.prototype.returnty is not None and not isinstance(func.prototype.returnty, SimTypeBottom):
                # it has a return value: synthesize the return-value register
                if func.calling_convention is not None:
                    ret_expr_size = func.prototype.returnty._with_arch(self.project.arch).size
                    reg_offset = func.calling_convention.RETURN_VAL._fix_offset(
                        None,
                        ret_expr_size,
                        arch=self.project.arch,
                    )
                    ret_expr = Expr.Register(None, None, reg_offset, ret_expr_size * 8)

    new_stmts.append(
        Stmt.Call(
            last_stmt,
            last_stmt.target,
            calling_convention=func.calling_convention,
            prototype=func.prototype,
            args=args,
            ret_expr=ret_expr,
            **last_stmt.tags,
        ))

    new_block = self.block.copy()
    new_block.statements = new_stmts
    self.result_block = new_block
def _ail_handle_CmpNE(self, expr):
    """Handle an AIL CmpNE.

    Fix: propagate ``**expr.tags`` into the rebuilt comparison; the original
    dropped the tags (and with them ins_addr information), inconsistent with
    the other binary-op handlers in this code base.
    """
    operand_0 = self._expr(expr.operands[0])
    operand_1 = self._expr(expr.operands[1])

    return Expr.BinaryOp(expr.idx, 'CmpNE', [operand_0, operand_1], expr.signed, **expr.tags)
def _analyze(self):
    """Rebuild the block's final Call statement with resolved calling
    convention, prototype, and arguments.

    Unlike older variants, this version keeps the call's existing ret_expr
    (a missing one means earlier simplification deemed the return value
    useless) and records the absolute stack offsets of on-stack arguments in
    ``self.stack_arg_offsets`` for later simplification passes to consume.
    """
    if not self.block.statements:
        return
    last_stmt = self.block.statements[-1]
    if not type(last_stmt) is Stmt.Call:
        self.result_block = self.block
        return

    cc = None
    prototype = None
    args = None
    stack_arg_locs: List[SimStackArg] = []
    stackarg_sp_diff = 0

    target = self._get_call_target(last_stmt)
    if target is not None and target in self.kb.functions:
        # function-specific logic when the calling target is known
        func = self.kb.functions[target]
        if func.prototype is None:
            func.find_declaration()
        cc = func.calling_convention
        prototype = func.prototype
        args = []
        arg_locs = None
        if func.calling_convention is None:
            l.warning('%s has an unknown calling convention.', repr(func))
        else:
            stackarg_sp_diff = func.calling_convention.STACKARG_SP_DIFF
            if func.prototype is not None:
                # Make arguments
                arg_locs = func.calling_convention.arg_locs()
                if func.prototype.variadic:
                    # determine the number of variadic arguments
                    variadic_args = self._determine_variadic_arguments(func, func.calling_convention, last_stmt)
                    if variadic_args:
                        arg_sizes = [arg.size // self.project.arch.byte_width for arg in func.prototype.args] + \
                                    ([self.project.arch.bytes] * variadic_args)
                        is_fp = [False] * len(arg_sizes)
                        arg_locs = func.calling_convention.arg_locs(is_fp=is_fp, sizes=arg_sizes)
            else:
                if func.calling_convention.args is not None:
                    arg_locs = func.calling_convention.arg_locs()

        if arg_locs is not None:
            for arg_loc in arg_locs:
                if type(arg_loc) is SimRegArg:
                    size = arg_loc.size
                    offset = arg_loc._fix_offset(None, size, arch=self.project.arch)
                    _, the_arg = self._resolve_register_argument(last_stmt, arg_loc)
                    if the_arg is not None:
                        args.append(the_arg)
                    else:
                        # Reaching definitions are not available. Create a register expression instead.
                        args.append(
                            Expr.Register(self._atom_idx(), None, offset, size * 8, reg_name=arg_loc.reg_name))
                elif type(arg_loc) is SimStackArg:
                    stack_arg_locs.append(arg_loc)
                    _, the_arg = self._resolve_stack_argument(last_stmt, arg_loc)
                    if the_arg is not None:
                        args.append(the_arg)
                    else:
                        args.append(None)
                else:
                    raise NotImplementedError('Not implemented yet.')

    # Remove the old call statement
    new_stmts = self.block.statements[:-1]

    # remove the statement that stores the return address
    if self.project.arch.call_pushes_ret:
        # check if the last statement is storing the return address onto the top of the stack
        if len(new_stmts) >= 1:
            the_stmt = new_stmts[-1]
            if isinstance(the_stmt, Stmt.Store) and isinstance(the_stmt.data, Expr.Const):
                if isinstance(the_stmt.addr, Expr.StackBaseOffset) and \
                        the_stmt.data.value == self.block.addr + self.block.original_size:
                    # yes it is!
                    new_stmts = new_stmts[:-1]
    else:
        # if there is an lr register...
        lr_offset = None
        if archinfo.arch_arm.is_arm_arch(self.project.arch) or self.project.arch.name in {'PPC32', 'PPC64'}:
            lr_offset = self.project.arch.registers['lr'][0]
        elif self.project.arch.name in {'MIPS32', 'MIPS64'}:
            lr_offset = self.project.arch.registers['ra'][0]
        if lr_offset is not None:
            # remove the assignment to the lr register
            if len(new_stmts) >= 1:
                the_stmt = new_stmts[-1]
                if (isinstance(the_stmt, Stmt.Assignment)
                        and isinstance(the_stmt.dst, Expr.Register)
                        and the_stmt.dst.reg_offset == lr_offset):
                    # found it
                    new_stmts = new_stmts[:-1]

    # calculate stack offsets for arguments that are put on the stack. these offsets will be consumed by
    # simplification steps in the future, which may decide to remove statements that stores arguments on the stack.
    if stack_arg_locs:
        sp_offset = self._stack_pointer_tracker.offset_before(last_stmt.ins_addr, self.project.arch.sp_offset)
        if sp_offset is None:
            l.warning("Failed to calculate the stack pointer offset at pc %#x. You may find redundant Store "
                      "statements.", last_stmt.ins_addr)
            self.stack_arg_offsets = None
        else:
            self.stack_arg_offsets = set(
                (last_stmt.ins_addr, sp_offset + arg.stack_offset - stackarg_sp_diff) for arg in stack_arg_locs)

    ret_expr = last_stmt.ret_expr
    # if ret_expr is None, it means in previous steps (such as during AIL simplification) we have deemed the return
    # value of this call statement as useless and is removed.

    new_stmts.append(
        Stmt.Call(
            last_stmt,
            last_stmt.target,
            calling_convention=cc,
            prototype=prototype,
            args=args,
            ret_expr=ret_expr,
            **last_stmt.tags,
        ))

    new_block = self.block.copy()
    new_block.statements = new_stmts
    self.result_block = new_block
def _ail_handle_Convert(self, expr: Expr.Convert) -> PropValue:
    """Evaluate a Convert (bit-width cast) expression and build its PropValue.

    The claripy value is sign-/zero-extended or truncated to ``expr.to_bits``;
    the AIL expression side is simplified where possible:

    - a Convert that exactly undoes an inner Convert is eliminated;
    - a Convert of a constant is folded immediately (masked to ``to_bits``);
    - an unsigned up-cast on a byte boundary is modelled as two chunks —
      the low-order operand plus an explicit zero constant for the high
      bits — so later passes can reason about the zeroed upper part;
    - when the operand value consists of multiple labelled chunks, the
      chunks are re-sliced/extended to fit the target width.

    :param expr: the AIL Convert expression being evaluated.
    :return:     a PropValue carrying the converted value and its
                 per-offset expression details.
    """
    o_value = self._expr(expr.operand)
    if o_value is None:
        # BUGFIX: previously this case only selected TOP for new_value and
        # then fell through to `o_value.one_expr` below, which would raise
        # AttributeError on None. Bail out early with TOP and the original
        # expression instead (mirrors the empty-details fallback at the end).
        return PropValue.from_value_and_details(
            self.state.top(expr.to_bits), expr.size, expr, self._codeloc())
    if self.state.is_top(o_value.value):
        new_value = self.state.top(expr.to_bits)
    else:
        if expr.from_bits < expr.to_bits:
            # widening: extend according to signedness
            if expr.is_signed:
                new_value = claripy.SignExt(expr.to_bits - expr.from_bits, o_value.value)
            else:
                new_value = claripy.ZeroExt(expr.to_bits - expr.from_bits, o_value.value)
        elif expr.from_bits > expr.to_bits:
            # narrowing: keep the low-order to_bits bits
            new_value = claripy.Extract(expr.to_bits - 1, 0, o_value.value)
        else:
            new_value = o_value.value

    o_expr = o_value.one_expr
    o_defat = o_value.one_defat
    if o_expr is not None:
        # easy cases: the operand is a single expression
        if type(o_expr) is Expr.Convert:
            if expr.from_bits == o_expr.to_bits and expr.to_bits == o_expr.from_bits:
                # eliminate the redundant Convert
                new_expr = o_expr.operand
            else:
                # merge the two Converts into one (from the inner source width
                # straight to our target width)
                new_expr = Expr.Convert(expr.idx, o_expr.from_bits, expr.to_bits,
                                        expr.is_signed, o_expr.operand)
        elif type(o_expr) is Expr.Const:
            # do the conversion right away
            value = o_expr.value
            mask = (2 ** expr.to_bits) - 1
            value &= mask
            new_expr = Expr.Const(expr.idx, o_expr.variable, value, expr.to_bits)
        else:
            new_expr = Expr.Convert(expr.idx, expr.from_bits, expr.to_bits,
                                    expr.is_signed, o_expr, **expr.tags)

        if isinstance(new_expr, Expr.Convert) and not new_expr.is_signed \
                and new_expr.to_bits > new_expr.from_bits \
                and new_expr.from_bits % self.arch.byte_width == 0:
            # special handling for zero-extension: it simplifies the code if
            # we explicitly model zeros in the high-order chunk
            new_size = new_expr.from_bits // self.arch.byte_width
            offset_and_details = {
                0: Detail(new_size, new_expr.operand, o_defat),
                new_size: Detail(
                    new_expr.size - new_size,
                    Expr.Const(expr.idx, None, 0, new_expr.to_bits - new_expr.from_bits),
                    self._codeloc()),
            }
        else:
            offset_and_details = {0: Detail(expr.size, new_expr, self._codeloc())}
        return PropValue(new_value, offset_and_details=offset_and_details)
    elif o_value.offset_and_details:
        # hard cases: the operand is chunked. We keep the labelled chunks that
        # fall inside [0, to_bits) and drop/trim the rest.
        start_offset = 0
        end_offset = expr.to_bits // self.arch.byte_width  # end_offset is exclusive
        offset_and_details = {}
        max_offset = max(o_value.offset_and_details.keys())
        for offset_, detail_ in o_value.offset_and_details.items():
            if offset_ < start_offset < offset_ + detail_.size:
                # the chunk straddles the start: extract the part from
                # start_offset onward (clipped at end_offset)
                off = 0
                siz = min(end_offset, offset_ + detail_.size) - start_offset
                expr_ = PropValue.extract_ail_expression(
                    (start_offset - offset_) * self.arch.byte_width,
                    siz * self.arch.byte_width,
                    detail_.expr
                )
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
            elif offset_ >= start_offset and offset_ + detail_.size <= end_offset:
                # the chunk fits entirely: include the whole thing
                off = offset_ - start_offset
                siz = detail_.size
                if off == max_offset and off + siz < end_offset:
                    # last chunk falls short of the target width: extend it
                    expr_ = PropValue.extend_ail_expression(
                        (end_offset - (off + siz)) * self.arch.byte_width,
                        detail_.expr
                    )
                    siz = end_offset - off
                else:
                    expr_ = detail_.expr
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
            elif offset_ < end_offset <= offset_ + detail_.size:
                # the chunk straddles the end: keep everything up to end_offset
                if offset_ < start_offset:
                    off = 0
                    siz = end_offset - start_offset
                else:
                    off = offset_ - start_offset
                    siz = end_offset - offset_
                expr_ = PropValue.extract_ail_expression(0, siz * self.arch.byte_width, detail_.expr)
                offset_and_details[off] = Detail(siz, expr_, detail_.def_at)
        return PropValue(
            new_value, offset_and_details=offset_and_details
        )
    else:
        # it's empty... no expression is available for whatever reason
        return PropValue.from_value_and_details(new_value, expr.size, expr, self._codeloc())
def _ail_handle_Register(self, expr: Expr.Register) -> Optional[PropValue]:
    """Evaluate a register read.

    Reads of SP/BP are rewritten into StackBaseOffset expressions using the
    stack pointer tracker. For other registers, the currently known value is
    loaded from the propagator state; if it resolves to a single
    correctly-sized, up-to-date expression (or to a recognizable two-chunk
    concatenation / zero-extension), a replacement for this register use is
    recorded; otherwise the register is replaced with TOP.

    :param expr: the AIL Register expression being read.
    :return:     the PropValue currently associated with the register, or a
                 TOP-valued PropValue when nothing is known.
    """
    self.state: 'PropagatorAILState'

    # Special handling for SP and BP
    if self._stack_pointer_tracker is not None:
        if expr.reg_offset == self.arch.sp_offset:
            # rewrite SP into an offset from the stack base, if the tracker
            # knows the offset at this instruction
            sb_offset = self._stack_pointer_tracker.offset_before(self.ins_addr, self.arch.sp_offset)
            if sb_offset is not None:
                new_expr = Expr.StackBaseOffset(None, self.arch.bits, sb_offset)
                self.state.add_replacement(self._codeloc(), expr, new_expr)
                return PropValue.from_value_and_details(
                    self.sp_offset(sb_offset), expr.size, new_expr, self._codeloc()
                )
        elif expr.reg_offset == self.arch.bp_offset:
            # same rewrite for the base pointer
            sb_offset = self._stack_pointer_tracker.offset_before(self.ins_addr, self.arch.bp_offset)
            if sb_offset is not None:
                new_expr = Expr.StackBaseOffset(None, self.arch.bits, sb_offset)
                self.state.add_replacement(self._codeloc(), expr, new_expr)
                return PropValue.from_value_and_details(
                    self.sp_offset(sb_offset), expr.size, new_expr, self._codeloc()
                )

    def _test_concatenation(pv: PropValue):
        # Check whether pv is exactly two chunks {0: lo, k: hi} defined at the
        # same location; if so, fold them into one expression.
        # Returns (True, folded_expr) on success, (False, None) otherwise.
        if pv.offset_and_details is not None and len(pv.offset_and_details) == 2 and 0 in pv.offset_and_details:
            lo_value = pv.offset_and_details[0]
            hi_offset = next(iter(k for k in pv.offset_and_details if k != 0))
            hi_value = pv.offset_and_details[hi_offset]
            if lo_value.def_at == hi_value.def_at:
                # it's the same value! we can apply concatenation here
                if isinstance(hi_value.expr, Expr.Const) and hi_value.expr.value == 0:
                    # a zero high chunk — it's probably an up-cast; map the
                    # chunk sizes to an unsigned Convert where recognized
                    mappings = {
                        # (lo_value.size, hi_value.size): (from_bits, to_bits)
                        (1, 1): (8, 16),  # char to short
                        (1, 3): (8, 32),  # char to int
                        (1, 7): (8, 64),  # char to int64
                        (2, 2): (16, 32),  # short to int
                        (2, 6): (16, 64),  # short to int64
                        (4, 4): (32, 64),  # int to int64
                    }
                    key = (lo_value.size, hi_value.size)
                    if key in mappings:
                        from_bits, to_bits = mappings[key]
                        result_expr = Expr.Convert(None, from_bits, to_bits, False, lo_value.expr)
                        return True, result_expr
                # general case: an explicit Concat(hi, lo)
                result_expr = Expr.BinaryOp(None, "Concat", [hi_value.expr, lo_value.expr], False)
                return True, result_expr
        return False, None

    new_expr = self.state.load_register(expr)
    if new_expr is not None:
        # check if this new_expr uses any expression that has been overwritten
        replaced = False
        outdated = False
        all_subexprs = list(new_expr.all_exprs())
        for _, detail in new_expr.offset_and_details.items():
            if detail.expr is None:
                # a chunk with no expression — nothing to validate further
                break
            if self.is_using_outdated_def(detail.expr, detail.def_at, avoid=expr):
                outdated = True
                break
        # only propagate when every sub-expression exists and none is stale
        if all_subexprs and None not in all_subexprs and not outdated:
            if len(all_subexprs) == 1:
                # trivial case: a single expression of the right size
                subexpr = all_subexprs[0]
                if subexpr.size == expr.size:
                    replaced = True
                    l.debug("Add a replacement: %s with %s", expr, subexpr)
                    self.state.add_replacement(self._codeloc(), expr, subexpr)
            else:
                # multiple chunks: try to fold them into a Convert/Concat
                is_concatenation, result_expr = _test_concatenation(new_expr)
                if is_concatenation:
                    replaced = True
                    l.debug("Add a replacement: %s with %s", expr, result_expr)
                    self.state.add_replacement(self._codeloc(), expr, result_expr)
        if not replaced:
            # no usable replacement — invalidate prior replacements with TOP
            l.debug("Add a replacement: %s with TOP", expr)
            self.state.add_replacement(self._codeloc(), expr, self.state.top(expr.bits))
        return new_expr

    # nothing known about this register
    return PropValue.from_value_and_details(self.state.top(expr.bits), expr.size, expr, self._codeloc())
def _ail_handle_Shr(self, expr):
    """Evaluate a logical shift-right expression.

    Besides plain simplification, this handler pattern-matches several
    multiply-by-constant-then-shift sequences that compilers emit for
    integer division by a constant (reciprocal-multiplication strength
    reduction) and rewrites them back into a single ``DivMod`` with the
    recovered divisor (candidate divisors are vetted by
    ``self._check_divisor``). Variable names follow the matched pattern:
    ``X`` is the dividend, ``C`` the magic multiplier, and ``Y``/``Z``/``V``
    are shift amounts.

    :param expr: the AIL Shr expression being evaluated.
    :return:     a simplified expression, or ``expr`` itself if nothing
                 changed.
    """
    operand_0 = self._expr(expr.operands[0])
    operand_1 = self._expr(expr.operands[1])
    X = None
    divisor = None
    # Pattern: (X DivMod c) >> n  ==>  X DivMod (c * 2**n)
    if isinstance(operand_1, Expr.Const) \
            and isinstance(operand_0, Expr.BinaryOp) \
            and operand_0.op == 'DivMod' \
            and isinstance(operand_0.operands[1], Expr.Const):
        divisor = operand_0.operands[1].value * pow(2, operand_1.value)
        X = operand_0.operands[0]
    # Same pattern with the DivMod wrapped in a Convert
    if isinstance(operand_1, Expr.Const) \
            and isinstance(operand_0, Expr.Convert) \
            and isinstance(operand_0.operand, Expr.BinaryOp) \
            and operand_0.operand.op == 'DivMod' \
            and isinstance(operand_0.operand.operands[1], Expr.Const):
        divisor = operand_0.operand.operands[1].value * pow(
            2, operand_1.value)
        X = operand_0.operand.operands[0]
    # Pattern: the 128->64-bit truncation of a multiply, i.e. the high half
    # of a 64x64 product — the classic magic-number division shape
    if isinstance(operand_1, Expr.Const) \
            and isinstance(operand_0, Expr.Convert) \
            and operand_0.from_bits == 128 \
            and operand_0.to_bits == 64:
        if isinstance(operand_0.operand, Expr.BinaryOp) \
                and operand_0.operand.op == 'Mul':
            if isinstance(operand_0.operand.operands[1], Expr.Const):
                C = operand_0.operand.operands[1].value
                Y = operand_1.value
                divisor = self._check_divisor(pow(2, 64 + Y), C)
                X = operand_0.operand.operands[0]
            elif isinstance(operand_0.operand.operands[0], Expr.BinaryOp) \
                    and operand_0.operand.operands[0].op in {'Shr', 'DivMod'}:
                # the dividend was pre-shifted (Shr) or pre-divided by a
                # power of two (DivMod); fold that shift into the exponent
                C = operand_0.operand.operands[1].value
                Z = operand_1.value
                if operand_0.operand.operands[0].op == 'Shr':
                    Y = operand_0.operand.operands[0].operands[1].value
                else:
                    Y = int(
                        math.log2(operand_0.operand.operands[0].operands[1].value))
                divisor = self._check_divisor(pow(2, 64 + Z + Y), C)
                X = operand_0.operand.operands[0].operands[0]
    # Pattern: (x_xC + xC) >> Z — the "add back" fixup sequence compilers
    # emit when the magic multiplier does not fit; xC is the sub-term that
    # contains the other one
    if isinstance(operand_1, Expr.Const) \
            and isinstance(operand_0, Expr.BinaryOp) \
            and operand_0.op == 'Add':
        add_0, add_1 = operand_0.operands
        Z = operand_1.value
        if add_0.has_atom(add_1) or add_1.has_atom(add_0):
            xC = add_1 if add_0.has_atom(add_1) else add_0
            x_xC = add_0 if add_0.has_atom(add_1) else add_1
            if isinstance(xC, Expr.Convert) and (xC.from_bits > xC.to_bits):
                # xC is the truncated high half of X * C
                Y = xC.from_bits - xC.to_bits
                if isinstance(xC.operand, Expr.BinaryOp) and xC.operand.op == 'Mul':
                    xC_ = xC.operand
                    if isinstance(xC_.operands[1], Expr.Const):
                        C = xC_.operands[1].value
                        X = xC_.operands[0]
                        if isinstance(x_xC, Expr.BinaryOp) and x_xC.op == 'Shr':
                            V_, V = x_xC.operands
                            if isinstance(V, Expr.Const):
                                V = V.value
                                if isinstance(
                                        V_, Expr.BinaryOp) and V_.op == 'Sub':
                                    # require x_xC == (X - xC) >> V
                                    if V_.operands[0] == X and V_.operands[
                                            1] == xC:
                                        divisor = self._check_divisor(
                                            pow(2, Y + V + Z),
                                            C * (pow(2, V) - 1) + pow(2, Y))
            # unsigned int here
            if isinstance(xC, Expr.BinaryOp) and xC.op == 'Mul':
                if isinstance(xC.operands[1], Expr.Const) \
                        and isinstance(xC.operands[0], Expr.Convert):
                    C = xC.operands[1].value
                    X = xC.operands[0]
                    Y = X.from_bits - X.to_bits
                    if isinstance(x_xC, Expr.BinaryOp) and x_xC.op == 'Shr':
                        V_, V = x_xC.operands
                        if isinstance(V, Expr.Const):
                            V = V.value
                            if isinstance(
                                    V_, Expr.BinaryOp) and V_.op == 'Sub':
                                if V_.operands[1] == xC:
                                    divisor = self._check_divisor(
                                        pow(2, Y + V + Z),
                                        C * (pow(2, V) - 1) + pow(2, Y))
            elif isinstance(xC, Expr.BinaryOp) and xC.op == 'Shr':
                # xC itself is a shifted multiply: ((X * C) >> Y)
                if isinstance(xC.operands[1], Expr.Const) \
                        and isinstance(xC.operands[0], Expr.BinaryOp) \
                        and xC.operands[0].op == 'Mul' \
                        and isinstance(xC.operands[0].operands[1], Expr.Const):
                    if isinstance(x_xC, Expr.BinaryOp) \
                            and isinstance(x_xC.operands[1], Expr.Const) \
                            and isinstance(x_xC.operands[0], Expr.BinaryOp) \
                            and x_xC.op == 'Shr' and x_xC.operands[0].op == 'Sub':
                        X = xC.operands[0].operands[0]
                        C = xC.operands[0].operands[1].value
                        Y = xC.operands[1].value
                        V = x_xC.operands[1].value
                        # both sub-terms must divide the same X
                        if X == x_xC.operands[0].operands[0]:
                            divisor = self._check_divisor(
                                pow(2, Y + V + Z),
                                C * (pow(2, V) - 1) + pow(2, Y))
    # unsigned int
    # Pattern: (X * C) >> Z, optionally with X narrowed by a Convert or
    # pre-shifted/pre-divided
    if isinstance(operand_1, Expr.Const) \
            and isinstance(operand_0, Expr.BinaryOp) \
            and operand_0.op == 'Mul' \
            and isinstance(operand_0.operands[1], Expr.Const):
        if isinstance(operand_0.operands[0], Expr.Convert):
            V = operand_0.operands[0].from_bits - operand_0.operands[
                0].to_bits
            C = operand_0.operands[1].value
            Z = operand_1.value
            X = operand_0.operands[0]
            divisor = self._check_divisor(pow(2, V + Z), C)
        elif isinstance(operand_0.operands[0], Expr.BinaryOp) \
                and isinstance(operand_0.operands[0].operands[1], Expr.Const) \
                and operand_0.operands[0].op in {'Shr', 'DivMod'}:
            X = operand_0.operands[0].operands[0]
            V = 0
            # ndigits controls the rounding tolerance in _check_divisor;
            # 32-bit narrowing uses a looser tolerance
            ndigits = 6
            if isinstance(X, Expr.Convert):
                V = X.from_bits - X.to_bits
                if V == 32:
                    ndigits = 5
            C = operand_0.operands[1].value
            Y = operand_0.operands[0].operands[1].value
            if operand_0.operands[0].op == 'DivMod':
                # DivMod by a power of two is an implicit shift
                Y = int(math.log2(operand_0.operands[0].operands[1].value))
            Z = operand_1.value
            divisor = self._check_divisor(pow(2, Y + Z + V), C, ndigits)
        else:
            X = operand_0.operands[0]
            Y = operand_1.value
            C = operand_0.operands[1].value
            divisor = self._check_divisor(pow(2, Y), C)
    if divisor and X:
        # one of the division patterns matched: emit X DivMod divisor
        new_const = Expr.Const(expr.idx, None, divisor, 64)
        return Expr.BinaryOp(expr.idx, 'DivMod', [X, new_const],
                             expr.signed, **expr.tags)
    if isinstance(operand_1, Expr.Const):
        if isinstance(operand_0, Expr.Register):
            # reg >> c  ==>  reg DivMod 2**c
            # NOTE(review): expr.tags are not propagated here, unlike the
            # other rebuilds above — confirm this is intentional
            new_operand = Expr.Const(operand_1.idx, None, 2**operand_1.value,
                                     operand_1.bits)
            return Expr.BinaryOp(expr.idx, 'DivMod',
                                 [operand_0, new_operand], expr.signed)
        elif isinstance(operand_0, Expr.BinaryOp) \
                and operand_0.op == 'Shr' \
                and isinstance(operand_0.operands[1], Expr.Const):
            # collapse nested shifts: (x >> a) >> b  ==>  x >> (a + b)
            new_const = Expr.Const(
                operand_1.idx, None,
                operand_0.operands[1].value + operand_1.value,
                operand_1.bits)
            return Expr.BinaryOp(expr.idx, 'Shr',
                                 [operand_0.operands[0], new_const],
                                 expr.signed, **expr.tags)
    if (operand_0, operand_1) != (expr.operands[0], expr.operands[1]):
        # operands were simplified but no pattern matched: rebuild the Shr
        return Expr.BinaryOp(expr.idx, 'Shr', [operand_0, operand_1],
                             expr.signed)
    return expr
def _rewrite(self, ccall: Expr.VEXCCallExpression) -> Optional[Expr.Expression]:
    """Rewrite supported amd64 condition-code ccalls into plain AIL expressions.

    Two VEX helpers are handled:

    - ``amd64g_calculate_condition``: SUB-based CondLE / CondZ / CondL /
      CondNBE become the matching AIL comparison of the two dependencies,
      widened to the ccall's bit width.
    - ``amd64g_calculate_rflags_c``: the carry flag for ADD-, SUB-, and
      DEC-class operations.

    :param ccall: the VEX ccall expression to rewrite.
    :return:      the replacement expression, or None when the ccall does not
                  match any recognized form.
    """
    if ccall.cee_name == "amd64g_calculate_condition":
        cond, op = ccall.operands[0], ccall.operands[1]
        dep_1, dep_2 = ccall.operands[2], ccall.operands[3]
        if isinstance(cond, Expr.Const) and isinstance(op, Expr.Const):
            sub_ops = {
                AMD64_OpTypes['G_CC_OP_SUBB'], AMD64_OpTypes['G_CC_OP_SUBW'],
                AMD64_OpTypes['G_CC_OP_SUBL'], AMD64_OpTypes['G_CC_OP_SUBQ'],
            }
            # condition type -> (AIL comparison op, signedness of the compare)
            comparisons = {
                AMD64_CondTypes['CondLE']: ("CmpLE", True),    # dep_1 <=s dep_2
                AMD64_CondTypes['CondZ']: ("CmpEQ", False),    # dep_1 - dep_2 == 0
                AMD64_CondTypes['CondL']: ("CmpLT", True),     # dep_1 - dep_2 <s 0
                AMD64_CondTypes['CondNBE']: ("CmpGT", False),  # dep_1 - dep_2 > 0 (unsigned)
            }
            if op.value in sub_ops and cond.value in comparisons:
                cmp_op, is_signed = comparisons[cond.value]
                r = Expr.BinaryOp(ccall.idx, cmp_op, (dep_1, dep_2), is_signed,
                                  **ccall.tags)
                return Expr.Convert(None, r.bits, ccall.bits, False, r, **ccall.tags)

    elif ccall.cee_name == "amd64g_calculate_rflags_c":
        # calculate the carry flag
        op = ccall.operands[0]
        dep_1, dep_2, ndep = ccall.operands[1], ccall.operands[2], ccall.operands[3]
        if isinstance(op, Expr.Const):
            op_v = op.value
            if op_v in {AMD64_OpTypes['G_CC_OP_ADDB'], AMD64_OpTypes['G_CC_OP_ADDW'],
                        AMD64_OpTypes['G_CC_OP_ADDL'], AMD64_OpTypes['G_CC_OP_ADDQ']}:
                # pc_actions_ADD: select between 0 and 1 based on an unsigned
                # comparison of (dep_1 + dep_2) against dep_1
                compare = Expr.BinaryOp(
                    None, "CmpLE",
                    [Expr.BinaryOp(None, "Add", [dep_1, dep_2], False), dep_1],
                    False,
                )
                return Expr.ITE(None, compare,
                                Expr.Const(None, None, 0, ccall.bits),
                                Expr.Const(None, None, 1, ccall.bits),
                                **ccall.tags)
            if op_v in {AMD64_OpTypes['G_CC_OP_SUBB'], AMD64_OpTypes['G_CC_OP_SUBW'],
                        AMD64_OpTypes['G_CC_OP_SUBL'], AMD64_OpTypes['G_CC_OP_SUBQ']}:
                # pc_actions_SUB: CF = dep_1 <u dep_2
                cf = Expr.BinaryOp(None, "CmpLT", [dep_1, dep_2], False)
                if cf.bits == ccall.bits:
                    return cf
                return Expr.Convert(None, cf.bits, ccall.bits, False, cf, **ccall.tags)
            if op_v in {AMD64_OpTypes['G_CC_OP_DECB'], AMD64_OpTypes['G_CC_OP_DECW'],
                        AMD64_OpTypes['G_CC_OP_DECL'], AMD64_OpTypes['G_CC_OP_DECQ']}:
                # pc_actions_DEC: recover CF from ndep (the previous flags)
                # by masking and shifting out the carry bit
                masked = Expr.BinaryOp(
                    None, "And",
                    [ndep,
                     Expr.Const(None, None, AMD64_CondBitMasks['G_CC_MASK_C'], 64)],
                    False)
                return Expr.BinaryOp(
                    None, "Shr",
                    [masked,
                     Expr.Const(None, None, AMD64_CondBitOffsets['G_CC_SHIFT_C'], 64)],
                    False, **ccall.tags)

    return None