Example #1
def create_cmp_j_instructions(mode, expr, val, target, kind):
    """
    Builds a CMP of expr against val followed by a conditional jump to target
    :param mode: 32 or 64, depending on the architecture
    :param kind: conditional jump mnemonic, e.g. "JZ"
    :return: list with the created CMP and jump instructions
    """
    cmp_inst = instruction_x86("CMP", mode, [expr, val])
    cmp_inst.additional_info = additional_info()
    cmp_inst.additional_info.g1.value = 0

    # the jump mnemonic is given by kind, so any Jcc can be produced
    jcc_inst = instruction_x86(kind, mode, [target])
    jcc_inst.additional_info = additional_info()
    jcc_inst.additional_info.g1.value = 0
    return [cmp_inst, jcc_inst]
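
A minimal usage sketch (not taken from the project) of how the helper above might be called; the import paths follow recent miasm releases and the register, immediate, and location values are purely illustrative:

# Hedged sketch: build a CMP + JZ pair guarding a jump to a fresh location.
from miasm.core.locationdb import LocationDB
from miasm.expression.expression import ExprId, ExprInt, ExprLoc

loc_db = LocationDB()
dst = loc_db.add_location()                  # hypothetical jump target
eax = ExprId("EAX", 32)

cmp_inst, jcc_inst = create_cmp_j_instructions(
    32, eax, ExprInt(0x2, 32), ExprLoc(dst, 32), "JZ")
print(cmp_inst.name, jcc_inst.name)          # CMP JZ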
Example #2
def create_jump_instruction(mode, target):
    """
    :param mode: 32 or 64, depending on the architecture
    :param target: Expr to jump to
    :return: the created JMP instruction
    """
    tmp_ln = instruction_x86("JMP", mode, [target])
    tmp_ln.additional_info = additional_info()
    tmp_ln.additional_info.g1.value = 0
    return tmp_ln
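
For completeness, a short sketch of calling this helper with a resolved location (Example #4 below replaces a block's last line with such a jump once the dispatcher target is known); the loc_key here is a placeholder:

# Hedged sketch: a direct JMP to an already-resolved loc_key.
from miasm.core.locationdb import LocationDB
from miasm.expression.expression import ExprLoc

loc_db = LocationDB()
resolved = loc_db.add_location()             # placeholder destination
jmp = create_jump_instruction(32, ExprLoc(resolved, 32))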
Example #3
    def process(self, pending, merging_val, reached_funcs):
        """
        Deobfuscates the flattening loops of the current function and records
        candidate merging values for every reached function in pending
        :return: the new head produced by _deobfuscate_cff_loops, or None if
                 the function contains no flattening loops
        """
        if len(self.flat_loops) == 0:
            # add all reached functions
            for func_addr, possible_merge_vars, loc_key in self.possible_merge_funcs:
                reached_funcs.add(func_addr)
                for expr, val in possible_merge_vars:
                    pending.setdefault(func_addr,
                                       {}).setdefault(expr, set()).add(val)
            return None

        assert len(self.asmcfg.heads()) == 1

        # add merging var to the ircfg
        if self.pad:
            initial_block_bak = self.ircfg.blocks[LocKey(0)]
            if merging_val and self.merging_var:
                asgn_blk = AssignBlock(
                    [ExprAssign(self.merging_var, merging_val)])
            else:
                asgn_blk = AssignBlock()
            assignblks = tuple(
                [asgn_blk, *self.ircfg.blocks[LocKey(0)].assignblks])
            self.ircfg.blocks[LocKey(0)] = IRBlock(LocKey(0), assignblks)

        head = self.asmcfg.heads()[0]
        head_block = self.asmcfg.loc_key_to_block(head)
        new_head = self._deobfuscate_cff_loops(
            head_block, self.asmcfg.machine.mn.regs.regs_init)

        if self.pad:
            self.ircfg.blocks[LocKey(0)] = initial_block_bak
            if merging_val and self.merging_var:
                mode = self.asmcfg.mode
                fix_dct = {
                    self.asmcfg.machine.mn.regs.regs_init[self.ir_arch.sp]:
                    self.ir_arch.sp
                }
                mov = instruction_x86(
                    "MOV", mode,
                    [self.merging_var.replace_expr(fix_dct), merging_val])
                mov.additional_info = additional_info()
                mov.additional_info.g1.value = 0
                self.out_asmcfg.loc_key_to_block(LocKey(0)).lines.insert(
                    0, mov)

        loc_keys = self.relevant_nodes
        for func_addr, possible_merge_vars, loc_key in self.possible_merge_funcs:
            if loc_key in loc_keys:
                reached_funcs.add(func_addr)
                for expr, val in possible_merge_vars:
                    pending.setdefault(func_addr,
                                       {}).setdefault(expr, set()).add(val)

        return new_head
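
The pending argument built up by process() is just a two-level dictionary of candidate merging values per reached function. A self-contained sketch of the same setdefault pattern with toy values (plain Python, no miasm types, and record_merge_candidate is a made-up name):

# Equivalent, explicit form of the nested setdefault used above.
def record_merge_candidate(pending, func_addr, expr, val):
    per_func = pending.setdefault(func_addr, {})   # expr -> set of candidate values
    per_func.setdefault(expr, set()).add(val)

pending = {}
record_merge_candidate(pending, 0x401000, "ECX", 0x2)
record_merge_candidate(pending, 0x401000, "ECX", 0x7)
assert pending == {0x401000: {"ECX": {0x2, 0x7}}}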
Example #4
    def _insert_flat_block(self, source_flat_block, symb_exec,
                           flat_block_to_loc_key):
        """
        Copies source_flat_block and sets its successors according to flat_block_to_loc_key
        :param flat_block_to_loc_key: dictionary mapping flat_blocks to respective loc_keys
        :param symb_exec: instance of current symbolic execution engine
        :param source_flat_block: flat_block to be inserted
        :return: dictionary mapping old successor loc_keys to the new ones
        """
        # we're not using redirect_successors after copying to avoid executing the same loops multiple times
        source_block = self.asmcfg.loc_key_to_block(
            source_flat_block.block_loc_key)
        tobe_processed = {}
        new_flat_blocks = set()
        new_block_loc_key = flat_block_to_loc_key[source_flat_block]
        if self.out_asmcfg.loc_key_to_block(new_block_loc_key) is not None:
            raise Exception("Target loc_key is already associated to a block")
        new_block = AsmBlock(new_block_loc_key)

        # copy instructions
        for ln in source_block.lines:
            tmp_ln = instruction_x86(ln.name, ln.mode,
                                     [i.copy() for i in ln.args],
                                     ln.additional_info)
            tmp_ln.b = ln.b
            tmp_ln.l = ln.l
            tmp_ln.offset = ln.offset
            new_block.addline(tmp_ln)

        constraints = source_block.bto
        # try to simplify the destination if it's a primary flattening block
        if not self.flat_loops[source_block.loc_key].is_default:
            logger.debug("current block is a part of primary loc_keys")
            simplified_target = symb_exec.eval_expr(self.ircfg.IRDst)
            if isinstance(simplified_target, ExprInt):
                simplified_target = self.asmcfg.loc_db.get_offset_location(
                    int(simplified_target))
            elif isinstance(simplified_target, ExprLoc):
                simplified_target = simplified_target.loc_key
            else:
                # there is probably an unknown instruction (or a series of them)
                # causing an implicit conditional assignment, such as CMOV or
                # SBB->AND->ADD; if the pattern turns out to be common, prepend a
                # comparison + conditional jump here, or add it to
                # ExtendedAsmCFG.extended_discovery and split the flow on the
                # final instruction

                # it is also possible that the block is not related to any CFF
                # loop at all
                addr = self.asmcfg.loc_db.get_location_offset(
                    source_flat_block.block_loc_key)
                addr = hex(addr) if addr else addr
                logger.warning(
                    "Couldn't simplify loc_key %s at %s, continuing" %
                    (str(source_flat_block.block_loc_key), addr))
                logger.warning("the simplified target is %s of instance %s" %
                               (simplified_target, type(simplified_target)))
                simplified_target = None
            if simplified_target:
                constraints = {AsmConstraintTo(simplified_target)}
                mode = self.asmcfg.mode

                # remove redundant comparison
                dp = DependencyGraph(self.ircfg, True)
                block_loc_key = source_block.loc_key
                res = next(
                    dp.get(block_loc_key, {self.ircfg.IRDst}, None,
                           {block_loc_key}))
                for depnode in res.relevant_nodes:
                    ind = depnode.line_nb
                    ind -= (len(self.ircfg.blocks[block_loc_key]) -
                            len(new_block.lines))
                    if new_block.lines[ind].name == "CMP":
                        new_block.lines.pop(ind)

                new_block.lines[-1] = create_jump_instruction(
                    mode, ExprLoc(simplified_target, mode))

        # copy constraints
        new_bto = set()
        for constraint in constraints:
            if not self.asmcfg.loc_key_to_block(constraint.loc_key):
                logger.debug("Skipping bad constraint %s" % constraint.loc_key)
                continue
            flat_block = self.flat_loops.get_block(constraint.loc_key,
                                                   symb_exec,
                                                   source_flat_block)
            if flat_block not in flat_block_to_loc_key:
                new_flat_blocks.add(flat_block)
                new_loc_key = self.out_asmcfg.loc_db.add_location()
                tobe_processed[constraint.loc_key] = (new_loc_key, flat_block)
                flat_block_to_loc_key[flat_block] = new_loc_key
            else:
                new_loc_key = flat_block_to_loc_key[flat_block]
            new_bto.add(AsmConstraint(new_loc_key, constraint.c_t))
        new_block.bto = new_bto
        new_block.alignment = source_block.alignment

        # change jmp targets
        if new_block.lines:
            for ind, arg in enumerate(list(new_block.lines[-1].args)):
                if isinstance(arg, ExprLoc):
                    if not self.asmcfg.loc_key_to_block(arg.loc_key):
                        logger.debug("Skipping bad constraint %s" %
                                     arg.loc_key)
                        continue
                    new_target, flat_block = tobe_processed.get(
                        arg.loc_key, (None, None))
                    if not new_target:
                        flat_block = self.flat_loops.get_block(
                            arg.loc_key, symb_exec, source_flat_block)
                        new_target = flat_block_to_loc_key.get(flat_block)
                    # None in case of irrelevant calls
                    logger.debug("new target: %s" % new_target)
                    if new_target:
                        new_block.lines[-1].args[ind] = ExprLoc(
                            new_target, arg.size)

        self.out_asmcfg.add_block(new_block)
        return new_flat_blocks
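
Since _insert_flat_block returns the flat_blocks it encountered for the first time, a caller can drive it with a simple worklist. The sketch below only illustrates that contract and is not the project's actual driver; rebuild_from and entry_flat_block are hypothetical names:

# Hedged sketch of a worklist driver around _insert_flat_block.
def rebuild_from(self, entry_flat_block, symb_exec):
    flat_block_to_loc_key = {
        entry_flat_block: self.out_asmcfg.loc_db.add_location()}
    worklist = [entry_flat_block]
    while worklist:
        flat_block = worklist.pop()
        # newly discovered successors still need their own copies
        worklist.extend(
            self._insert_flat_block(flat_block, symb_exec,
                                    flat_block_to_loc_key))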
Example #5
def create_nop(mode):
    nop_inst = instruction_x86("NOP", mode, [])
    nop_inst.additional_info = additional_info()
    nop_inst.additional_info.g1.value = 0
    return nop_inst
Example #6
def create_cond_branch_instruction(mode, name, target):
    """
    Builds a conditional branch of the given mnemonic (e.g. "JNZ") to target
    """
    tmp_ln = instruction_x86(name, mode, [target])
    tmp_ln.additional_info = additional_info()
    tmp_ln.additional_info.g1.value = 0
    return tmp_ln
Example #7
def create_mov_instruction(mode, dst, src):
    tmp_ln = instruction_x86("MOV", mode, [dst, src])
    tmp_ln.additional_info = additional_info()
    tmp_ln.additional_info.g1.value = 0
    return tmp_ln
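
Taken together, the small constructors above are enough to assemble a patched block by hand. A hedged sketch with illustrative values and recent-miasm import paths (the one-argument AsmBlock constructor matches the snippets above; newer miasm versions also expect a loc_db argument):

# Hedged sketch: build MOV / NOP / JNZ lines into a fresh AsmBlock.
from miasm.core.asmblock import AsmBlock
from miasm.core.locationdb import LocationDB
from miasm.expression.expression import ExprId, ExprInt, ExprLoc

loc_db = LocationDB()
mode = 32
eax = ExprId("EAX", 32)

blk = AsmBlock(loc_db.add_location())
blk.addline(create_mov_instruction(mode, eax, ExprInt(1, 32)))   # EAX = 1
blk.addline(create_nop(mode))                                    # padding
blk.addline(create_cond_branch_instruction(
    mode, "JNZ", ExprLoc(loc_db.add_location(), 32)))            # conditional exit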