Code example #1
class State(object):
    """
    Container for (static) plugin state related data & methods.
    """
    def __init__(self):
        self.current_ea = None
        self.cfa = None
        self.current_state = None
        self.current_node_ids = []
        #: last run config
        self.current_config = None
        #: config to be edited
        self.edit_config = None
        #: Analyzer instance - protects against merciless garbage collector
        self.analyzer = None
        self.hooks = None
        self.netnode = idabincat.netnode.Netnode("$ com.bincat.bcplugin")
        #: acts as a List of ("eip", "register name", "taint mask")
        #: XXX store in IDB?
        self.overrides = CallbackWrappedList()
        #: list of (name, config)
        self.configurations = AnalyzerConfigurations(self)
        # XXX store in idb after encoding?
        self.last_cfaout_marshal = None
        #: filepath to last dumped remapped binary
        self.remapped_bin_path = None
        self.remap_binary = True
        # for debugging purposes, to interact with this object from the console
        global bc_state
        bc_state = self

        self.gui = GUI(self)
        if PluginOptions.get("load_from_idb") == "True":
            self.load_from_idb()

    def new_analyzer(self, *args, **kwargs):
        """
        Return a new analyzer instance (web or local), depending on
        plugin options.
        """
        if (PluginOptions.get("web_analyzer") == "True"
                and PluginOptions.get("server_url") != ""):
            return WebAnalyzer(*args, **kwargs)
        else:
            return LocalAnalyzer(*args, **kwargs)

    def load_from_idb(self):
        if "out.ini" in self.netnode and "analyzer.log" in self.netnode:
            bc_log.info("Loading analysis results from idb")
            path = tempfile.mkdtemp(suffix='bincat')
            outfname = os.path.join(path, "out.ini")
            logfname = os.path.join(path, "analyzer.log")
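            # Write the blobs saved in the netnode back to temporary files so
            # the regular result-parsing path (analysis_finish_cb) can be
            # reused.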
            with open(outfname, 'wb') as outfp:
                outfp.write(self.netnode["out.ini"])
            with open(logfname, 'wb') as logfp:
                logfp.write(self.netnode["analyzer.log"])
            if "current_ea" in self.netnode:
                ea = self.netnode["current_ea"]
            else:
                ea = None
            self.analysis_finish_cb(outfname,
                                    logfname,
                                    cfaoutfname=None,
                                    ea=ea)
        if "remapped_bin_path" in self.netnode:
            fname = self.netnode["remapped_bin_path"]
            if os.path.isfile(fname):
                self.remapped_bin_path = fname
        if "remap_binary" in self.netnode:
            self.remap_binary = self.netnode["remap_binary"]

    def clear_background(self):
        """
        reset background color for previous analysis
        """
        if self.cfa:
            color = idaapi.calc_bg_color(idaapi.NIF_BG_COLOR)
            for v in self.cfa.states:
                ea = v.value
                idaapi.set_item_color(ea, color)

    def analysis_finish_cb(self, outfname, logfname, cfaoutfname, ea=None):
        bc_log.debug("Parsing analyzer result file")
        try:
            cfa = cfa_module.CFA.parse(outfname, logs=logfname)
        except (pybincat.PyBinCATException):
            bc_log.error("Could not parse result file")
            return None
        self.clear_background()
        self.cfa = cfa
        if cfa:
            # XXX add user preference for saving to idb? in that case, store
            # reference to marshalled cfa elsewhere
            bc_log.info("Storing analysis results to idb")
            with open(outfname, 'rb') as f:
                self.netnode["out.ini"] = f.read()
            with open(logfname, 'rb') as f:
                self.netnode["analyzer.log"] = f.read()
            if self.remapped_bin_path:
                self.netnode["remapped_bin_path"] = self.remapped_bin_path
            self.netnode["remap_binary"] = self.remap_binary
            if cfaoutfname is not None and os.path.isfile(cfaoutfname):
                with open(cfaoutfname, 'rb') as f:
                    self.last_cfaout_marshal = f.read()
        else:
            bc_log.info("Empty or unparseable result file.")
        bc_log.debug("----------------------------")
        # Update current RVA to start address (nodeid = 0)
        # By default, use the current ea, e.g. in case there are no results
        # (cfa is None) or no node 0 (happens in backward mode)
        current_ea = self.current_ea
        if ea is not None:
            current_ea = ea
        else:
            try:
                node0 = cfa['0']
                if node0:
                    current_ea = node0.address.value
            except (KeyError, TypeError):
                # cfa is None, or there is no node 0
                pass
        self.set_current_ea(current_ea, force=True)
        self.netnode["current_ea"] = current_ea
        if not cfa:
            return
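        # Color the analyzed addresses: one background color when at least one
        # node at the address is tainted, another when none are.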
        for addr, nodeids in cfa.states.items():
            ea = addr.value
            tainted = False
            for n_id in nodeids:
                # is it tainted?
                # find children state
                state = cfa[n_id]
                if state.tainted:
                    tainted = True
                    break

            if tainted:
                idaapi.set_item_color(ea, 0xDDFFDD)
            else:
                idaapi.set_item_color(ea, 0xCDCFCE)

    def set_current_node(self, node_id):
        if self.cfa:
            state = self.cfa[node_id]
            if state:
                self.set_current_ea(state.address.value,
                                    force=True,
                                    node_id=node_id)

    def set_current_ea(self, ea, force=False, node_id=None):
        """
        :param ea: int or long
        """
        if not (force or ea != self.current_ea):
            return
        self.gui.before_change_ea()
        self.current_ea = ea
        nonempty_state = False
        if self.cfa:
            node_ids = self.cfa.node_id_from_addr(ea)
            if node_ids:
                nonempty_state = True
                if node_id in node_ids:
                    self.current_state = self.cfa[node_id]
                else:
                    self.current_state = self.cfa[node_ids[0]]
                self.current_node_ids = node_ids
        if not nonempty_state:
            self.current_state = None
            self.current_node_ids = []

        self.gui.after_change_ea()

    def guess_filepath(self):
        filepath = self.current_config.binary_filepath
        if os.path.isfile(filepath):
            return filepath
        filepath = ConfigHelpers.guess_filepath()
        if os.path.isfile(filepath):
            return filepath
        # give up
        return None

    def start_analysis(self, config_str=None):
        """
        Creates a new temporary dir. File structure:
        input files: init.ini, cfain.marshal
        output files: out.ini, cfaout.marshal
        """
        if config_str:
            self.current_config = AnalyzerConfig.load_from_str(config_str)
        binary_filepath = self.guess_filepath()
        if not binary_filepath:
            bc_log.error(
                "File %s does not exit. Please fix path in configuration.",
                self.current_config.binary_filepath)
            return
        bc_log.debug("Using %s as source binary path", binary_filepath)
        self.current_config.binary_filepath = binary_filepath

        path = tempfile.mkdtemp(suffix='bincat')
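        # This directory will hold the input/output files listed in the
        # docstring (init.ini, out.ini and the marshalled CFA files).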

        # instance variable: we don't want the garbage collector to delete the
        # *Analyzer instance, killing an unlucky QProcess in the process
        try:
            self.analyzer = self.new_analyzer(path, self.analysis_finish_cb)
        except AnalyzerUnavailable as e:
            bc_log.error("Analyzer is unavailable", exc_info=True)
            return

        bc_log.info("Current analyzer path: %s", path)

        # Update overrides
        self.current_config.update_overrides(self.overrides)
        analysis_method = self.current_config.analysis_method
        if analysis_method in ("forward_cfa", "backward"):
            if self.last_cfaout_marshal is None:
                bc_log.error("No marshalled CFA has been recorded - run a "
                             "forward analysis first.")
                return
            with open(self.analyzer.cfainfname, 'wb') as f:
                f.write(self.last_cfaout_marshal)
        # Set correct file names
        # Note: bincat_native expects a filename for in_marshalled_cfa_file -
        # may not exist if analysis mode is forward_binary
        self.current_config.set_cfa_options('true', self.analyzer.cfainfname,
                                            self.analyzer.cfaoutfname)
        bc_log.debug("Generating .no files...")

        headers_filenames = self.current_config.headers_files.split(',')
        # compile .c files for libs, if there are any
        bc_log.debug("Initial header files: %r", headers_filenames)
        new_headers_filenames = []
        for f in headers_filenames:
            f = f.strip()
            if not f:
                continue
            if f.endswith('.c'):
                # generate .npk from .c file
                if not os.path.isfile(f):
                    bc_log.warning(
                        "header file %s could not be found, continuing", f)
                    continue
                new_npk_fname = f[:-2] + '.no'
                if not os.path.isfile(new_npk_fname):
                    # compile
                    self.analyzer.generate_tnpk(fname=f,
                                                destfname=new_npk_fname)
                    if not os.path.isfile(new_npk_fname):
                        bc_log.warning(
                            ".no file containing type data for the headers "
                            "file %s could not be generated, continuing", f)
                        continue
                f = new_npk_fname
            # Relative paths are copied
            elif f.endswith('.no') and os.path.isfile(f):
                if f[0] != os.path.sep:
                    temp_npk_fname = os.path.join(path, os.path.basename(f))
                    shutil.copyfile(f, temp_npk_fname)
                    f = temp_npk_fname
            else:
                bc_log.warning(
                    "Header file %s does not exist or does not match expected "
                    "extensions (.c, .no), ignoring.", f)
                continue

            new_headers_filenames.append(f)
        headers_filenames = new_headers_filenames
        # generate npk file for the binary being analyzed (unless it has
        # already been generated)
        if not any([s.endswith('pre-processed.no')
                    for s in headers_filenames]):
            npk_filename = self.analyzer.generate_tnpk()
            if not npk_filename:
                bc_log.warning(
                    ".no file containing type data for the file being "
                    "analyzed could not be generated, continuing. The "
                    "ida-generated header could be invalid.")
            else:
                headers_filenames.append(npk_filename)
            bc_log.debug("Final npk files: %r" % headers_filenames)
        self.current_config.headers_files = ','.join(headers_filenames)

        self.current_config.write(self.analyzer.initfname)
        self.analyzer.run()

    def re_run(self):
        """
        Re-run analysis, taking new overrides settings into account
        """
        if self.current_config is None:
            # XXX upload all required files in Web Analyzer
            # XXX Store all required files in IDB
            bc_log.error(
                "You have to run the analysis first using the 'Analyze From "
                "here (Ctrl-Shift-A)' menu - reloading previous results from "
                "IDB is not yet supported.")
        else:
            self.start_analysis()
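
A minimal usage sketch (hypothetical, not taken from the plugin itself) of driving the State object above from IDA's Python console, via the bc_state global that __init__ exposes for debugging. It assumes the plugin has been loaded (so bc_state exists) and that idc.here() is available in the IDAPython environment:

# Hypothetical console session: bc_state is the debug global set in
# State.__init__ above.
import idc

ea = idc.here()                          # address under the cursor
bc_state.set_current_ea(ea, force=True)  # refresh plugin views for this address
if bc_state.current_state is not None:
    print("nodes at %#x: %r" % (ea, bc_state.current_node_ids))
# Re-run the last analysis, taking any new taint overrides into account
bc_state.re_run()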