Code Example #1
    def open_coverage_xref(self, address, dctx=None):
        """
        Open the 'Coverage Xref' dialog for a given address.
        """
        lctx = self.get_context(dctx)

        # show the coverage xref dialog
        dialog = CoverageXref(lctx.director, address)
        if not dialog.exec_():
            return

        # activate the user selected xref (if one was double clicked)
        if dialog.selected_coverage:
            lctx.director.select_coverage(dialog.selected_coverage)
            return

        # load a coverage file from disk
        disassembler.show_wait_box("Loading coverage from disk...")
        created_coverage, errors = lctx.director.load_coverage_files(
            [dialog.selected_filepath],
            disassembler.replace_wait_box
        )

        if not created_coverage:
            lmsg("No coverage files could be loaded...")
            disassembler.hide_wait_box()
            warn_errors(errors)
            return

        disassembler.replace_wait_box("Selecting coverage...")
        lctx.director.select_coverage(created_coverage[0].name)
        disassembler.hide_wait_box()
Code Example #2
    def aggregate_drcov_batch(self, drcov_list):
        """
        Aggregate a given list of DrcovData into a single coverage mapping.

        See create_coverage_from_drcov_list(...) for more verbose comments.
        """
        errors = []

        # create a new coverage set to manually aggregate data into
        coverage = DatabaseCoverage(self._palette)

        for i, drcov_data in enumerate(drcov_list, 1):

            # keep the user informed about our progress while aggregating
            disassembler.replace_wait_box(
                "Aggregating batch data %u/%u" % (i, len(drcov_list))
            )

            # normalize coverage data to the open database
            try:
                addresses = self._normalize_drcov_data(drcov_data)
            except Exception as e:
                errors.append((self.ERROR_COVERAGE_ABSENT, drcov_data.filepath))
                lmsg("Failed to normalize coverage %s" % drcov_data.filepath)
                lmsg("- %s" % e)
                continue

            # aggregate the addresses into the output coverage mapping
            coverage.add_addresses(addresses, False)

        # return the created coverage mapping and any errors encountered
        return (coverage, errors)
Code Example #3
def metadata_progress(completed, total):
    """
    Handler for metadata collection callback, updates progress dialog.
    """
    disassembler.replace_wait_box(
        "Collected metadata for %u/%u Functions" % (completed, total)
    )
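
The examples on this page all follow the same wait-box lifecycle: show_wait_box(...) opens the progress dialog, replace_wait_box(...) rewrites its text for each unit of work, and hide_wait_box() closes it. Below is a minimal, standalone sketch of that pattern; the _WaitBoxStub class and collect_metadata function are hypothetical stand-ins (not part of Lighthouse) so the sketch can run outside a disassembler.

class _WaitBoxStub(object):
    """Hypothetical stand-in for the disassembler wait-box facade."""

    def show_wait_box(self, text):
        print("[wait box] %s" % text)

    def replace_wait_box(self, text):
        print("[wait box] %s" % text)

    def hide_wait_box(self):
        print("[wait box] closed")

disassembler = _WaitBoxStub()

def metadata_progress(completed, total):
    # same shape as the callback in Code Example #3
    disassembler.replace_wait_box(
        "Collected metadata for %u/%u Functions" % (completed, total)
    )

def collect_metadata(functions):
    # hypothetical long-running job that reports progress as it goes
    disassembler.show_wait_box("Building database metadata...")
    for i, _ in enumerate(functions, 1):
        metadata_progress(i, len(functions))
    disassembler.hide_wait_box()

collect_metadata(range(10))

In a real plugin, the stub would simply be replaced by the disassembler facade that the surrounding examples import from Lighthouse.
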
Code Example #4
    def _parse_module_symbol(self, f, reg, all_lines):
        """
        Parse '<addr> <module>!<symbol>' lines from the symbol dump into self.all_symbol.
        """
        disassembler.replace_wait_box("Building module_symbol metadata...")
        len_idx = 1
        while True:
            try:
                module_line = f.readline().strip()

                # stop on marker lines ('Opened ...' / '... start ...') or a blank line
                if ("Opened" in module_line
                        or "start" in module_line) and "!" not in module_line:
                    break
                if not module_line:
                    break

                len_idx = len_idx + 1
                disassembler.replace_wait_box(
                    "Building module symbol metadata %u/%u" %
                    (len_idx, all_lines))

                # split each matched 'module!symbol' entry into its parts
                regMatch = reg.match(module_line)
                linebits = regMatch.groupdict()
                dic = {}
                for k, v in linebits.iteritems():
                    if k == "addr":
                        dic["addr"] = v
                    if k == "symbol":
                        module = v[0:v.find("!")]
                        symbol = v[v.find("!") + 1:]
                        dic["module"] = module
                        dic["symbol"] = symbol
                self.all_symbol.append(dic)

            except Exception:
                # skip any line that fails to parse
                continue
Code Example #5
File: coverage.py    Project: scpczc/DIYDynamoRIO
    def check_root_trace(self, all_Excute_Function_list):
        all_len = len(all_Excute_Function_list)
        len_idx = 0
        for cur_cov in all_Excute_Function_list:
            len_idx = len_idx + 1
            self.check_sub_trace(cur_cov, cur_cov.sub_func_coverage)
            if len_idx % 50 == 0 or len_idx == all_len:
                disassembler.replace_wait_box("Optimizing execute trace metadata %u/%u" % (len_idx, all_len))
Code Example #6
    def refresh_metadata(self):
        """
        Hard refresh of the director and table metadata layers.
        """
        disassembler.replace_wait_box("Building execute trace metadata...")

        self._model._director.refresh_execute_trace(self._model)

        # ensure the table's model gets refreshed
        disassembler.replace_wait_box("Refreshing execute trace Overview...")
Code Example #7
    def _fix_symbol(self, f):
        """
        Rebase each parsed symbol to a module-relative offset and record it
        in the owning module's SymbolDic.
        """
        len_idx = 0
        all_lines = len(self.all_symbol)
        for dic in self.all_symbol:
            len_idx = len_idx + 1
            disassembler.replace_wait_box(
                "Fixing module symbol metadata %u/%u" % (len_idx, all_lines))
            for md in self.modules:
                if md.name == dic["module"]:
                    offset = int(dic["addr"], 16) - int(md.base, 16)
                    # keep only the symbol name if trailing annotations follow it
                    md.SymbolDic[offset] = dic["symbol"].split(
                        ' ')[0] if ' ' in dic["symbol"] else dic["symbol"]
Code Example #8
    def refresh_metadata(self):
        """
        Hard refresh of the director and table metadata layers.
        """
        disassembler.show_wait_box("Building database metadata...")
        self._model._director.refresh()

        # ensure the table's model gets refreshed
        disassembler.replace_wait_box("Refreshing Coverage Overview...")
        self._model.refresh()

        # all done
        disassembler.hide_wait_box()
Code Example #9
    def _parse_symbol_file(self, filepath):
        with open(filepath, "rb") as f:
            all_lines = len(f.read().splitlines())
            f.seek(0)

            rexp = r"(?P<addr>[0-9a-fA-F]+)(\s+)(?P<symbol>.+)"
            reg = re.compile(rexp)
            self._parse_module_symbol(f, reg, all_lines)
            self._parse_symbol_header(f)
            self._parse_module_table(f)
            self._fix_symbol(f)
            disassembler.replace_wait_box(
                "Building module_symbol metadata Successfully")
Code Example #10
    def _refresh_database_coverage(self):
        """
        Refresh all the database coverage mappings managed by the director.
        """
        logger.debug("Refreshing database coverage mappings")

        for i, name in enumerate(self.all_names, 1):
            logger.debug(" - %s" % name)
            disassembler.replace_wait_box("Refreshing coverage mapping %u/%u" %
                                          (i, len(self.all_names)))
            coverage = self.get_coverage(name)
            coverage.update_metadata(self.metadata)
            coverage.refresh()
Code Example #11
    def interactive_load_file(self, dctx=None):
        """
        Perform the user-interactive loading of individual coverage files.
        """
        lctx = self.get_context(dctx)

        #
        # kick off an asynchronous metadata refresh. this will run in the
        # background while the user is selecting which coverage files to load
        #

        future = lctx.metadata.refresh_async(progress_callback=metadata_progress)

        #
        # we will now prompt the user with an interactive file dialog so they
        # can select the coverage files they would like to load from disk
        #

        filenames = lctx.select_coverage_files()
        if not filenames:
            lctx.metadata.abort_refresh()
            return

        #
        # to begin mapping the loaded coverage data, we require that the
        # asynchronous database metadata refresh has completed. if it is
        # not done yet, we will block here until it completes.
        #
        # a progress dialog depicts the work remaining in the refresh
        #

        disassembler.show_wait_box("Building database metadata...")
        lctx.metadata.go_synchronous()
        await_future(future)

        #
        # now that the database metadata is available, we can use the director
        # to load and normalize the selected coverage files
        #

        disassembler.replace_wait_box("Loading coverage from disk...")
        created_coverage, errors = lctx.director.load_coverage_files(filenames, disassembler.replace_wait_box)

        #
        # if the director failed to map any coverage, the user probably
        # provided bad files. emit any warnings and bail...
        #

        if not created_coverage:
            lmsg("No coverage files could be loaded...")
            disassembler.hide_wait_box()
            warn_errors(errors)
            return

        #
        # activate the first of the newly loaded coverage file(s). this is the
        # one that will be visible in the coverage overview once opened
        #

        disassembler.replace_wait_box("Selecting coverage...")
        lctx.director.select_coverage(created_coverage[0].name)

        # all done! pop the coverage overview to show the user their results
        disassembler.hide_wait_box()
        lmsg("Successfully loaded %u coverage file(s)..." % len(created_coverage))
        self.open_coverage_overview(lctx.dctx)

        # finally, emit any notable issues that occurred during load
        warn_errors(errors, lctx.director.suppressed_errors)
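
Code Example #11 kicks off the metadata refresh asynchronously, lets the user pick files, and only then blocks on the refresh behind a wait box. The following is a standalone sketch of that ordering, using the standard concurrent.futures module instead of Lighthouse's refresh_async / await_future helpers; all names below are illustrative only.

import concurrent.futures
import time

def build_metadata():
    # hypothetical stand-in for the background metadata refresh
    time.sleep(0.1)
    return {"functions": 123}

# 1. kick the refresh off in the background
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(build_metadata)

# 2. ... the user selects coverage files here ...

# 3. block until the refresh completes (a wait box would cover this wait)
metadata = future.result()
print("Collected metadata for %u functions" % metadata["functions"])
executor.shutdown()
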
Code Example #12
    def interactive_load_batch(self, dctx=None):
        """
        Perform the user-interactive loading of a coverage batch.
        """
        lctx = self.get_context(dctx)

        #
        # kick off an asynchronous metadata refresh. this will run in the
        # background while the user is selecting which coverage files to load
        #

        future = lctx.metadata.refresh_async(progress_callback=metadata_progress)

        #
        # we will now prompt the user with an interactive file dialog so they
        # can select the coverage files they would like to load from disk
        #

        filepaths = lctx.select_coverage_files()
        if not filepaths:
            lctx.director.metadata.abort_refresh()
            return

        # prompt the user to name the new coverage aggregate
        default_name = "BATCH_%s" % lctx.director.peek_shorthand()
        ok, batch_name = prompt_string(
            "Batch Name:",
            "Please enter a name for this coverage",
            default_name
        )

        #
        # if user didn't enter a name for the batch (or hit cancel) we should
        # abort the loading process...
        #

        if not (ok and batch_name):
            lmsg("User failed to enter a name for the batch coverage...")
            lctx.director.metadata.abort_refresh()
            return

        #
        # to begin mapping the loaded coverage data, we require that the
        # asynchronous database metadata refresh has completed. if it is
        # not done yet, we will block here until it completes.
        #
        # a progress dialog depicts the work remaining in the refresh
        #

        disassembler.show_wait_box("Building database metadata...")
        lctx.metadata.go_synchronous()
        await_future(future)

        #
        # now that the database metadata is available, we can use the director
        # to normalize and condense (aggregate) all the coverage data
        #

        disassembler.replace_wait_box("Loading coverage from disk...")
        batch_coverage, errors = lctx.director.load_coverage_batch(
            filepaths,
            batch_name,
            disassembler.replace_wait_box
        )

        # if batch creation fails...
        if not batch_coverage:
            lmsg("Creation of batch '%s' failed..." % batch_name)
            disassembler.hide_wait_box()
            warn_errors(errors)
            return

        # select the newly created batch coverage
        disassembler.replace_wait_box("Selecting coverage...")
        lctx.director.select_coverage(batch_name)

        # all done! pop the coverage overview to show the user their results
        disassembler.hide_wait_box()
        lmsg("Successfully loaded batch %s..." % batch_name)
        self.open_coverage_overview(lctx.dctx)

        # finally, emit any notable issues that occurred during load
        warn_errors(errors, lctx.director.suppressed_errors)
Code Example #13
    def interactive_load_file(self):
        """
        Perform the user-interactive loading of individual coverage files.
        """
        self.palette.refresh_colors()

        #
        # kick off an asynchronous metadata refresh. this will run in the
        # background while the user is selecting which coverage files to load
        #

        future = self.director.refresh_metadata(
            progress_callback=metadata_progress)

        #
        # we will now prompt the user with an interactive file dialog so they
        # can select the coverage files they would like to load from disk
        #

        filenames = self._select_coverage_files()

        #
        # load the selected coverage files from disk (if any), returning a list
        # of loaded DrcovData objects (which contain coverage data)
        #

        disassembler.show_wait_box("Loading coverage from disk...")
        drcov_list = load_coverage_files(filenames)
        if not drcov_list:
            disassembler.hide_wait_box()
            self.director.metadata.abort_refresh()
            return

        #
        # to begin mapping the loaded coverage data, we require that the
        # asynchronous database metadata refresh has completed. if it is
        # not done yet, we will block here until it completes.
        #
        # a progress dialog depicts the work remaining in the refresh
        #

        disassembler.replace_wait_box("Building database metadata...")
        await_future(future)

        # insert the loaded drcov data objects into the director
        created_coverage, errors = self.director.create_coverage_from_drcov_list(
            drcov_list)

        #
        # if the director failed to map any coverage, the user probably
        # provided bad files. emit any warnings and bail...
        #

        if not created_coverage:
            lmsg("No coverage files could be loaded...")
            disassembler.hide_wait_box()
            warn_errors(errors)
            return

        #
        # activate the first of the newly loaded coverage file(s). this is the
        # one that will be visible in the coverage overview once opened
        #

        disassembler.replace_wait_box("Selecting coverage...")
        self.director.select_coverage(created_coverage[0])

        # all done! pop the coverage overview to show the user their results
        disassembler.hide_wait_box()
        lmsg("Successfully loaded %u coverage file(s)..." %
             len(created_coverage))
        self.open_coverage_overview()

        # finally, emit any notable issues that occurred during load
        warn_errors(errors)
Code Example #14
    def interactive_load_batch(self):
        """
        Perform the user-interactive loading of a coverage batch.
        """
        self.palette.refresh_colors()

        #
        # kick off an asynchronous metadata refresh. this will run in the
        # background while the user is selecting which coverage files to load
        #

        future = self.director.refresh_metadata(
            progress_callback=metadata_progress)

        #
        # we will now prompt the user with an interactive file dialog so they
        # can select the coverage files they would like to load from disk
        #

        filenames = self._select_coverage_files()

        #
        # load the selected coverage files from disk (if any), returning a list
        # of loaded DrcovData objects (which contain coverage data)
        #

        drcov_list = load_coverage_files(filenames)
        if not drcov_list:
            self.director.metadata.abort_refresh()
            return

        # prompt the user to name the new coverage aggregate
        default_name = "BATCH_%s" % self.director.peek_shorthand()
        ok, coverage_name = prompt_string(
            "Batch Name:", "Please enter a name for this coverage",
            default_name)

        #
        # if user didn't enter a name for the batch (or hit cancel) we should
        # abort the loading process...
        #

        if not (ok and coverage_name):
            lmsg("User failed to enter a name for the loaded batch...")
            self.director.metadata.abort_refresh()
            return

        #
        # to begin mapping the loaded coverage data, we require that the
        # asynchronous database metadata refresh has completed. if it is
        # not done yet, we will block here until it completes.
        #
        # a progress dialog depicts the work remaining in the refresh
        #

        disassembler.show_wait_box("Building database metadata...")
        await_future(future)

        #
        # now that the database metadata is available, we can use the director
        # to normalize and condense (aggregate) all the coverage data
        #

        new_coverage, errors = self.director.aggregate_drcov_batch(drcov_list)

        #
        # finally, we can inject the aggregated coverage data into the
        # director under the user specified batch name
        #

        disassembler.replace_wait_box("Mapping coverage...")
        self.director.create_coverage(coverage_name, new_coverage.data)

        # select the newly created batch coverage
        disassembler.replace_wait_box("Selecting coverage...")
        self.director.select_coverage(coverage_name)

        # all done! pop the coverage overview to show the user their results
        disassembler.hide_wait_box()
        lmsg("Successfully loaded batch %s..." % coverage_name)
        self.open_coverage_overview()

        # finally, emit any notable issues that occurred during load
        warn_errors(errors)
Code Example #15
    def create_coverage_from_drcov_list(self, drcov_list):
        """
        Create a number of database coverage mappings from a list of DrcovData.

        Returns a tuple of (created_coverage, errors)
        """
        created_coverage = []
        errors = []

        #
        # stop the director's aggregate from updating. this will prevent the
        # aggregate from recomputing after each individual mapping is created.
        # instead, we will wait till *all* have been created, computing the
        # new aggregate at the very end. this is far more performant.
        #

        self.suspend_aggregation()

        #
        # loop through the coverage data we've been given (drcov_list), and begin
        # the normalization process to translate / filter / flatten its blocks
        # into a generic format the director can consume (a list of addresses)
        #

        for i, drcov_data in enumerate(drcov_list, 1):

            # keep the user informed about our progress while loading coverage
            disassembler.replace_wait_box(
                "Normalizing and mapping coverage %u/%u" % (i, len(drcov_list))
            )

            #
            # translate the coverage data's basic block addresses to the
            # imagebase of the open database, and flatten the blocks to a
            # list of instruction addresses
            #

            try:
                coverage_data = self._normalize_drcov_data(drcov_data)
            except ValueError as e:
                errors.append((self.ERROR_COVERAGE_ABSENT, drcov_data.filepath))
                lmsg("Failed to normalize coverage %s" % drcov_data.filepath)
                lmsg("- %s" % e)
                continue

            #
            # before injecting the new coverage data (now a list of instruction
            # addresses), we check to see if there is an existing coverage
            # object under the same name.
            #
            # if there is an existing coverage mapping, odds are that the user
            # is probably re-loading the same coverage file in which case we
            # simply overwrite the old DatabaseCoverage object.
            #
            # but we have to be careful for the case where the user loads a
            # coverage file from a different directory, but under the same name
            #
            # e.g:
            #  - C:\coverage\foo.log
            #  - C:\coverage\testing\foo.log
            #
            # in these cases, we will append a suffix to the new coverage file
            #

            coverage_name = os.path.basename(drcov_data.filepath)
            coverage = self.get_coverage(coverage_name)

            # assign a suffix to the coverage name in the event of a collision
            if coverage and coverage.filepath != drcov_data.filepath:
                for i in xrange(2, 0x100000):
                    new_name = "%s_%u" % (coverage_name, i)
                    if not self.get_coverage(new_name):
                        break
                coverage_name = new_name

            #
            # finally, we can ask the director to create a coverage mapping
            # from the data we have pre-processed for it
            #

            coverage = self.create_coverage(
                coverage_name,
                coverage_data,
                drcov_data.filepath
            )
            created_coverage.append(coverage_name)

            # warn when loaded coverage appears to be poorly mapped (suspicious)
            if coverage.suspicious:
                errors.append((self.ERROR_COVERAGE_SUSPICIOUS, drcov_data.filepath))
                lmsg("Badly mapped coverage %s" % drcov_data.filepath)

            # warn when loaded coverage (for this module) appears to be empty
            if not len(coverage.nodes):
                errors.append((self.ERROR_COVERAGE_ABSENT, drcov_data.filepath))
                lmsg("No relevant coverage data in %s" % drcov_data.filepath)

        #
        # resume the director's aggregation service, triggering an update to
        # recompute the aggregate with the newly loaded coverage
        #

        disassembler.replace_wait_box("Recomputing coverage aggregate...")
        self.resume_aggregation()

        # done
        return (created_coverage, errors)
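
Code Example #15 appends a numeric suffix (_2, _3, ...) whenever a coverage file shares a basename with an already-loaded file from a different path. That naming rule can be distilled into a small pure function; the sketch below is illustrative only (unique_coverage_name and existing_names are not Lighthouse APIs).

def unique_coverage_name(base_name, existing_names, limit=0x100000):
    """
    Return base_name, or base_name plus a numeric suffix, such that the
    result does not clash with any name in existing_names. Mirrors the
    suffix loop in create_coverage_from_drcov_list(...) above.
    """
    if base_name not in existing_names:
        return base_name
    for i in range(2, limit):
        candidate = "%s_%u" % (base_name, i)
        if candidate not in existing_names:
            return candidate
    raise ValueError("no unique name available for %r" % base_name)

# e.g. unique_coverage_name("foo.log", {"foo.log"}) returns "foo.log_2"
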
Code Example #16
File: coverage.py    Project: scpczc/DIYDynamoRIO
    def buidExecuteTrace(self):
        """
        Group the recorded basic blocks by thread and rebuild each thread's
        execute (call/return) trace as ExecuteFunctionCoverage objects.
        """
        dic_bb = {}
        all_len = len(self.orgdata.basic_blocks)
        for bb in self.orgdata.basic_blocks:
            if not dic_bb.has_key(bb.thread_id):
                dic_bb[bb.thread_id] = []
            dic_bb[bb.thread_id].append(bb)
        disassembler.replace_wait_box("Building execute trace metadata...")
        len_idx = 0
        for ke, ve in dic_bb.iteritems():
            per_thread_Excute_Function_list = []
            per_thread_Excute_Function_list_cache = []
            bb_start = 0
            bb_end = 0
            cur_func = None
            check_bb = False
            if len(ve) > 3000000:
                for bb_idx, bb in enumerate(ve):
                    if bb_idx % 200000 == 0:
                        per_thread_Excute_Function_list_cache = self.shrink_Excute_Function_list_cache(
                            per_thread_Excute_Function_list_cache)
                    len_idx = len_idx + 1
                    if len_idx % 30000 == 0:
                        disassembler.replace_wait_box("Building execute trace metadata %u/%u" % (len_idx, all_len))
                    if bb.call_type == lighthouse.core.BASIC_BLOCK:
                        bb_start = bb.start
                        bb_end = bb.start + bb.size
                        check_bb = True
                    if bb.call_type == lighthouse.core.FUNC_DIRECT_CALL or bb.call_type == lighthouse.core.FUNC_INDIRECT_CALL:
                        if cur_func is not None and ((check_bb is True and bb.start <= bb_end and bb.start >= bb_start) or (
                                check_bb is False and bb.start >= bb_end)):
                            sub_cov = ExecuteFunctionCoverage(cur_func, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                              bb.mod_id_to, bb.to, bb.ret)
                            cur_func = sub_cov
                        elif cur_func is None:
                            root_cov = ExecuteFunctionCoverage(None, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                               bb.mod_id_to, bb.to, bb.ret)
                            cur_func = root_cov
                        else:
                            root_cov = ExecuteFunctionCoverage(None, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                               bb.mod_id_to, bb.to, bb.ret)
                            cur_func = root_cov

                        if cur_func is not None and cur_func.trace_depth < 100:
                            per_thread_Excute_Function_list.append(cur_func)
                            per_thread_Excute_Function_list_cache.append(cur_func)
                        bb_start = bb.start
                        bb_end = bb.to
                        check_bb = False
                    if bb.call_type == lighthouse.core.FUNC_RETURN:
                        ret_cov = self.getReturnCaller(per_thread_Excute_Function_list_cache, bb.to, bb_idx,
                                                       bb.thread_id, bb.mod_id_to)
                        if ret_cov is not None:
                            cur_func = ret_cov
                        else:
                            cur_func = None
                        bb_start = bb.start
                        bb_end = bb.to
                        check_bb = False
            else:
                for bb_idx, bb in enumerate(ve):
                    len_idx = len_idx + 1
                    if len_idx % 50 == 0 or len_idx == all_len:
                        disassembler.replace_wait_box("Building execute trace metadata %u/%u" % (len_idx, all_len))
                    if bb.call_type == lighthouse.core.BASIC_BLOCK:
                        bb_start = bb.start
                        bb_end = bb.start + bb.size
                        check_bb = True
                    if bb.call_type == lighthouse.core.FUNC_DIRECT_CALL or bb.call_type == lighthouse.core.FUNC_INDIRECT_CALL:
                        if cur_func is not None:
                            sub_cov = ExecuteFunctionCoverage(cur_func, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                              bb.mod_id_to, bb.to, bb.ret)
                            cur_func = sub_cov
                        elif cur_func is None:
                            root_cov = ExecuteFunctionCoverage(None, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                               bb.mod_id_to, bb.to, bb.ret)
                            cur_func = root_cov
                        else:
                            tmp_func = self.getFuncCaller(per_thread_Excute_Function_list, bb.start, bb_idx,
                                                          bb.thread_id, bb.mod_id)
                            if tmp_func is not None:
                                sub_cov = ExecuteFunctionCoverage(None, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                                  bb.mod_id_to, bb.to, bb.ret)
                                cur_func = sub_cov
                            else:
                                root_cov = ExecuteFunctionCoverage(None, bb_idx, bb.thread_id, bb.mod_id, bb.start,
                                                                   bb.mod_id_to, bb.to, bb.ret)
                                cur_func = root_cov

                        if cur_func is not None:
                            per_thread_Excute_Function_list.append(cur_func)
                        bb_start = bb.start
                        bb_end = bb.to
                        check_bb = False
                    if bb.call_type == lighthouse.core.FUNC_RETURN:
                        ret_cov = self.getReturnCaller(per_thread_Excute_Function_list, bb.to, bb_idx, bb.thread_id,
                                                       bb.mod_id_to)
                        if ret_cov is not None:
                            cur_func = ret_cov
                        else:
                            cur_func = None
                        bb_start = bb.start
                        bb_end = bb.to
                        check_bb = False
            self.all_Excute_Function_list.extend(per_thread_Excute_Function_list)
        self.check_root_trace(self.all_Excute_Function_list)