Example #1
    def Report(self, filenames, testcase, check_sanity):
        sys.stdout.flush()
        # TODO(timurrrr): support positive tests / check_sanity==True
        self.used_suppressions = defaultdict(int)

        to_report = []
        reports_for_this_test = set()
        for f in filenames:
            cur_reports = self.ParseReportFile(f, testcase)

            # Filter out the reports that were there in previous tests.
            for r in cur_reports:
                if r in reports_for_this_test:
                    # A similar report is already queued for printing in this test.
                    pass
                elif r in self.known_errors:
                    # A similar report was already printed in one of the previous tests.
                    to_report.append("This error was already printed in some "
                                     "other test, see 'hash=#%016X#'" %
                                     r.ErrorHash())
                    reports_for_this_test.add(r)
                else:
                    self.known_errors.add(r)
                    reports_for_this_test.add(r)
                    to_report.append(r)

        common.PrintUsedSuppressionsList(self.used_suppressions)

        if not to_report:
            logging.info("PASS: No error reports found")
            return 0

        sys.stdout.flush()
        sys.stderr.flush()
        logging.info("Found %i error reports" % len(to_report))
        for report in to_report:
            self.error_count += 1
            logging.info("Report #%d\n%s" % (self.error_count, report))
        logging.info("Total: %i error reports" % len(to_report))
        sys.stdout.flush()
        return -1
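
A note on the pattern above: reports are deduplicated across tests by keeping a
persistent known_errors set and replacing repeats with a short back-reference
derived from ErrorHash(). A minimal, self-contained sketch of the same idea
(the FakeReport class and dedup helper below are hypothetical, not part of the
example):

class FakeReport(object):
    # Hypothetical stand-in for the report objects used above: hashable,
    # comparable, and exposing ErrorHash() as in the examples.
    def __init__(self, text):
        self.text = text

    def __eq__(self, other):
        return self.text == other.text

    def __hash__(self):
        return hash(self.text)

    def ErrorHash(self):
        # A 64-bit value, matching the "%016X" format used above.
        return hash(self.text) & 0xFFFFFFFFFFFFFFFF

known_errors = set()  # persists across tests, like self.known_errors above

def dedup(cur_reports):
    to_report = []
    reports_for_this_test = set()
    for r in cur_reports:
        if r in reports_for_this_test:
            continue  # duplicate within the same test: print only once
        elif r in known_errors:
            # Seen in an earlier test: print a back-reference instead.
            to_report.append("already printed elsewhere, see 'hash=#%016X#'"
                             % r.ErrorHash())
        else:
            known_errors.add(r)
            to_report.append(r)
        reports_for_this_test.add(r)
    return to_report

Calling dedup([FakeReport("race")]) twice yields the report itself the first
time and the back-reference string the second time.
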
Example #2
    def Report(self, files, testcase, check_sanity=False):
        '''Reads in a set of files and prints a ThreadSanitizer report.

        Args:
          files: A list of filenames.
          testcase: Name of the current test, attached to each report.
          check_sanity: If True, check that SANITY_TEST_SUPPRESSION was used.
        '''

        # Store the testcase in an instance attribute to avoid passing it
        # through about five helper functions.
        self._cur_testcase = testcase
        reports = self.GetReports(files)
        self._cur_testcase = None  # just in case, shouldn't be used anymore

        common.PrintUsedSuppressionsList(self.used_suppressions)

        retcode = 0
        if reports:
            sys.stdout.flush()
            sys.stderr.flush()
            logging.info("FAIL! Found %i report(s)" % len(reports))
            for report in reports:
                logging.info('\n' + report)
            sys.stdout.flush()
            retcode = -1

        # Report tool's insanity even if there were errors.
        if (check_sanity and TsanAnalyzer.SANITY_TEST_SUPPRESSION
                not in self.used_suppressions):
            logging.error("FAIL! Sanity check failed!")
            retcode = -3

        if retcode != 0:
            return retcode

        logging.info("PASS: No reports found")
        return 0
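
Both examples above call common.PrintUsedSuppressionsList, which is not shown
here. Judging from how used_suppressions is built (a defaultdict(int) mapping
suppression names to hit counts), a minimal stand-in might look like the
sketch below; the real helper's exact output format is an assumption:

from collections import defaultdict

def PrintUsedSuppressionsList(used_suppressions):
    # Hypothetical stand-in for common.PrintUsedSuppressionsList: print each
    # suppression that fired together with its hit count, in a stable order.
    if not used_suppressions:
        return
    print("Suppressions used:")
    print("  count name")
    for name in sorted(used_suppressions):
        print("  %5d %s" % (used_suppressions[name], name))

counts = defaultdict(int)
counts["pango_font_leak_todo"] += 28
counts["bug_13243"] += 378
PrintUsedSuppressionsList(counts)
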
Example #3
class MemcheckAnalyzer:
    '''Given a set of Valgrind XML files, parse all the errors out of them,
    deduplicate them, and output the results.'''

    SANITY_TEST_SUPPRESSIONS = {
        "Memcheck sanity test 01 (memory leak).": 1,
        "Memcheck sanity test 02 (malloc/read left).": 1,
        "Memcheck sanity test 03 (malloc/read right).": 1,
        "Memcheck sanity test 04 (malloc/write left).": 1,
        "Memcheck sanity test 05 (malloc/write right).": 1,
        "Memcheck sanity test 06 (new/read left).": 1,
        "Memcheck sanity test 07 (new/read right).": 1,
        "Memcheck sanity test 08 (new/write left).": 1,
        "Memcheck sanity test 09 (new/write right).": 1,
        "Memcheck sanity test 10 (write after free).": 1,
        "Memcheck sanity test 11 (write after delete).": 1,
        "Memcheck sanity test 12 (array deleted without []).": 1,
        "Memcheck sanity test 13 (single element deleted with []).": 1,
        "Memcheck sanity test 14 (malloc/read uninit).": 1,
        "Memcheck sanity test 15 (new/read uninit).": 1,
    }

    # Max time to wait for memcheck logs to complete.
    LOG_COMPLETION_TIMEOUT = 180.0

    def __init__(self, source_dir, show_all_leaks=False, use_gdb=False):
        '''Create a parser for Memcheck logs.

        Args:
          source_dir: Path to the top of the source tree for this build.
          show_all_leaks: Whether to show even less important leaks.
          use_gdb: Whether to use gdb to resolve source filenames and line
                   numbers in the report stacktraces.
        '''
        self._source_dir = source_dir
        self._show_all_leaks = show_all_leaks
        self._use_gdb = use_gdb

        # Contains the set of unique errors
        self._errors = set()

        # Contains the time when we started analyzing the first log file.
        # This variable is used to skip incomplete logs after some timeout.
        self._analyze_start_time = None

    def Report(self, files, testcase, check_sanity=False):
        '''Reads in a set of files and prints a Memcheck report.

        Args:
          files: A list of filenames.
          testcase: Name of the current test, attached to each report.
          check_sanity: If True, check that all SANITY_TEST_SUPPRESSIONS
                        were used.
        '''
        # Beyond the detailed errors parsed by ValgrindError above,
        # the XML files contain records describing suppressions that were used:
        # <suppcounts>
        #  <pair>
        #    <count>28</count>
        #    <name>pango_font_leak_todo</name>
        #  </pair>
        #  <pair>
        #    <count>378</count>
        #    <name>bug_13243</name>
        #  </pair>
        # </suppcounts>
        # Collect these and print them at the end.
        #
        # With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
        # the file also includes records of the form
        # <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
        # giving the filename and load address of each binary that was mapped
        # into the process.

        global TheAddressTable
        if self._use_gdb:
            TheAddressTable = gdb_helper.AddressTable()
        else:
            TheAddressTable = None
        cur_report_errors = set()
        suppcounts = defaultdict(int)
        badfiles = set()

        if self._analyze_start_time is None:
            self._analyze_start_time = time.time()
        start_time = self._analyze_start_time

        parse_failed = False
        for file in files:
            # Wait up to three minutes for valgrind to finish writing all files,
            # but after that, just skip incomplete files and warn.
            f = open(file, "r+")
            pid = re.match(r".*\.([0-9]+)$", file)
            if pid:
                pid = pid.groups()[0]
            found = False
            running = True
            firstrun = True
            skip = False
            origsize = os.path.getsize(file)
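            # Poll until the closing tag shows up, the Valgrind process exits,
            # the file has no PID suffix (skip), or LOG_COMPLETION_TIMEOUT
            # expires; the first iteration always runs.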
            while (running and not found and not skip and (firstrun or (
                (time.time() - start_time) < self.LOG_COMPLETION_TIMEOUT))):
                firstrun = False
                f.seek(0)
                if pid:
                    # Make sure the process is still running so we don't wait for
                    # 3 minutes if it was killed. See http://crbug.com/17453
                    ps_out = subprocess.Popen("ps p %s" % pid,
                                              shell=True,
                                              stdout=subprocess.PIPE).stdout
                    if len(ps_out.readlines()) < 2:
                        running = False
                else:
                    skip = True
                    running = False
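                # log_is_finished() is defined elsewhere in this module; its
                # second argument apparently tells it to repair a truncated
                # log by appending the missing closing tags.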
                found = log_is_finished(f, False)
                if not running and not found:
                    logging.warning(
                        "Valgrind process PID = %s is not running but its "
                        "XML log has not been finished correctly.\n"
                        "Fixing it up by appending the missing closing tags." %
                        pid)
                    found = log_is_finished(f, not running)
                if running and not found:
                    time.sleep(1)
            f.close()
            if not found:
                badfiles.add(file)
            else:
                newsize = os.path.getsize(file)
                if origsize > newsize + 1:
                    logging.warning(
                        "%d bytes of junk were after </valgrindoutput> in %s!" %
                        (origsize - newsize, file))
                try:
                    parsed_file = parse(file)
                except ExpatError as e:
                    parse_failed = True
                    logging.warning("could not parse %s: %s" % (file, e))
                    lineno = e.lineno - 1
                    context_lines = 5
                    context_start = max(0, lineno - context_lines)
                    context_end = lineno + context_lines + 1
                    context_file = open(file, "r")
                    for i in range(0, context_start):
                        context_file.readline()
                    for i in range(context_start, context_end):
                        context_data = context_file.readline().rstrip()
                        if i != lineno:
                            logging.warning("  %s" % context_data)
                        else:
                            logging.warning("> %s" % context_data)
                    context_file.close()
                    continue
                if TheAddressTable is not None:
                    load_objs = parsed_file.getElementsByTagName("load_obj")
                    for load_obj in load_objs:
                        obj = getTextOf(load_obj, "obj")
                        ip = getTextOf(load_obj, "ip")
                        TheAddressTable.AddBinaryAt(obj, ip)

                commandline = None
                preamble = parsed_file.getElementsByTagName("preamble")[0]
                for node in preamble.getElementsByTagName("line"):
                    if node.localName == "line":
                        for x in node.childNodes:
                            if x.nodeType == node.TEXT_NODE and "Command" in x.data:
                                commandline = x.data
                                break

                raw_errors = parsed_file.getElementsByTagName("error")
                for raw_error in raw_errors:
                    # Ignore "possible" leaks for now by default.
                    if (self._show_all_leaks or getTextOf(raw_error, "kind") !=
                            "Leak_PossiblyLost"):
                        error = ValgrindError(self._source_dir, raw_error,
                                              commandline, testcase)
                        if error not in cur_report_errors:
                            # We haven't seen this error during this report yet...
                            if error in self._errors:
                                # ... but we saw it in earlier reports, e.g. a previous UI test.
                                cur_report_errors.add(
                                    "This error was already printed in "
                                    "some other test, see 'hash=#%016X#'" %
                                    error.ErrorHash())
                            else:
                                # ... and we haven't seen it in other tests as well
                                self._errors.add(error)
                                cur_report_errors.add(error)

                suppcountlist = parsed_file.getElementsByTagName("suppcounts")
                if len(suppcountlist) > 0:
                    suppcountlist = suppcountlist[0]
                    for node in suppcountlist.getElementsByTagName("pair"):
                        count = getTextOf(node, "count")
                        name = getTextOf(node, "name")
                        suppcounts[name] += int(count)

        if len(badfiles) > 0:
            logging.warn("valgrind didn't finish writing %d files?!" %
                         len(badfiles))
            for file in badfiles:
                logging.warn("Last 20 lines of %s :" % file)
                os.system("tail -n 20 '%s' 1>&2" % file)

        if parse_failed:
            logging.error("FAIL! Couldn't parse Valgrind output file")
            return -2

        common.PrintUsedSuppressionsList(suppcounts)

        retcode = 0
        if cur_report_errors:
            logging.error("FAIL! There were %s errors: " %
                          len(cur_report_errors))

            if TheAddressTable is not None:
                TheAddressTable.ResolveAll()

            for error in cur_report_errors:
                logging.error(error)

            retcode = -1

        # Report tool's insanity even if there were errors.
        if check_sanity:
            # Copy the dict so repeated calls don't mutate the class attribute.
            remaining_sanity_supp = dict(MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS)
            for (name, count) in suppcounts.items():
                if (name in remaining_sanity_supp
                        and remaining_sanity_supp[name] == count):
                    del remaining_sanity_supp[name]
            if remaining_sanity_supp:
                logging.error("FAIL! Sanity check failed!")
                logging.info("The following test errors were not handled: ")
                for (name, count) in remaining_sanity_supp.items():
                    logging.info("  * %dx %s" % (count, name))
                retcode = -3

        if retcode != 0:
            return retcode

        logging.info("PASS! No errors found!")
        return 0
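
The <suppcounts> handling in Report() above is easy to exercise in isolation.
Below is a self-contained sketch using xml.dom.minidom; the getTextOf helper
re-implements the contract the examples assume, and the XML fragment is
made-up sample data in the format documented in the comments above:

from collections import defaultdict
from xml.dom.minidom import parseString

def getTextOf(node, name):
    # Assumed contract of the getTextOf helper used above: concatenate the
    # text children of the first <name> element under node.
    elem = node.getElementsByTagName(name)[0]
    return "".join(c.data for c in elem.childNodes
                   if c.nodeType == c.TEXT_NODE)

xml_fragment = """
<valgrindoutput>
  <suppcounts>
    <pair><count>28</count><name>pango_font_leak_todo</name></pair>
    <pair><count>378</count><name>bug_13243</name></pair>
  </suppcounts>
</valgrindoutput>
"""

suppcounts = defaultdict(int)
doc = parseString(xml_fragment)
for suppcountlist in doc.getElementsByTagName("suppcounts"):
    for pair in suppcountlist.getElementsByTagName("pair"):
        suppcounts[getTextOf(pair, "name")] += int(getTextOf(pair, "count"))

print(dict(suppcounts))  # {'pango_font_leak_todo': 28, 'bug_13243': 378}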