# Module-level imports these methods rely on (the full import list of the
# enclosing files is not shown in this section):
import logging
import os
import re
import subprocess
import time

from collections import defaultdict
from xml.dom.minidom import parse
from xml.parsers.expat import ExpatError

import gdb_helper

# Global address table, set up when gdb-based symbolization is requested.
TheAddressTable = None


def __init__(self, source_dir, files, use_gdb=False):
  '''Reads in a set of files.

  Args:
    source_dir: Path to top of source tree for this build
    files: A list of filenames.
    use_gdb: whether to use gdb to resolve addresses in the reports
  '''
  self.use_gdb = use_gdb
  if use_gdb:
    global TheAddressTable
    TheAddressTable = gdb_helper.AddressTable()
  self.races = []
  self.used_suppressions = {}
  for file in files:
    self.ParseReportFile(file)
  if self.use_gdb:
    TheAddressTable.ResolveAll()
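
# gdb_helper.AddressTable is defined elsewhere; only its use is visible in
# this section. The stub below is a minimal sketch of the contract the code
# above relies on (construction, AddBinaryAt() fed from <load_obj> records,
# and a final ResolveAll() pass). The real class symbolizes the collected
# addresses by driving gdb, which this illustrative stub does not attempt.
class AddressTableSketch(object):
  def __init__(self):
    # Maps load address -> path of the binary mapped at that address.
    self._binaries = {}

  def AddBinaryAt(self, binary, load_address):
    # Called once per <load_obj> record parsed out of the XML log.
    self._binaries[load_address] = binary

  def ResolveAll(self):
    # The real implementation batch-resolves every address queued while
    # parsing; there is nothing to resolve in this sketch.
    pass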
def GetReports(self, files):
  '''Extracts reports from a set of files.

  Reads a set of files and returns a list of all discovered
  ThreadSanitizer race reports. As a side effect, populates
  self.used_suppressions with appropriate info.
  '''
  global TheAddressTable
  if self._use_gdb:
    TheAddressTable = gdb_helper.AddressTable()
  else:
    TheAddressTable = None
  reports = []
  self.used_suppressions = {}
  for file in files:
    reports.extend(self.ParseReportFile(file))
  if self._use_gdb:
    TheAddressTable.ResolveAll()
  # Make each line of each report a string.
  reports = map(lambda x: map(str, x), reports)
  return [''.join(report_lines) for report_lines in reports]
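
# A hypothetical call site for GetReports(); 'analyzer' stands in for an
# instance of the (unshown) enclosing class, constructed so that
# self._use_gdb is set:
#
#   reports = analyzer.GetReports(['tsan.log.1234', 'tsan.log.1235'])
#   for report in reports:
#     print report  # each report is one multi-line string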
def Report(self, files, testcase, check_sanity=False):
  '''Reads in a set of files and prints Memcheck report.

  Args:
    files: A list of filenames.
    testcase: The test case name, attached to each reported error.
    check_sanity: if true, search for SANITY_TEST_SUPPRESSIONS
  '''
  # Beyond the detailed errors parsed by ValgrindError above,
  # the xml file contains records describing suppressions that were used:
  # <suppcounts>
  #   <pair>
  #     <count>28</count>
  #     <name>pango_font_leak_todo</name>
  #   </pair>
  #   <pair>
  #     <count>378</count>
  #     <name>bug_13243</name>
  #   </pair>
  # </suppcounts>
  # Collect these and print them at the end.
  #
  # With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
  # the file also includes records of the form
  # <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
  # giving the filename and load address of each binary that was mapped
  # into the process.
  global TheAddressTable
  if self._use_gdb:
    TheAddressTable = gdb_helper.AddressTable()
  else:
    TheAddressTable = None

  cur_report_errors = set()
  suppcounts = defaultdict(int)
  badfiles = set()

  if self._analyze_start_time is None:
    self._analyze_start_time = time.time()
  start_time = self._analyze_start_time

  parse_failed = False
  for file in files:
    # Wait up to three minutes for valgrind to finish writing all files,
    # but after that, just skip incomplete files and warn.
    f = open(file, "r+")
    pid = re.match(r".*\.([0-9]+)$", file)
    if pid:
      pid = pid.groups()[0]
    found = False
    running = True
    firstrun = True
    skip = False
    origsize = os.path.getsize(file)
    while (running and not found and not skip and
           (firstrun or
            ((time.time() - start_time) < self.LOG_COMPLETION_TIMEOUT))):
      firstrun = False
      f.seek(0)
      if pid:
        # Make sure the process is still running so we don't wait for
        # 3 minutes if it was killed. See http://crbug.com/17453
        ps_out = subprocess.Popen("ps p %s" % pid, shell=True,
                                  stdout=subprocess.PIPE).stdout
        if len(ps_out.readlines()) < 2:
          running = False
      else:
        skip = True
        running = False
      found = log_is_finished(f, False)
      if not running and not found:
        logging.warn("Valgrind process PID = %s is not running but its "
                     "XML log has not been finished correctly.\n"
                     "Make it up by adding some closing tags manually." % pid)
        found = log_is_finished(f, not running)
      if running and not found:
        time.sleep(1)
    f.close()
    if not found:
      badfiles.add(file)
    else:
      newsize = os.path.getsize(file)
      if origsize > newsize + 1:
        logging.warn(str(origsize - newsize) +
                     " bytes of junk were after </valgrindoutput> in %s!" %
                     file)
      try:
        parsed_file = parse(file)
      except ExpatError, e:
        parse_failed = True
        logging.warn("could not parse %s: %s" % (file, e))
        lineno = e.lineno - 1
        context_lines = 5
        context_start = max(0, lineno - context_lines)
        context_end = lineno + context_lines + 1
        context_file = open(file, "r")
        for i in range(0, context_start):
          context_file.readline()
        for i in range(context_start, context_end):
          context_data = context_file.readline().rstrip()
          if i != lineno:
            logging.warn("  %s" % context_data)
          else:
            logging.warn("> %s" % context_data)
        context_file.close()
        continue
      if TheAddressTable is not None:
        load_objs = parsed_file.getElementsByTagName("load_obj")
        for load_obj in load_objs:
          obj = getTextOf(load_obj, "obj")
          ip = getTextOf(load_obj, "ip")
          TheAddressTable.AddBinaryAt(obj, ip)
      commandline = None
      preamble = parsed_file.getElementsByTagName("preamble")[0]
      for node in preamble.getElementsByTagName("line"):
        if node.localName == "line":
          for x in node.childNodes:
            if x.nodeType == node.TEXT_NODE and "Command" in x.data:
              commandline = x.data
              break
      raw_errors = parsed_file.getElementsByTagName("error")
      for raw_error in raw_errors:
        # Ignore "possible" leaks for now by default.
        if (self._show_all_leaks or
            getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
          error = ValgrindError(self._source_dir, raw_error, commandline,
                                testcase)
          if error not in cur_report_errors:
            # We haven't seen this error in the current report yet...
            if error in self._errors:
              # ... but we saw it in earlier reports, e.g. a previous UI test
              cur_report_errors.add("This error was already printed in "
                                    "some other test, see 'hash=#%016X#'" %
                                    error.ErrorHash())
            else:
              # ... and we haven't seen it in other tests either
              self._errors.add(error)
              cur_report_errors.add(error)
      suppcountlist = parsed_file.getElementsByTagName("suppcounts")
      if len(suppcountlist) > 0:
        suppcountlist = suppcountlist[0]
        for node in suppcountlist.getElementsByTagName("pair"):
          count = getTextOf(node, "count")
          name = getTextOf(node, "name")
          suppcounts[name] += int(count)
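
# Neither log_is_finished() nor getTextOf() is defined in this section. The
# sketches below match how Report() calls them: log_is_finished(f, True) may
# append closing tags to make a truncated log parseable, and getTextOf()
# pulls the text out of named child elements of a minidom node. Treat both
# as illustrations of the expected behavior rather than the real helpers.

def log_is_finished(f, force_finish):
  '''Returns True if |f| holds a complete valgrind XML log, truncating any
  junk found after the closing tag. If |force_finish| is set and the log
  stopped at a sane tag boundary, appends a closing </valgrindoutput> tag.'''
  f.seek(0)
  prev_line = ""
  while True:
    line = f.readline()
    if line == "":
      if not force_finish:
        return False
      # The log is incomplete; patch it up if it stopped at a tag boundary.
      if prev_line.strip() in ["</error>", "</errorcounts>", "</status>"]:
        f.write("</valgrindoutput>\n")
        return True
      return False
    if "</valgrindoutput>" in line:
      # Valgrind sometimes leaves garbage after the closing tag on a crash.
      f.truncate()
      return True
    prev_line = line


def getTextOf(top_node, name):
  '''Returns the concatenated text of all DOM nodes named |name| that are
  children of |top_node|.'''
  text = ""
  for node in top_node.getElementsByTagName(name):
    text += "".join([child.data for child in node.childNodes
                     if child.nodeType == child.TEXT_NODE])
  return text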
def __init__(self, source_dir, files, show_all_leaks=False, use_gdb=False):
  '''Reads in a set of files.

  Args:
    source_dir: Path to top of source tree for this build
    files: A list of filenames.
    show_all_leaks: whether to show even less important leaks
    use_gdb: whether to use gdb to resolve addresses in the reports
  '''
  # Beyond the detailed errors parsed by ValgrindError above,
  # the xml file contains records describing suppressions that were used:
  # <suppcounts>
  #   <pair>
  #     <count>28</count>
  #     <name>pango_font_leak_todo</name>
  #   </pair>
  #   <pair>
  #     <count>378</count>
  #     <name>bug_13243</name>
  #   </pair>
  # </suppcounts>
  # Collect these and print them at the end.
  #
  # With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
  # the file also includes records of the form
  # <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
  # giving the filename and load address of each binary that was mapped
  # into the process.
  global TheAddressTable
  if use_gdb:
    TheAddressTable = gdb_helper.AddressTable()

  self._errors = set()
  self._suppcounts = {}
  badfiles = set()
  start = time.time()
  self._parse_failed = False
  for file in files:
    # Wait up to three minutes for valgrind to finish writing all files,
    # but after that, just skip incomplete files and warn.
    f = open(file, "r+")
    found = False
    firstrun = True
    origsize = os.path.getsize(file)
    while not found and (firstrun or ((time.time() - start) < 180.0)):
      firstrun = False
      f.seek(0)
      found = find_and_truncate(f)
      if not found:
        time.sleep(1)
    f.close()
    if not found:
      badfiles.add(file)
    else:
      newsize = os.path.getsize(file)
      if origsize > newsize + 1:
        logging.warn(str(origsize - newsize) +
                     " bytes of junk were after </valgrindoutput> in %s!" %
                     file)
      try:
        parsed_file = parse(file)
      except ExpatError, e:
        self._parse_failed = True
        logging.warn("could not parse %s: %s" % (file, e))
        lineno = e.lineno - 1
        context_lines = 5
        context_start = max(0, lineno - context_lines)
        context_end = lineno + context_lines + 1
        context_file = open(file, "r")
        for i in range(0, context_start):
          context_file.readline()
        for i in range(context_start, context_end):
          context_data = context_file.readline().rstrip()
          if i != lineno:
            logging.warn("  %s" % context_data)
          else:
            logging.warn("> %s" % context_data)
        context_file.close()
        continue
      if TheAddressTable is not None:
        load_objs = parsed_file.getElementsByTagName("load_obj")
        for load_obj in load_objs:
          obj = getTextOf(load_obj, "obj")
          ip = getTextOf(load_obj, "ip")
          TheAddressTable.AddBinaryAt(obj, ip)
      commandline = None
      preamble = parsed_file.getElementsByTagName("preamble")[0]
      for node in preamble.getElementsByTagName("line"):
        if node.localName == "line":
          for x in node.childNodes:
            if x.nodeType == node.TEXT_NODE and "Command" in x.data:
              commandline = x.data
              break
      raw_errors = parsed_file.getElementsByTagName("error")
      for raw_error in raw_errors:
        # Ignore "possible" leaks for now by default.
        if (show_all_leaks or
            getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
          error = ValgrindError(source_dir, raw_error, commandline)
          self._errors.add(error)
      suppcountlist = parsed_file.getElementsByTagName("suppcounts")
      if len(suppcountlist) > 0:
        suppcountlist = suppcountlist[0]
        for node in suppcountlist.getElementsByTagName("pair"):
          count = getTextOf(node, "count")
          name = getTextOf(node, "name")
          if name in self._suppcounts:
            self._suppcounts[name] += int(count)
          else:
            self._suppcounts[name] = int(count)
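
# find_and_truncate() is also defined outside this section. Below is a
# minimal sketch consistent with how the loop above uses it: scan the log
# for the closing tag and drop anything valgrind wrote after it. The real
# helper may differ in detail.
def find_and_truncate(f):
  '''Returns True if |f| contains </valgrindoutput>, truncating the file
  right after that line; returns False if the tag has not appeared yet.'''
  f.seek(0)
  while True:
    line = f.readline()
    if line == "":
      return False
    if "</valgrindoutput>" in line:
      # Valgrind often leaves garbage after </valgrindoutput> upon a crash.
      f.truncate()
      return True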