def createSummary(self, log):
    """Scan the command output for warning lines.

    Lines matching self.warningPattern are collected into a separate
    "warnings" log for this step, and the build-wide 'warnings-count'
    property is incremented by the number of matches.
    """
    self.warnCount = 0
    pattern = self.warningPattern
    # No pattern configured -> nothing to scan for.
    if not pattern:
        return
    # Accept either a pre-compiled regex or a plain string pattern.
    if isinstance(pattern, str):
        pattern = re.compile(pattern)

    # Collect every output line the warning regex matches.
    # TODO: use log.readlines(), except we need to decide about stdout vs
    # stderr
    matched = [entry for entry in log.getText().split("\n")
               if pattern.match(entry)]
    self.warnCount = len(matched)

    # Only publish a warnings log when there was at least one hit.
    if matched:
        self.addCompleteLog("warnings", "\n".join(matched) + "\n")

    # Accumulate into the build-wide counter; the property may not exist
    # yet on the first warning-producing step.
    try:
        previous = self.getProperty("warnings-count")
    except KeyError:
        previous = 0
    self.setProperty("warnings-count", previous + self.warnCount)
def createSummary(self, log):
    """
    Match log lines against warningPattern.

    Warnings are collected into another log for this step, and the
    build-wide 'warnings-count' is updated.  Directory enter/leave
    patterns maintain self.directoryStack so warnings can be attributed
    to the directory make was in when they were emitted.
    """
    self.warnCount = 0
    if not self.warningPattern:
        return
    wre = self.warningPattern
    if isinstance(wre, str):
        wre = re.compile(wre)

    def _as_regex(pat):
        # Accept either None, a plain string, or a pre-compiled pattern.
        if pat is not None and isinstance(pat, str):
            return re.compile(pat)
        return pat

    enter_re = _as_regex(self.directoryEnterPattern)
    leave_re = _as_regex(self.directoryLeavePattern)

    # Check each output line: track directory changes, and hand matching
    # lines to maybeAddWarning (which bumps self.warnCount).
    # TODO: use log.readlines(), except we need to decide about stdout vs
    # stderr
    warnings = []
    for line in log.getText().split("\n"):
        if enter_re:
            entered = enter_re.search(line)
            if entered:
                self.directoryStack.append(entered.group(1))
        if leave_re and self.directoryStack and leave_re.search(line):
            self.directoryStack.pop()
        hit = wre.match(line)
        if hit:
            self.maybeAddWarning(warnings, line, hit)

    # If there were any warnings, publish them as a separate log.
    if self.warnCount:
        self.addCompleteLog("warnings (%d)" % self.warnCount,
                            "\n".join(warnings) + "\n")

    warnings_stat = self.step_status.getStatistic('warnings', 0)
    self.step_status.setStatistic('warnings', warnings_stat + self.warnCount)

    # The build-wide property may not exist yet on the first step.
    try:
        old_count = self.getProperty("warnings-count")
    except KeyError:
        old_count = 0
    self.setProperty("warnings-count", old_count + self.warnCount,
                     "WarningCountingShellCommand")
def createSummary(self, log):
    """Parse a test-harness summary line and expose its counts.

    Looks for lines of the form
        Stats: 168 successful, 0 failures, 20 errors, 105 didn't run
    and records the four counts both in descriptionDone (shown in the
    waterfall) and as build properties.
    """
    stats_re = re.compile(
        r'Stats: (\d+) successful, (\d+) failures, (\d+) errors, (\d+) didn\'t run')
    self.descriptionDone = []
    for line in StringIO(log.getText()).readlines():
        found = stats_re.match(line)
        if not found:
            continue
        ok, failed, errored, norun = found.group(1, 2, 3, 4)
        self.descriptionDone.append("Succeeded=%s" % ok)
        self.descriptionDone.append("Failed=%s" % failed)
        self.descriptionDone.append("Errored=%s" % errored)
        self.descriptionDone.append("BadLoad=%s" % norun)
        self.setProperty("test-succeeded", ok)
        self.setProperty("test-failed", failed)
        self.setProperty("test-errored", errored)
        self.setProperty("test-badload", norun)
def createSummary(self, log):
    """Collect ERROR lines from a bzr merge log.

    Everything after the literal string "ERROR" on each matching line is
    gathered; if anything was found, it is published as a separate log
    and recorded in two build properties.
    """
    error_tails = []
    for line in StringIO(log.getText()).readlines():
        idx = line.find("ERROR")
        if idx == -1:
            continue
        # Keep only the text following the "ERROR" marker.
        error_tails.append(line[idx + len("ERROR"):])

    self.summaries = {self.name: {'log': error_tails, 'state': None}}

    if error_tails:
        self.addCompleteLog("Bzr Merge : ERROR", "".join(error_tails))
        self.setProperty("Bzr Merge : ERROR", len(error_tails))
        self.setProperty("Bzr Merge : MessageCount", len(error_tails))
def createSummary(self, log):
    """Collect hlint/LaTeX warning lines from the build output.

    hlint warnings look like 'WARNING: file:line:col: stuff'.  LaTeX
    warnings start with "WARNING: LaTeX Warning: stuff" but sometimes
    wrap onto a second line, so the line following any WARNING line is
    kept as well.  Sets self.warnings to the number of collected lines.
    """
    collected = []
    carry = False  # True when the previous line was a WARNING (wrap-around)
    for line in log.getText().split("\n"):
        is_warning = line.startswith("WARNING: ")
        if is_warning or carry:
            collected.append(line)
        carry = is_warning
    if collected:
        self.addCompleteLog("warnings", "\n".join(collected) + "\n")
    self.warnings = len(collected)
def createSummary(self, log):
    """Collect ERROR lines from a branch-update log.

    Everything after the literal string "ERROR" on each matching line is
    gathered; any hit publishes a separate log, records two build
    properties, and marks the build result as FAILURE.
    """
    error_tails = []
    for line in StringIO(log.getText()).readlines():
        idx = line.find("ERROR")
        if idx == -1:
            continue
        # Keep only the text following the "ERROR" marker.
        error_tails.append(line[idx + len("ERROR"):])

    self.summaries = {self.name: {'log': error_tails}}

    if error_tails:
        self.addCompleteLog("Branch Update: ERROR", "".join(error_tails))
        self.setProperty("Branch Update: ERROR", len(error_tails))
        # Any ERROR during the branch update fails the build.
        self.build_result = FAILURE
        self.setProperty("Branch Update: MessageCount", len(error_tails))
def createSummary(self, log):
    """ Try to read the file-lint.sh output and parse results """
    # Worst severity seen over all matched lines; starts at SUCCESS and is
    # only ever raised, never lowered.
    severity = SUCCESS
    # Choose how to extract the module name from a reported file path.
    # In 'server' repo mode modules live under bin/addons/<module>/...
    if self.args['repo_mode'] == 'server':
        repo_expr = r'bin/addons/([^/]+)/.+$'
    else:
        repo_expr = r'([^/]+)/.+$'
    # Maps (module, kind) tuples to the TestResult accumulated for them.
    t_results= {}
    repo_re = re.compile(repo_expr)
    for line in StringIO(log.getText()).readlines():
        # self.known_res is iterated as (compiled_regex, severity) pairs;
        # the first pattern that matches a line wins (see the break below).
        for rem, sev in self.known_res:
            m = rem.match(line)
            if not m:
                continue
            # group(1) of the matched pattern is taken as the file name —
            # presumably every known_res pattern captures it there; confirm.
            fname = m.group(1)
            if sev > severity:
                severity = sev
            mf = repo_re.match(fname)
            if mf:
                module = (mf.group(1), 'lint')
            else:
                # Path does not match the repo layout; use a catch-all bucket.
                module = ('lint', 'rest')
            if module not in t_results:
                t_results[module] = TestResult(name=module,
                        results=SUCCESS, text='', logs={'stdout': u''})
            # Keep the worst severity seen for this module.
            if t_results[module].results < sev:
                t_results[module].results = sev
            # Normalize the line ending to a single trailing '\n'.
            if line.endswith('\r\n'):
                line = line[:-2] + '\n'
            elif not line.endswith('\n'):
                line += '\n'
            # Problem lines go into the visible result text; clean lines are
            # appended to the module's stdout log.
            if sev > SUCCESS:
                t_results[module].text += ustr(line)
            else:
                t_results[module].logs['stdout'] += ustr(line)
            break # don't attempt more matching of the same line
    # use t_results
    for tr in t_results.values():
        # Raise the overall build result to the worst per-module result.
        if self.build_result < tr.results:
            self.build_result = tr.results
        # and, after it's clean..
        self.build.build_status.addTestResult(tr)
    # NOTE(review): this overwrites the build_result computed in the loop
    # above with the overall line severity — confirm that is intended.
    self.build_result = severity
    build_id = self.build.requests[0].id # FIXME when builds have their class
    # self.descriptionDone = self.descriptionDone[:]
    # Persist the per-module results for this build.
    self.build.builder.db.saveTResults(build_id, self.name,
            self.build_result, t_results.values())
    if severity >= FAILURE:
        # Tag failing builds for later lookup; 'orm_id' may be absent.
        try:
            orm_id = self.getProperty('orm_id') or '?'
        except KeyError:
            orm_id = '?'
        self.setProperty('failure_tag', 'openerp-buildfail-%s-%s' % \
                (orm_id, build_id) )
def message_formatter(mode, name, build, results, master_status):
    """Build an HTML e-mail body summarizing a finished build.

    mode and name are accepted to satisfy the standard MailNotifier
    message-formatter signature but are not used here.

    Returns a dict with 'body' (the HTML string) and 'type' ('html').
    """
    result = Results[results]
    limit_lines = 60
    text = list()
    text.append('<h4>Build status: %s</h4>' % result.upper())
    text.append("Buildslave for this Build: <b>%s</b>" % build.getSlavename())
    text.append('<br>')
    if master_status.getURLForThing(build):
        text.append('Complete logs for all build steps: <a href="%s">%s</a>'
                    % (master_status.getURLForThing(build),
                       master_status.getURLForThing(build)))
        text.append('<br>')
    text.append("Build Reason: %s" % build.getReason())
    text.append('<br>')
    # Default to "no properties" so a failure to fetch them doesn't leave
    # build_properties unbound (the old bare except/pass caused a NameError
    # on the 'if build_properties:' test below).
    build_properties = []
    try:
        build_properties = build.getProperties().asList()
    except Exception:
        pass
    if build_properties:
        text.append("Build Properties:<br>")
        text.append("<ul>")
        for prop in build_properties:
            if str(prop[1]) != '':
                if prop[0] == "BBLAYERS" or prop[0] == "LOCALCONF":
                    # These two properties hold whole config files; render
                    # them line by line inside a <code> block.
                    if prop[0] == 'BBLAYERS':
                        text.append('<li>Contents of bblayers.conf (Note. LCONF_VERSION will not show up correctly)<br><code>')
                    else:
                        text.append('<li>Contents of auto.conf (local.conf for autobuilders)<br><code>')
                    lines = prop[1].splitlines()
                    for line in lines:
                        text.append(line + "<br>")
                    text.append('<br></code></li>')
                elif "trigger" not in prop[0] and "ss_" not in prop[0]:
                    # Skip internal trigger/ss_ bookkeeping properties.
                    text.append('<li>' + prop[0] + " : " + str(prop[1]) + "</li>")
        text.append('</ul>')
    for log in build.getLogs():
        log_name = "%s.%s" % (log.getStep().getName(), log.getName())
        log_status, dummy = log.getStep().getResults()
        # Keep only lines that look like errors or task output ("|" lines).
        # Note: logs can be VERY LARGE — do not echo them anywhere else.
        # (A stray debug 'print line' per log line was removed here.)
        log_body = []
        for line in log.getText().splitlines():
            if "ERROR" in line or "|" in line:
                log_body.append(line)
        log_url = '%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),
                                           log.getStep().getName(),
                                           log.getName())
        # Status code 2 — presumably FAILURE; only failed steps with some
        # error output get the detailed excerpt. TODO confirm constant.
        if log_status == 2 and log_body:
            text.append('<i>Detailed log of last build step:</i> <a href="%s">%s</a>'
                        % (log_url, log_url))
            text.append('<br>')
            text.append('<h4>Last %d lines of "%s" Error log:</h4>'
                        % (limit_lines, log_name))
            text.append('<p>')
            # Negative slice == "last limit_lines lines" (whole list if shorter).
            text.append('<br>'.join(log_body[-limit_lines:]))
            text.append('</p>')
            text.append('<br><br>')
    text.append('<b>-The Yocto BuildBot</b>')
    return {'body': "\n".join(text), 'type': 'html'}
def updateStats(self, log): """ Parse test results out of common test harnesses. Currently supported are: * Plone * Nose * Trial * Something mitchell wrote in Java """ stdio = log.getText() total = passed = skipped = fails = warnings = errors = 0 hastests = False # Plone? That has lines starting "Ran" and "Total". Total is missing if there is only a single layer. # For this reason, we total ourselves which lets us work even if someone runes 2 batches of plone tests # from a single target # Example:: # Ran 24 tests with 0 failures and 0 errors in 0.009 seconds if not hastests: outputs = re.findall( "Ran (?P<count>[\d]+) tests with (?P<fail>[\d]+) failures and (?P<error>[\d]+) errors", stdio) for output in outputs: total += int(output[0]) fails += int(output[1]) errors += int(output[2]) hastests = True # Twisted # Example:: # FAILED (errors=5, successes=11) # PASSED (successes=16) if not hastests: for line in stdio.split("\n"): if line.startswith("FAILED (") or line.startswith("PASSED ("): hastests = True line = line[8:][:-1] stats = line.split(", ") data = {} for stat in stats: k, v = stat.split("=") data[k] = int(v) if not "successes" in data: total = 0 for number in re.findall( "Ran (?P<count>[\d]+) tests in ", stdio): total += int(number) data["successes"] = total - sum(data.values()) # This matches Nose and Django output # Example:: # Ran 424 tests in 152.927s # FAILED (failures=1) # FAILED (errors=3) if not hastests: fails += len(re.findall('FAIL:', stdio)) errors += len( re.findall( '======================================================================\nERROR:', stdio)) for number in re.findall("Ran (?P<count>[\d]+)", stdio): total += int(number) hastests = True # We work out passed at the end because most test runners dont tell us # and we can't distinguish between different test systems easily so we # might double count. 
passed = total - (skipped + fails + errors + warnings) # Update the step statistics with out shiny new totals if hastests: self.step_status.setStatistic('total', total) self.step_status.setStatistic('fails', fails) self.step_status.setStatistic('errors', errors) self.step_status.setStatistic('warnings', warnings) self.step_status.setStatistic('skipped', skipped) self.step_status.setStatistic('passed', passed)