def evaluateCommand(self, cmd):
    """Evaluate a mochitest run; test failures are reported as WARNINGS."""
    status = self.super_class.evaluateCommand(self, cmd)
    # A failing unittest is shown orange (WARNINGS) rather than red, so
    # demote FAILURE. EXCEPTION/RETRY must not be masked, which is why
    # every return below still goes through worst_status.
    if status == FAILURE:
        status = WARNINGS
    if status != SUCCESS:
        return worst_status(status, WARNINGS)

    stdio = cmd.logs["stdio"].getText()
    # browser-chrome prints a different result-summary line than
    # MozillaMochitest does.
    if self.name == 'mochitest-browser-chrome':
        fail_ident = r"^\tFailed: 0"
    else:
        fail_ident = r"^\d+ INFO Failed: 0"
    # The summary line proves the run actually completed; a
    # TEST-UNEXPECTED- line flags a harness error even if it did.
    completed = re.search(fail_ident, stdio, re.MULTILINE)
    harness_error = re.search("^TEST-UNEXPECTED-", stdio, re.MULTILINE)
    if not completed or harness_error:
        return worst_status(status, WARNINGS)
    return worst_status(status, SUCCESS)
def evaluateCommand(self, cmd):
    """Evaluate an xpcshell-style run; test failures show as WARNINGS."""
    status = self.super_class.evaluateCommand(self, cmd)
    # Failing unittests are marked orange (WARNINGS), not red, but
    # EXCEPTION/RETRY must survive — hence worst_status on every return.
    if status == FAILURE:
        status = WARNINGS
    if status != SUCCESS:
        return worst_status(status, WARNINGS)

    stdio = cmd.logs["stdio"].getText()
    # Xpcshell tests (only): the "Failed: 0" summary line is the proof
    # that the run completed successfully.
    if 'xpcshell' in self.name:
        if not re.search(r"^INFO \| Failed: 0", stdio, re.MULTILINE):
            return worst_status(status, WARNINGS)
    # Harness-level errors are reported as TEST-UNEXPECTED- lines.
    if re.search("^TEST-UNEXPECTED-", stdio, re.MULTILINE):
        return worst_status(status, WARNINGS)
    return worst_status(status, SUCCESS)
def evaluateCommand(self, cmd):
    """Demote FAILURE to WARNINGS and scan stdio for unexpected results."""
    status = self.super_class.evaluateCommand(self, cmd)
    if status == FAILURE:
        status = WARNINGS
    # Any TEST-UNEXPECTED marker anywhere in the log turns the run orange.
    if "TEST-UNEXPECTED" in cmd.logs['stdio'].getText():
        return worst_status(status, WARNINGS)
    return worst_status(status, SUCCESS)
def evaluateCommand(self, cmd):
    """Check the 'leaks' build property; any leaked bytes turn the run orange.

    Returns worst_status(..., FAILURE) when the property cannot be read,
    worst_status(..., WARNINGS) when leaks were detected, and the
    superclass result otherwise.
    """
    superResult = self.super_class.evaluateCommand(self, cmd)
    try:
        leaks = self.getProperty('leaks')
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        log.msg("Could not find build property: leaks")
        return worst_status(superResult, FAILURE)
    # `leaks` is presumably a byte count rendered as a string — TODO confirm.
    if leaks and int(leaks) > 0:
        return worst_status(superResult, WARNINGS)
    return superResult
def evaluateCommand(self, cmd):
    """Capture the command's stdio text into a build property.

    Non-empty output is stored under self.property_name and folds SUCCESS
    into the result; empty output folds in FAILURE. Errors while reading
    the log or setting the property are logged but do not change the
    result (the original swallowed them silently with a bare except).
    """
    worst = self.super_class.evaluateCommand(self, cmd)
    try:
        output = cmd.logs['stdio'].getText().strip()
        if output:
            self.setProperty(self.property_name, output)
            worst = worst_status(worst, SUCCESS)
        else:
            worst = worst_status(worst, FAILURE)
    except Exception:
        # Keep the deliberate best-effort behaviour, but leave a trace
        # instead of hiding the error entirely.
        log.msg("%s: could not capture stdio into build property" % self.name)
    return worst
def evaluateCommand(self, cmd):
    """Evaluate the command, showing unittest failures as WARNINGS."""
    status = self.super_class.evaluateCommand(self, cmd)
    # Failing unittests go orange (WARNINGS) instead of red; EXCEPTION
    # and RETRY still win out through worst_status.
    if status == FAILURE:
        status = WARNINGS
    if status == SUCCESS:
        return worst_status(status, SUCCESS)
    return worst_status(status, WARNINGS)
def evaluateCommand(self, cmd):
    """Report timeouts and non-zero exit codes as WARNINGS on Tinderbox."""
    status = self.my_shellcommand.evaluateCommand(self, cmd)
    # The HEADER channel carries buildbot's own annotations, including
    # the "command timed out" notice.
    for line in cmd.logs["stdio"].readlines(channel=HEADER):
        if "command timed out" in line:
            self.addCompleteLog(
                'timeout',
                "TinderboxPrint: " + self.name + "<br/>" +
                emphasizeFailureText("timeout") + "\n")
            # A timeout already explains the failure; skip the generic
            # exit-code message below.
            return worst_status(status, WARNINGS)
    if cmd.rc != 0:
        self.addCompleteLog(
            'error',
            'Unknown Error: command finished with exit code: %d' % cmd.rc)
        return worst_status(status, WARNINGS)
    return status
def evaluateCommand(self, cmd):
    """Fail the step when the 'leakStats' build property is missing.

    Only the property's presence is checked here — its value is not
    inspected. Returns the superclass result when the property exists.
    """
    superResult = self.super_class.evaluateCommand(self, cmd)
    try:
        # Value intentionally discarded; this is purely an existence check
        # (the original bound it to an unused local).
        self.getProperty('leakStats')
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed.
        log.msg("Could not find build property: leakStats")
        return worst_status(superResult, FAILURE)
    return superResult
def evaluateCommand(self, cmd):
    """Combine the base evaluation with the global regex log checks."""
    base_status = obj.evaluateCommand(self, cmd)
    if self.log_eval_func is not None:
        # A custom log evaluator is responsible for its own scanning.
        return base_status
    # No custom evaluator: run the shared global_errors patterns over
    # the logs and merge the outcome.
    regex_status = regex_log_evaluator(cmd, self.step_status, global_errors)
    return worst_status(base_status, regex_status)
def evaluateReftest(log, superResult):
    """Evaluate a reftest log; test failures are reported as WARNINGS."""
    # Failing unittests go orange (WARNINGS) rather than red, but
    # EXCEPTION/RETRY must still propagate — hence worst_status below.
    if superResult == FAILURE:
        superResult = WARNINGS
    if superResult != SUCCESS:
        return superResult
    # The "Unexpected: 0 (" summary line proves the run completed, and a
    # TEST-UNEXPECTED- line signals a harness error even then.
    completed = re.search(r"^REFTEST INFO \| Unexpected: 0 \(", log,
                          re.MULTILINE)
    harness_error = re.search("^TEST-UNEXPECTED-", log, re.MULTILINE)
    if completed and not harness_error:
        return worst_status(superResult, SUCCESS)
    return worst_status(superResult, WARNINGS)
def evaluateCommand(self, cmd):
    """Evaluate a unittest run; failures are reported as WARNINGS."""
    status = self.super_class.evaluateCommand(self, cmd)
    # Orange (WARNINGS) for test failures; EXCEPTION/RETRY pass through
    # unchanged via worst_status in the later decisions.
    if status == FAILURE:
        status = WARNINGS
    if status != SUCCESS:
        return status
    stdio = cmd.logs["stdio"].getText()
    # "Failed: 0" proves the run completed; TEST-UNEXPECTED- lines are
    # harness errors.
    ran_clean = re.search(r"^INFO \| Failed: 0", stdio, re.MULTILINE)
    harness_error = re.search("^TEST-UNEXPECTED-", stdio, re.MULTILINE)
    if not ran_clean or harness_error:
        return worst_status(status, WARNINGS)
    return worst_status(status, SUCCESS)
def regex_log_evaluator(cmd, step_status, regexes):
    """Scan all command logs against (pattern, status) pairs.

    Starting from SUCCESS, returns the worst status whose pattern matched
    any log. Entries in `regexes` may be plain strings (wrapped in
    ".*...*" and compiled with re.DOTALL) or precompiled patterns.
    """
    worst = SUCCESS
    for err, possible_status in regexes:
        # Only scan the logs when a match could actually make the overall
        # status worse than what we already have.
        if worst_status(worst, possible_status) != possible_status:
            continue
        if isinstance(err, basestring):
            err = re.compile(".*%s.*" % err, re.DOTALL)
        for logfile in cmd.logs.values():
            if err.search(logfile.getText()):
                worst = possible_status
    return worst
def evaluateRemoteMochitest(name, log, superResult):
    """Evaluate a remote mochitest log; test failures show as WARNINGS."""
    # Orange (WARNINGS) for failing unittests; EXCEPTION/RETRY must not
    # be masked, hence worst_status in the later decisions.
    if superResult == FAILURE:
        superResult = WARNINGS
    if superResult != SUCCESS:
        return superResult
    # browser-chrome uses a different result-summary format than
    # MozillaMochitest's.
    if 'browser-chrome' in name:
        fail_ident = r"^\tFailed:\s+0"
    else:
        fail_ident = r"^\d+ INFO Failed:\s+0"
    # The summary line proves the run completed; a TEST-UNEXPECTED- line
    # flags a harness error.
    completed = re.search(fail_ident, log, re.MULTILINE)
    harness_error = re.search("^TEST-UNEXPECTED-", log, re.MULTILINE)
    if not completed or harness_error:
        return worst_status(superResult, WARNINGS)
    return worst_status(superResult, SUCCESS)
def stepDone(self, result, step):
    """This method is called when the BuildStep completes. It is passed a
    status object from the BuildStep and is responsible for merging the
    Step's results into those of the overall Build.

    Returns True when the build should terminate early (halt-on-failure,
    EXCEPTION/RETRY, or a lost remote connection).
    """
    terminate = False
    text = None
    # A step may return (result, text) to contribute descriptive text to
    # the overall build status.
    if type(result) == types.TupleType:
        result, text = result
    assert type(result) == type(SUCCESS)
    log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
    self.results.append(result)
    if text:
        self.text.extend(text)
    # No remote means the slave connection is gone; end the build.
    if not self.remote:
        terminate = True

    possible_overall_result = result
    if result == FAILURE:
        # Order matters here: flunkOnFailure is deliberately re-checked
        # after warnOnFailure so that flunk wins when both flags are set.
        if not step.flunkOnFailure:
            possible_overall_result = SUCCESS
        if step.warnOnFailure:
            possible_overall_result = WARNINGS
        if step.flunkOnFailure:
            possible_overall_result = FAILURE
        if step.haltOnFailure:
            terminate = True
    elif result == WARNINGS:
        if not step.warnOnWarnings:
            possible_overall_result = SUCCESS
        else:
            possible_overall_result = WARNINGS
        if step.flunkOnWarnings:
            possible_overall_result = FAILURE
    elif result in (EXCEPTION, RETRY):
        terminate = True

    # if we skipped this step, then don't adjust the build status
    if result != SKIPPED:
        self.result = worst_status(self.result, possible_overall_result)

    return terminate
def evaluateCommand(self, cmd):
    """Mark the step FAILURE if any stdio line starts with 'FAIL'."""
    result = self.super_class.evaluateCommand(self, cmd)
    stdio_lines = cmd.logs['stdio'].getText().split("\n")
    for entry in stdio_lines:
        if entry.startswith('FAIL'):
            result = worst_status(result, FAILURE)
    return result
def evaluateCommand(self, cmd):
    """Fold SUCCESS into the result when stdio mentions 'File exists'.

    NOTE(review): worst_status(result, SUCCESS) always yields `result`
    (SUCCESS is the best status), so this branch never downgrades a
    failure — confirm whether an actual downgrade to SUCCESS was
    intended here. Behavior is preserved pending that confirmation.
    """
    result = self.super_class.evaluateCommand(self, cmd)
    # Idiom fix: was `None != re.search(...)`.
    if re.search('File exists', cmd.logs['stdio'].getText()) is not None:
        result = worst_status(result, SUCCESS)
    return result