def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
    """Print a human-readable summary of all unexpected test results.

    Args:
        summarized_results: dict with a 'tests' mapping as produced by the
            results summarizer; each entry has 'actual'/'expected' strings of
            space-separated result tokens and an 'is_unexpected' flag.
        enabled_pixel_tests_in_retry: when True, an actual result sequence of
            exactly ['TEXT', 'IMAGE+TEXT'] is classified as a regression (the
            second result came from a retry with pixel tests forced on), not
            as flakiness.
    """
    passes = {}
    flaky = {}
    regressions = {}

    def add_to_dict_of_lists(result_dict, key, value):
        # Append value to the list stored under key, creating it on demand.
        # (Parameter renamed so it no longer shadows the builtin 'dict'.)
        result_dict.setdefault(key, []).append(value)

    def add_result(test, results):
        # Bucket one test into passes/flaky/regressions (closed over above);
        # the old passes=passes/... default-argument trick was unnecessary.
        actual = results['actual'].split(" ")
        expected = results['expected'].split(" ")
        if 'is_unexpected' not in results or not results['is_unexpected']:
            # Don't print anything for tests that ran as expected.
            return
        if actual == ['PASS']:
            if 'CRASH' in expected:
                add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
            elif 'TIMEOUT' in expected:
                add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
            else:
                add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
        elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
            add_to_dict_of_lists(regressions, actual[0], test)
        elif len(actual) > 1:
            # We group flaky tests by the first actual result we got.
            add_to_dict_of_lists(flaky, actual[0], test)
        else:
            add_to_dict_of_lists(regressions, results['actual'], test)

    resultsjsonparser.for_each_test(summarized_results['tests'], add_result)

    if passes or flaky or regressions:
        self._print("")
    if passes:
        for key, tests in passes.items():
            self._print("%s: (%d)" % (key, len(tests)))
            tests.sort()
            for test in tests:
                self._print(" %s" % test)
            self._print("")
        self._print("")
    if flaky:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in flaky.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
                actual = result['actual'].split(" ")
                expected = result['expected'].split(" ")
                # NOTE(review): removed a dead re-assignment of 'result' to
                # TestExpectations.EXPECTATIONS[key.lower()] here - the value
                # was never read afterwards.
                # FIXME: clean this up once the old syntax is gone
                new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp]
                                         for exp in set(actual) | set(expected)]
                self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
            self._print("")
        self._print("")
    if regressions:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in regressions.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                self._print(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
        self._print("")
    if summarized_results['tests'] and self.debug_logging:
        self._print("%s" % ("-" * 78))
def _print_unexpected_results(self, unexpected_results):
    """Print a summary of the unexpected results to the buildbot stream.

    Args:
        unexpected_results: dict with a 'tests' mapping whose entries carry
            'actual'/'expected' strings of space-separated result tokens.
    """
    # Prints to the buildbot stream
    passes = {}
    flaky = {}
    regressions = {}

    def add_to_dict_of_lists(result_dict, key, value):
        # Append value to the list stored under key, creating it on demand.
        # (Parameter renamed so it no longer shadows the builtin 'dict'.)
        result_dict.setdefault(key, []).append(value)

    def add_result(test, results):
        # Bucket one test into passes/flaky/regressions (closed over above);
        # the old passes=passes/... default-argument trick was unnecessary.
        actual = results["actual"].split(" ")
        expected = results["expected"].split(" ")
        if actual == ["PASS"]:
            if "CRASH" in expected:
                add_to_dict_of_lists(passes, "Expected to crash, but passed", test)
            elif "TIMEOUT" in expected:
                add_to_dict_of_lists(passes, "Expected to timeout, but passed", test)
            else:
                add_to_dict_of_lists(passes, "Expected to fail, but passed", test)
        elif len(actual) > 1:
            # We group flaky tests by the first actual result we got.
            add_to_dict_of_lists(flaky, actual[0], test)
        else:
            add_to_dict_of_lists(regressions, results["actual"], test)

    resultsjsonparser.for_each_test(unexpected_results["tests"], add_result)

    if passes or flaky or regressions:
        self._print_for_bot("")
    if passes:
        for key, tests in passes.items():
            self._print_for_bot("%s: (%d)" % (key, len(tests)))
            tests.sort()
            for test in tests:
                self._print_for_bot(" %s" % test)
            self._print_for_bot("")
        self._print_for_bot("")
    if flaky:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in flaky.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests)))
            tests.sort()
            for test in tests:
                result = resultsjsonparser.result_for_test(unexpected_results["tests"], test)
                actual = result["actual"].split(" ")
                expected = result["expected"].split(" ")
                # NOTE(review): removed a dead re-assignment of 'result' to
                # TestExpectations.EXPECTATIONS[key.lower()] here - the value
                # was never read afterwards.
                new_expectations_list = list(set(actual) | set(expected))
                self._print_for_bot(" %s = %s" % (test, " ".join(new_expectations_list)))
            self._print_for_bot("")
        self._print_for_bot("")
    if regressions:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in regressions.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests)))
            tests.sort()
            for test in tests:
                self._print_for_bot(" %s = %s" % (test, key))
        self._print_for_bot("")
    if unexpected_results["tests"] and self._options.debug_rwt_logging:
        self._print_for_bot("%s" % ("-" * 78))
def _print_unexpected_results(self, unexpected_results):
    """Print a summary of the unexpected results to the buildbot stream.

    Args:
        unexpected_results: dict with a 'tests' mapping whose entries carry
            'actual'/'expected' strings of space-separated result tokens.
    """
    # Prints to the buildbot stream
    passes = {}
    flaky = {}
    regressions = {}

    def add_to_dict_of_lists(result_dict, key, value):
        # Append value to the list stored under key, creating it on demand.
        # (Parameter renamed so it no longer shadows the builtin 'dict'.)
        result_dict.setdefault(key, []).append(value)

    def add_result(test, results):
        # Bucket one test into passes/flaky/regressions (closed over above);
        # the old passes=passes/... default-argument trick was unnecessary.
        actual = results['actual'].split(" ")
        expected = results['expected'].split(" ")
        if actual == ['PASS']:
            if 'CRASH' in expected:
                add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
            elif 'TIMEOUT' in expected:
                add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
            else:
                add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
        elif len(actual) > 1:
            # We group flaky tests by the first actual result we got.
            add_to_dict_of_lists(flaky, actual[0], test)
        else:
            add_to_dict_of_lists(regressions, results['actual'], test)

    resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

    if passes or flaky or regressions:
        self._print_for_bot("")
    if passes:
        for key, tests in passes.items():
            self._print_for_bot("%s: (%d)" % (key, len(tests)))
            tests.sort()
            for test in tests:
                self._print_for_bot(" %s" % test)
            self._print_for_bot("")
        self._print_for_bot("")
    if flaky:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in flaky.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                actual = result['actual'].split(" ")
                expected = result['expected'].split(" ")
                # NOTE(review): removed a dead re-assignment of 'result' to
                # TestExpectations.EXPECTATIONS[key.lower()] here - the value
                # was never read afterwards.
                # FIXME: clean this up once the old syntax is gone
                new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp]
                                         for exp in set(actual) | set(expected)]
                self._print_for_bot(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
            self._print_for_bot("")
        self._print_for_bot("")
    if regressions:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in regressions.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print_for_bot("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                self._print_for_bot(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
        self._print_for_bot("")
    if unexpected_results['tests'] and self._options.debug_rwt_logging:
        self._print_for_bot("%s" % ("-" * 78))
def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
    """Print a human-readable summary of all unexpected test results.

    Tests whose every actual result was expected (treating AUDIO/TEXT/
    IMAGE+TEXT as matching an expected FAIL) are skipped entirely.

    Args:
        summarized_results: dict with a 'tests' mapping as produced by the
            results summarizer; each entry has 'actual'/'expected' strings
            of space-separated result tokens.
        enabled_pixel_tests_in_retry: when True, an actual result sequence of
            exactly ['TEXT', 'IMAGE+TEXT'] is classified as a regression (the
            second result came from a retry with pixel tests forced on), not
            as flakiness.
    """
    passes = {}
    flaky = {}
    regressions = {}

    def add_to_dict_of_lists(result_dict, key, value):
        # Append value to the list stored under key, creating it on demand.
        # (Parameter renamed so it no longer shadows the builtin 'dict'.)
        result_dict.setdefault(key, []).append(value)

    def add_result(test, results):
        # Bucket one test into passes/flaky/regressions (closed over above);
        # the old passes=passes/... default-argument trick was unnecessary.
        actual = results['actual'].split(" ")
        expected = results['expected'].split(" ")

        def is_expected(result):
            # A FAIL expectation covers any of the concrete failure tokens.
            return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)

        if all(is_expected(actual_result) for actual_result in actual):
            # Don't print anything for tests that ran as expected.
            return
        if actual == ['PASS']:
            if 'CRASH' in expected:
                add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
            elif 'TIMEOUT' in expected:
                add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
            else:
                add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
        elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
            add_to_dict_of_lists(regressions, actual[0], test)
        elif len(actual) > 1:
            # We group flaky tests by the first actual result we got.
            add_to_dict_of_lists(flaky, actual[0], test)
        else:
            add_to_dict_of_lists(regressions, results['actual'], test)

    resultsjsonparser.for_each_test(summarized_results['tests'], add_result)

    if passes or flaky or regressions:
        self._print("")
    if passes:
        for key, tests in passes.items():
            self._print("%s: (%d)" % (key, len(tests)))
            tests.sort()
            for test in tests:
                self._print(" %s" % test)
            self._print("")
        self._print("")
    if flaky:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in flaky.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
                actual = result['actual'].split(" ")
                expected = result['expected'].split(" ")
                # NOTE(review): removed a dead re-assignment of 'result' to
                # TestExpectations.EXPECTATIONS[key.lower()] here - the value
                # was never read afterwards.
                # FIXME: clean this up once the old syntax is gone
                new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp]
                                         for exp in set(actual) | set(expected)]
                self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
            self._print("")
        self._print("")
    if regressions:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in regressions.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
            tests.sort()
            for test in tests:
                self._print(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
        self._print("")
    if summarized_results['tests'] and self.debug_logging:
        self._print("%s" % ("-" * 78))
def print_unexpected_results(self, unexpected_results):
    """Prints a list of the unexpected results to the buildbot stream.

    Args:
        unexpected_results: dict with a 'tests' mapping whose entries carry
            'actual'/'expected' strings of space-separated result tokens.
    """
    if self.disabled('unexpected-results'):
        return

    passes = {}
    flaky = {}
    regressions = {}

    def add_to_dict_of_lists(result_dict, key, value):
        # Append value to the list stored under key, creating it on demand.
        # (Parameter renamed so it no longer shadows the builtin 'dict'.)
        result_dict.setdefault(key, []).append(value)

    def add_result(test, results):
        # Bucket one test into passes/flaky/regressions (closed over above);
        # the old passes=passes/... default-argument trick was unnecessary.
        actual = results['actual'].split(" ")
        expected = results['expected'].split(" ")
        if actual == ['PASS']:
            if 'CRASH' in expected:
                add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
            elif 'TIMEOUT' in expected:
                add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
            else:
                add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
        elif len(actual) > 1:
            # We group flaky tests by the first actual result we got.
            add_to_dict_of_lists(flaky, actual[0], test)
        else:
            add_to_dict_of_lists(regressions, results['actual'], test)

    resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

    if passes or flaky or regressions:
        self._buildbot_stream.write("\n")
    if passes:
        for key, tests in passes.items():
            self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
            tests.sort()
            for test in tests:
                self._buildbot_stream.write(" %s\n" % test)
            self._buildbot_stream.write("\n")
        self._buildbot_stream.write("\n")
    if flaky:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in flaky.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n" % (descriptions[result][1], len(tests)))
            tests.sort()
            for test in tests:
                result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                actual = result['actual'].split(" ")
                expected = result['expected'].split(" ")
                # NOTE(review): removed a dead re-assignment of 'result' to
                # TestExpectations.EXPECTATIONS[key.lower()] here - the value
                # was never read afterwards.
                new_expectations_list = list(set(actual) | set(expected))
                self._buildbot_stream.write(" %s = %s\n" % (test, " ".join(new_expectations_list)))
            self._buildbot_stream.write("\n")
        self._buildbot_stream.write("\n")
    if regressions:
        descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
        for key, tests in regressions.items():
            result = TestExpectations.EXPECTATIONS[key.lower()]
            self._buildbot_stream.write(
                "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
            tests.sort()
            for test in tests:
                self._buildbot_stream.write(" %s = %s\n" % (test, key))
            self._buildbot_stream.write("\n")
        self._buildbot_stream.write("\n")
    if unexpected_results['tests'] and self._options.verbose:
        self._buildbot_stream.write("%s\n" % ("-" * 78))