Example #1
def _gather_unexpected_results(port):
    """Returns the unexpected results from the previous run, if any."""
    filesystem = port._filesystem
    results_directory = port.results_directory()
    options = port._options
    last_unexpected_results = []
    if options.print_last_failures or options.retest_last_failures:
        unexpected_results_filename = filesystem.join(results_directory, "unexpected_results.json")
        if filesystem.exists(unexpected_results_filename):
            results = json_results_generator.load_json(filesystem, unexpected_results_filename)
            resultsjsonparser.for_each_test(results['tests'], lambda test, result: last_unexpected_results.append(test))
    return last_unexpected_results
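
All of the examples in this list revolve around resultsjsonparser.for_each_test, which walks the hierarchical 'tests' subtree of a results JSON file and calls a handler once per test, passing the full test name and that test's result dict. A minimal sketch of such a walker, assuming a leaf can be recognized by the presence of an 'actual' key (an illustrative heuristic, not the real WebKit implementation):

def for_each_test(subtree, handler, prefix=''):
    # Depth-first walk of the hierarchical results trie. Interior nodes are
    # path components; a node is treated as a leaf (one test's result dict)
    # once it carries an 'actual' key. That check is an assumption made for
    # illustration only.
    for name, node in subtree.items():
        test_name = prefix + name
        if 'actual' in node:
            handler(test_name, node)
        else:
            for_each_test(node, handler, test_name + '/')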
Example #2
    def _gather_baselines(self, results_json):
        # The rebaseline server and its associated JavaScript expect the tests
        # subtree to be key-value pairs instead of hierarchical.
        # FIXME: make the rebaseline server use the hierarchical tree.
        new_tests_subtree = {}

        def gather_baselines_for_test(test_name, result_dict):
            result = JSONTestResult(test_name, result_dict)
            if result.did_pass_or_run_as_expected():
                return
            result_dict['state'] = STATE_NEEDS_REBASELINE
            result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
            new_tests_subtree[test_name] = result_dict

        for_each_test(results_json['tests'], gather_baselines_for_test)
        results_json['tests'] = new_tests_subtree
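
For reference, a hypothetical before/after of the transformation this method performs; the test name and result values are invented:

results_json = {'tests': {'fast': {'css': {'a.html':
    {'actual': 'TEXT', 'expected': 'PASS'}}}}}

# After _gather_baselines(results_json), assuming the test neither passed
# nor ran as expected, the subtree is flat and keyed by full test name:
# results_json['tests'] == {'fast/css/a.html':
#     {'actual': 'TEXT', 'expected': 'PASS',
#      'state': STATE_NEEDS_REBASELINE,
#      'baselines': <whatever get_test_baselines returned>}}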
Example #3
    def print_unexpected_results(self,
                                 summarized_results,
                                 enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test,
                       results,
                       passes=passes,
                       flaky=flaky,
                       regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            if 'is_unexpected' not in results or not results['is_unexpected']:
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == [
                    'TEXT', 'IMAGE+TEXT'
            ]:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(summarized_results['tests'],
                                        add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" %
                            (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(
                        summarized_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [
                        TestExpectationParser._inverted_expectation_tokens[exp]
                        for exp in list(set(actual) | set(expected))
                    ]
                    self._print("  %s [ %s ]" %
                                (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" %
                            (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s [ %s ]" %
                                (test, TestExpectationParser.
                                 _inverted_expectation_tokens[key]))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
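
To make the bucketing concrete, here is a small invented summary and where add_result would file each test:

sample_tests = {
    'fast/a.html': {'actual': 'PASS', 'expected': 'CRASH', 'is_unexpected': True},
    'fast/b.html': {'actual': 'TEXT PASS', 'expected': 'PASS', 'is_unexpected': True},
    'fast/c.html': {'actual': 'TIMEOUT', 'expected': 'PASS', 'is_unexpected': True},
    'fast/d.html': {'actual': 'PASS', 'expected': 'PASS'},
}
# a.html -> passes['Expected to crash, but passed']
# b.html -> flaky['TEXT']  (flaky tests are grouped by the first actual result)
# c.html -> regressions['TIMEOUT']
# d.html -> skipped: without the 'is_unexpected' flag it ran as expected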
Example #4
    def _print_unexpected_results(self, unexpected_results):
        # Prints to the buildbot stream
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results["actual"].split(" ")
            expected = results["expected"].split(" ")
            if actual == ["PASS"]:
                if "CRASH" in expected:
                    add_to_dict_of_lists(passes, "Expected to crash, but passed", test)
                elif "TIMEOUT" in expected:
                    add_to_dict_of_lists(passes, "Expected to timeout, but passed", test)
                else:
                    add_to_dict_of_lists(passes, "Expected to fail, but passed", test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results["actual"], test)

        resultsjsonparser.for_each_test(unexpected_results["tests"], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print_for_bot("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s" % test)
                self._print_for_bot("")
            self._print_for_bot("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results["tests"], test)
                    actual = result["actual"].split(" ")
                    expected = result["expected"].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._print_for_bot("  %s = %s" % (test, " ".join(new_expectations_list)))
                self._print_for_bot("")
            self._print_for_bot("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s = %s" % (test, key))
                self._print_for_bot("")

        if len(unexpected_results["tests"]) and self._options.debug_rwt_logging:
            self._print_for_bot("%s" % ("-" * 78))
Example #5
File: printing.py  Project: kcomkar/webkit
    def _print_unexpected_results(self, unexpected_results):
        # Prints to the buildbot stream
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print_for_bot("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s" % test)
                self._print_for_bot("")
            self._print_for_bot("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
                    self._print_for_bot("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print_for_bot("")
            self._print_for_bot("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
                self._print_for_bot("")

        if len(unexpected_results['tests']) and self._options.debug_rwt_logging:
            self._print_for_bot("%s" % ("-" * 78))
Example #6
    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            def is_expected(result):
                return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)

            if all(is_expected(actual_result) for actual_result in actual):
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(summarized_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
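
The is_expected helper is what distinguishes this variant: an actual result counts as expected either when it appears verbatim in the expected list, or when it is one of the concrete failure types (AUDIO, TEXT, IMAGE+TEXT) and the expectations contain the generic FAIL. For example (values invented):

expected = ['FAIL']
actual = ['TEXT']    # concrete failure covered by the generic FAIL: not printed
actual = ['CRASH']   # CRASH is never implied by FAIL: reported as unexpected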
Example #7
    def print_unexpected_results(self, unexpected_results):
        """Prints a list of the unexpected results to the buildbot stream."""
        if self.disabled('unexpected-results'):
            return

        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed',
                                         test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed',
                                         test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._buildbot_stream.write("\n")

        if len(passes):
            for key, tests in passes.iteritems():
                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s\n" % test)
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
                    % (descriptions[result][1], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._buildbot_stream.write("  %s = %s\n" %
                        (test, " ".join(new_expectations_list)))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write(
                    "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(unexpected_results['tests']) and self._options.verbose:
            self._buildbot_stream.write("%s\n" % ("-" * 78))
Example #8
    def execute(self, options, args, tool):
        results_directory = args[0]
        filesystem = system.filesystem.FileSystem()
        scm = self._tool.scm()

        if options.dry_run:

            def no_op_copyfile(src, dest):
                pass

            def no_op_add(path, return_exit_code=False):
                if return_exit_code:
                    return 0

            filesystem.copyfile = no_op_copyfile
            scm.add = no_op_add

        print 'Parsing unexpected_results.json...'
        results_json_path = filesystem.join(results_directory, 'unexpected_results.json')
        results_json = json_results_generator.load_json(filesystem, results_json_path)

        port = factory.get()
        layout_tests_directory = port.layout_tests_dir()
        platforms = filesystem.listdir(
            filesystem.join(layout_tests_directory, 'platform'))
        test_config = TestConfig(
            port,
            layout_tests_directory,
            results_directory,
            platforms,
            filesystem,
            scm)

        print 'Gathering current baselines...'
        # The rebaseline server and its associated JavaScript expect the tests
        # subtree to be key-value pairs instead of hierarchical.
        # FIXME: make the rebaseline server use the hierarchical tree.
        new_tests_subtree = {}

        def gather_baselines(test, result):
            result['state'] = STATE_NEEDS_REBASELINE
            result['baselines'] = _get_test_baselines(test, test_config)
            new_tests_subtree[test] = result

        resultsjsonparser.for_each_test(results_json['tests'], gather_baselines)
        results_json['tests'] = new_tests_subtree

        server_url = "http://localhost:%d/" % options.httpd_port
        print "Starting server at %s" % server_url
        print ("Use the 'Exit' link in the UI, %squitquitquit "
            "or Ctrl-C to stop") % server_url

        threading.Timer(
            .1, lambda: self._tool.user.open_url(server_url)).start()

        httpd = RebaselineHTTPServer(
            httpd_port=options.httpd_port,
            test_config=test_config,
            results_json=results_json,
            platforms_json={
                'platforms': platforms,
                'defaultPlatform': port.name(),
            })
        httpd.serve_forever()
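
One detail worth noting in the dry-run branch above: rather than checking options.dry_run at every call site, the command swaps the two side-effecting operations for no-op stand-ins that keep the original signatures, so the rest of the code runs unchanged. A hypothetical sketch of the same idiom:

def make_dry_run(filesystem, scm):
    # Replace side-effecting methods with signature-compatible no-ops so
    # nothing touches the disk or the SCM during a dry run.
    filesystem.copyfile = lambda src, dest: None
    scm.add = lambda path, return_exit_code=False: 0 if return_exit_code else None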