# Example 1
    def testHasNewFailures(self):
        """HasNewFailures() is true only for pass->fail transitions.

        Fail->pass transitions and brand-new (never-before-seen) failing
        tests must not be reported as new failures.
        """
        files = self.GetTestFiles()
        failures = self.GetTestFailures()

        # No changes, no new failures.
        cf = compare_failures.CompareFailures(files, failures, set(), set(),
                                              self.GetTmpDir(), False)
        # NOTE: failUnless is deprecated (removed in Python 3.12);
        # use the assertTrue/assertFalse aliases instead.
        self.assertFalse(cf.HasNewFailures())

        # Test goes from passing to failing.
        pass_file = os.path.join(path_utils.LayoutTestsDir(), 'fast',
                                 'pass1.html')
        failures[pass_file] = [test_failures.FailureTextMismatch(None)]
        cf = compare_failures.CompareFailures(files, failures, set(), set(),
                                              self.GetTmpDir(), False)
        self.assertTrue(cf.HasNewFailures())

        # Failing to passing.
        failures = self.GetTestFailures()
        failure_file = os.path.join(path_utils.LayoutTestsDir(), 'fast', 'bar',
                                    'fail2.html')
        del failures[failure_file]
        cf = compare_failures.CompareFailures(files, failures, set(), set(),
                                              self.GetTmpDir(), False)
        self.assertFalse(cf.HasNewFailures())

        # A new test that fails; this doesn't count as a new failure.
        new_test_file = os.path.join(path_utils.LayoutTestsDir(),
                                     "new-test.html")
        files.add(new_test_file)
        failures = self.GetTestFailures()
        failures[new_test_file] = [test_failures.FailureCrash()]
        cf = compare_failures.CompareFailures(files, failures, set(), set(),
                                              self.GetTmpDir(), False)
        self.assertFalse(cf.HasNewFailures())
# Example 2
    def testFailureToCrash(self):
        """A failure that turns into a crash leaves the baselines alone.

        The newly-crashing test must be neither added to
        expected-crashes.txt nor removed from the failure list.
        """
        failures = self.GetTestFailures()

        # Take a test that is already an expected text failure and make it
        # crash instead; CheckNoChanges verifies no baseline file is touched.
        crashing_test = os.path.join(path_utils.LayoutTestsDir(), 'fast', 'foo',
                                     'fail1.html')
        failures[crashing_test] = [test_failures.FailureCrash()]
        self.CheckNoChanges(failures)
# Example 3
    def GetTestFailures(self):
        """Get a dictionary representing the crashes and failures in the
    expected-*.txt files."""
        failures = {}
        for filename in self.GetTestFiles():
            if filename.find('crash') != -1:
                failures[filename] = [test_failures.FailureCrash()]
            elif filename.find('fail') != -1:
                failures[filename] = [test_failures.FailureTextMismatch(None)]

        return failures
# Example 4
    def testPassingToFailure(self):
        """A newly-failing test is not added to the baseline files."""
        failures = self.GetTestFailures()

        pass_file = os.path.join(path_utils.LayoutTestsDir(), 'fast',
                                 'pass1.html')

        # Neither pass -> text-mismatch nor pass -> crash may update the
        # expected-failures.txt / expected-crashes.txt baselines.
        for new_failure in (test_failures.FailureTextMismatch(None),
                            test_failures.FailureCrash()):
            failures[pass_file] = [new_failure]
            self.CheckNoChanges(failures)
# Example 5
    def testNewTestCrash(self):
        """A brand-new crashing test is recorded correctly after a merge."""
        all_files = self.GetTestFiles()
        added_test = os.path.join(path_utils.LayoutTestsDir(),
                                  "new-test.html")
        all_files.add(added_test)

        # Mark the newly-added test as crashing and write the baselines.
        expected = self.GetTestFailures()
        expected[added_test] = [test_failures.FailureCrash()]
        comparison = compare_failures.CompareFailures(all_files, expected,
                                                      set(), set(),
                                                      self.GetTmpDir(), False)
        comparison.UpdateFailuresOnDisk()

        # The crash baseline must now include the new test.
        self.CheckOutputWithExpectedFiles('expected-passing.txt',
                                          'expected-failures.txt',
                                          'expected-crashes-new-test.txt')
def process_output(port, test_info, test_types, test_args, configuration,
                   output_dir, crash, timeout, test_run_time, actual_checksum,
                   output, error):
    """Receives the output from a DumpRenderTree process, subjects it to a
    number of tests, and returns a list of failure types the test produced.

    Args:
      port: port-specific hooks
      proc: an active DumpRenderTree process
      test_info: Object containing the test filename, uri and timeout
      test_types: list of test types to subject the output to
      test_args: arguments to be passed to each test
      configuration: Debug or Release
      output_dir: directory to put crash stack traces into

    Returns: a TestResult object
    """
    failures = []

    # Some test args, such as the image hash, may be added or changed on a
    # test-by-test basis.
    local_test_args = copy.copy(test_args)

    local_test_args.hash = actual_checksum

    if crash:
        failures.append(test_failures.FailureCrash())
    if timeout:
        failures.append(test_failures.FailureTimeout())

    if crash:
        _log.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
        # Strip off "file://" since RelativeTestFilename expects
        # filesystem paths.
        filename = os.path.join(
            output_dir, port.relative_test_filename(test_info.filename))
        filename = os.path.splitext(filename)[0] + "-stack.txt"
        port.maybe_make_directory(os.path.split(filename)[0])
        with codecs.open(filename, "wb", "utf-8") as file:
            file.write(error)
    elif error:
        _log.debug("Previous test output stderr lines:\n%s" % error)

    # Check the output and save the results.
    start_time = time.time()
    time_for_diffs = {}
    for test_type in test_types:
        start_diff_time = time.time()
        new_failures = test_type.compare_output(port, test_info.filename,
                                                output, local_test_args,
                                                configuration)
        # Don't add any more failures if we already have a crash, so we don't
        # double-report those tests. We do double-report for timeouts since
        # we still want to see the text and image output.
        if not crash:
            failures.extend(new_failures)
        time_for_diffs[test_type.__class__.__name__] = (time.time() -
                                                        start_diff_time)

    total_time_for_all_diffs = time.time() - start_diff_time
    return TestResult(test_info.filename, failures, test_run_time,
                      total_time_for_all_diffs, time_for_diffs)