Example #1
def GetResultsMap(observer):
  """Returns a map of TestResults."""

  test_results_map = dict()
  for test in observer.FailedTests(include_fails=True, include_flaky=True):
    test_results_map[canonical_name(test)] = TestResult(test, failed=True)
  for test in observer.PassedTests():
    test_results_map[canonical_name(test)] = TestResult(test, failed=False)

  return test_results_map
Example #2
def GetResultsMap(observer):
    """Returns a map of TestResults."""

    test_results_map = dict()
    for test in observer.FailedTests(include_fails=True, include_flaky=True):
        test_results_map[canonical_name(test)] = TestResult(test, failed=True)
    for test in observer.PassedTests():
        test_results_map[canonical_name(test)] = TestResult(test, failed=False)

    return test_results_map
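
Examples #1 and #2 rely on only two observer methods, FailedTests(include_fails=True, include_flaky=True) and PassedTests(). The sketch below exercises GetResultsMap with hypothetical stand-ins (a FakeObserver stub, a namedtuple TestResult, and a simplified canonical_name); the real helpers live in the surrounding test-results module, so this only illustrates the interface the function assumes.

import collections

# Hypothetical stand-ins for illustration only; the real TestResult and
# canonical_name come from the module that defines GetResultsMap.
TestResult = collections.namedtuple('TestResult', ['name', 'failed'])

def canonical_name(name):
    # The real helper strips gtest modifier prefixes; this is a simplified guess.
    for prefix in ('DISABLED_', 'FLAKY_', 'FAILS_', 'MAYBE_'):
        name = name.replace(prefix, '')
    return name

class FakeObserver(object):
    """Minimal observer exposing the two methods GetResultsMap calls."""

    def FailedTests(self, include_fails=False, include_flaky=False):
        return ['FLAKY_MathTest.Addition']

    def PassedTests(self):
        return ['MathTest.Subtraction']

# With GetResultsMap from the example above in scope:
#   GetResultsMap(FakeObserver())
# yields a map keyed by canonical names, e.g. 'MathTest.Addition' -> a failed
# TestResult and 'MathTest.Subtraction' -> a passing one.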
Example #3
def GetResultsMapFromXML(results_xml):
    """Parse the given results XML file and returns a map of TestResults."""

    results_xml_file = None
    try:
        results_xml_file = open(results_xml)
    except IOError:
        logging.error('Cannot open file %s', results_xml)
        return dict()
    node = minidom.parse(results_xml_file).documentElement
    results_xml_file.close()

    test_results_map = dict()
    testcases = node.getElementsByTagName('testcase')

    for testcase in testcases:
        name = testcase.getAttribute('name')
        classname = testcase.getAttribute('classname')
        test_name = '%s.%s' % (classname, name)

        failures = testcase.getElementsByTagName('failure')
        not_run = testcase.getAttribute('status') == 'notrun'
        elapsed = float(testcase.getAttribute('time'))
        result = TestResult(test_name,
                            failed=bool(failures),
                            not_run=not_run,
                            elapsed_time=elapsed)
        test_results_map[canonical_name(test_name)] = result
    return test_results_map
Example #4
def GetResultsMapFromXML(results_xml):
  """Parse the given results XML file and returns a map of TestResults."""

  results_xml_file = None
  try:
    results_xml_file = open(results_xml)
  except IOError:
    logging.error('Cannot open file %s', results_xml)
    return dict()
  node = minidom.parse(results_xml_file).documentElement
  results_xml_file.close()

  test_results_map = dict()
  testcases = node.getElementsByTagName('testcase')

  for testcase in testcases:
    name = testcase.getAttribute('name')
    classname = testcase.getAttribute('classname')
    test_name = '%s.%s' % (classname, name)

    failures = testcase.getElementsByTagName('failure')
    not_run = testcase.getAttribute('status') == 'notrun'
    elapsed = float(testcase.getAttribute('time'))
    result = TestResult(test_name,
                        failed=bool(failures),
                        not_run=not_run,
                        elapsed_time=elapsed)
    test_results_map[canonical_name(test_name)] = [result]

  return test_results_map
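
Examples #3 and #4 read gtest-style results XML; the attributes they touch are name, classname, status, and time, plus nested <failure> elements. Below is a minimal sketch of that input shape, inferred from those reads rather than taken from a real run, written to a temporary file and passed back through GetResultsMapFromXML (assuming the function above and its TestResult/canonical_name helpers are in scope).

import tempfile

# Hypothetical sample, shaped after the attributes GetResultsMapFromXML reads.
SAMPLE_RESULTS_XML = """\
<testsuites name="AllTests" tests="2" failures="1">
  <testsuite name="MathTest" tests="2" failures="1">
    <testcase name="Addition" classname="MathTest" status="run" time="0.004">
      <failure message="Value mismatch" type=""/>
    </testcase>
    <testcase name="Subtraction" classname="MathTest" status="run" time="0.002"/>
  </testsuite>
</testsuites>
"""

with tempfile.NamedTemporaryFile(mode='w', suffix='.xml', delete=False) as xml_file:
    xml_file.write(SAMPLE_RESULTS_XML)

results = GetResultsMapFromXML(xml_file.name)
# 'MathTest.Addition' maps to a failed TestResult with elapsed_time 0.004 and
# 'MathTest.Subtraction' to a passing one, assuming canonical_name leaves the
# classname.name form unchanged.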
Example #5
    def _test_json_generation(self,
                              passed_tests_list,
                              failed_tests_list,
                              expected_test_list=None):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        get_test_set = lambda ts, label: set(
            [t for t in ts if t.startswith(label)])
        DISABLED_tests = get_test_set(tests_set, 'DISABLED_')
        FLAKY_tests = get_test_set(tests_set, 'FLAKY_')
        MAYBE_tests = get_test_set(tests_set, 'MAYBE_')
        FAILS_tests = get_test_set(tests_set, 'FAILS_')
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests
                                  | MAYBE_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0

        test_results_map = dict()
        for test in tests_set:
            test_name = canonical_name(test)
            test_timings[test_name] = float(self._num_runs * 100 + i)
            i += 1
            test_results_map[test_name] = TestResult(
                test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test_name])

        # Do not write to an actual file.
        mock_writer = lambda path, data: True

        generator = JSONResultsGenerator(
            self.builder_name,
            self.build_name,
            self.build_number,
            '',
            None,  # don't fetch past json results archive
            test_results_map,
            svn_repositories=[('webkit', '.')],
            file_writer=mock_writer)

        failed_count_map = dict([(t, 1) for t in failed_tests])

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(tests_set, test_timings, failed_count_map,
                                  len(PASS_tests), len(DISABLED_tests),
                                  len(FLAKY_tests),
                                  len(DISABLED_tests | failed_tests),
                                  incremental_json, 1, expected_test_list)

        # We don't verify the results here, but at least we make sure the code
        # runs without errors.
        generator.generate_json_output()
        generator.generate_times_ms_file()
Example #6
  def _test_json_generation(self, passed_tests_list, failed_tests_list, expected_test_list=None):
    tests_set = set(passed_tests_list) | set(failed_tests_list)

    get_test_set = lambda ts, label: set([t for t in ts if t.startswith(label)])
    DISABLED_tests = get_test_set(tests_set, 'DISABLED_')
    FLAKY_tests = get_test_set(tests_set, 'FLAKY_')
    MAYBE_tests = get_test_set(tests_set, 'MAYBE_')
    FAILS_tests = get_test_set(tests_set, 'FAILS_')
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests |
        MAYBE_tests)

    failed_tests = set(failed_tests_list) - DISABLED_tests
    failed_count_map = dict([(t, 1) for t in failed_tests])

    test_timings = {}
    i = 0

    test_results_map = dict()
    for test in tests_set:
      test_name = canonical_name(test)
      test_timings[test_name] = float(self._num_runs * 100 + i)
      i += 1
      test_results_map[test_name] = TestResult(test,
        failed=(test in failed_tests),
        elapsed_time=test_timings[test_name])

    # Do not write to an actual file.
    mock_writer = lambda path, data: True

    generator = JSONResultsGenerator(
      self.builder_name, self.build_name, self.build_number,
      '',
      None,   # don't fetch past json results archive
      test_results_map,
      svn_revisions=[('blink', '.')],
      file_writer=mock_writer)

    failed_count_map = dict([(t, 1) for t in failed_tests])

    # Test incremental json results
    incremental_json = generator.get_json()
    self._verify_json_results(
        tests_set,
        test_timings,
        failed_count_map,
        len(PASS_tests),
        len(DISABLED_tests),
        len(FLAKY_tests),
        len(DISABLED_tests | failed_tests),
        incremental_json,
        1,
        expected_test_list)

    # We don't verify the results here, but at least we make sure the code
    # runs without errors.
    generator.generate_json_output()
    generator.generate_times_ms_file()
Example #7
    def _verify_json_results(self, tests_set, test_timings, failed_count_map,
                             PASS_count, DISABLED_count, FLAKY_count,
                             fixable_count, json, num_runs,
                             expected_test_list):
        # Aliasing to a short name for better access to its constants.
        JRG = JSONResultsGenerator

        self.assertTrue(JRG.VERSION_KEY in json)
        self.assertTrue(self.builder_name in json)

        buildinfo = json[self.builder_name]
        self.assertTrue(JRG.FIXABLE in buildinfo)
        self.assertTrue(JRG.TESTS in buildinfo)
        self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
        self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)

        if tests_set or DISABLED_count:
            fixable = {
                JRG.PASS_RESULT: 0,
                JRG.SKIP_RESULT: 0,
                JRG.FLAKY_RESULT: 0
            }
            for fixable_items in buildinfo[JRG.FIXABLE]:
                for (test_type, count) in fixable_items.iteritems():
                    if test_type in fixable:
                        fixable[test_type] = fixable[test_type] + count
                    else:
                        fixable[test_type] = count

            self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
            self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
            self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)

        if failed_count_map:
            tests = buildinfo[JRG.TESTS]
            for test_name in failed_count_map.iterkeys():
                canonical = canonical_name(test_name)
                if expected_test_list:
                    self.assertTrue(canonical in expected_test_list)
                test = self._find_test_in_trie(canonical, tests)

                failed = 0
                for result in test[JRG.RESULTS]:
                    if result[1] == JRG.FAIL_RESULT:
                        failed += result[0]
                self.assertEqual(failed_count_map[test_name], failed)

                timing_count = 0
                for timings in test[JRG.TIMES]:
                    if timings[1] == test_timings[canonical]:
                        timing_count = timings[0]
                self.assertEqual(1, timing_count)

        if fixable_count:
            self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
Example #8
  def GetResultsMap(self):
    """Returns a map of TestResults."""

    if self._current_test:
      self._failed_tests.add(self._current_test)

    test_results_map = dict()
    for test in self._failed_tests:
      test_results_map[canonical_name(test)] = [TestResult(test, failed=True)]

    return test_results_map
Example #9
def GetResultsMap(observer):
  """Returns a map of TestResults.  Returns an empty map if no current test
  has been recorded."""

  if not observer.GetCurrentTest():
    return dict()

  test_results_map = dict()
  for test in observer.FailedTests(include_fails=True, include_flaky=True):
    test_results_map[canonical_name(test)] = TestResult(test, failed=True)

  return test_results_map
Example #10
    def GetResultsMap(self):
        """Returns a map of TestResults."""

        if self._current_test:
            self._failed_tests.add(self._current_test)

        test_results_map = dict()
        for test in self._failed_tests:
            test_results_map[canonical_name(test)] = TestResult(test,
                                                                failed=True)

        return test_results_map
Example #11
def GetResultsMap(observer):
    """Returns a map of TestResults.  Returns an empty map if no current test
    has been recorded."""

    if not observer.GetCurrentTest():
        return dict()

    test_results_map = dict()
    for test in observer.FailedTests(include_fails=True, include_flaky=True):
        test_results_map[canonical_name(test)] = TestResult(test, failed=True)

    return test_results_map
Example #12
  def _verify_json_results(self, tests_set, test_timings, failed_count_map,
                           PASS_count, DISABLED_count, FLAKY_count,
                           fixable_count,
                           json, num_runs, expected_test_list):
    # Aliasing to a short name for better access to its constants.
    JRG = JSONResultsGenerator

    self.assertTrue(JRG.VERSION_KEY in json)
    self.assertTrue(self.builder_name in json)

    buildinfo = json[self.builder_name]
    self.assertTrue(JRG.FIXABLE in buildinfo)
    self.assertTrue(JRG.TESTS in buildinfo)
    self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
    self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)

    if tests_set or DISABLED_count:
      fixable = {JRG.PASS_RESULT:0, JRG.SKIP_RESULT:0, JRG.FLAKY_RESULT:0}
      for fixable_items in buildinfo[JRG.FIXABLE]:
        for (test_type, count) in fixable_items.iteritems():
          if test_type in fixable:
            fixable[test_type] = fixable[test_type] + count
          else:
            fixable[test_type] = count

      self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
      self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
      self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)

    if failed_count_map:
      tests = buildinfo[JRG.TESTS]
      for test_name in failed_count_map.iterkeys():
        canonical = canonical_name(test_name)
        if expected_test_list:
          self.assertTrue(canonical in expected_test_list)
        test = self._find_test_in_trie(canonical, tests)

        failed = 0
        for result in test[JRG.RESULTS]:
          if result[1] == JRG.FAIL_RESULT:
            failed += result[0]
        self.assertEqual(failed_count_map[test_name], failed)

        timing_count = 0
        for timings in test[JRG.TIMES]:
          if timings[1] == test_timings[canonical]:
            timing_count = timings[0]
        self.assertEqual(1, timing_count)

    if fixable_count:
      self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
Example #13
  def GetResultsMap(self):
    """Returns a map of TestResults.  Returns an empty map if no current test
    has been recorded."""

    if not self._current_test:
      return dict()

    self._failed_tests.add(self._current_test)

    test_results_map = dict()
    for test in self._failed_tests:
      test_results_map[canonical_name(test)] = TestResult(test, failed=True)

    return test_results_map
Example #14
  def GetResultsMap(self):
    """Returns a map of TestResults.  Returns an empty map if no current test
    has been recorded."""

    if not self._current_test:
      return dict()

    self._failed_tests.add(self._current_test)

    test_results_map = dict()
    for test in self._failed_tests:
      test_results_map[canonical_name(test)] = TestResult(test, failed=True)

    return test_results_map
Example #15
def GetResultsMap(observer):
  """Returns a map of TestResults."""

  test_results_map = dict()
  tests = (observer.FailedTests(include_fails=True, include_flaky=True) +
           observer.PassedTests())
  for test in tests:
    key = canonical_name(test)
    test_results_map[key] = []
    tries = observer.TriesForTest(test)
    for test_try in tries:
      # FIXME: Store the actual failure type so we can expose whether the test
      # crashed or timed out. See crbug.com/249965.
      failed = (test_try != gtest_utils.TEST_SUCCESS_LABEL)
      test_results_map[key].append(TestResult(test, failed=failed))

  return test_results_map
Example #16
def GetResultsMap(observer):
    """Returns a map of TestResults."""

    test_results_map = dict()
    tests = (observer.FailedTests(include_fails=True, include_flaky=True) +
             observer.PassedTests())
    for test in tests:
        key = canonical_name(test)
        test_results_map[key] = []
        tries = observer.TriesForTest(test)
        for test_try in tries:
            # FIXME: Store the actual failure type so we can expose whether the test
            # crashed or timed out. See crbug.com/249965.
            failed = (test_try != gtest_utils.TEST_SUCCESS_LABEL)
            test_results_map[key].append(TestResult(test, failed=failed))

    return test_results_map
Example #17
    def _generate_and_test_full_results_json(self, passed_tests_list,
                                             failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        get_test_set = lambda ts, label: set(
            [t for t in ts if t.startswith(label)])
        DISABLED_tests = get_test_set(tests_set, 'DISABLED_')
        FLAKY_tests = get_test_set(tests_set, 'FLAKY_')
        MAYBE_tests = get_test_set(tests_set, 'MAYBE_')
        FAILS_tests = get_test_set(tests_set, 'FAILS_')
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests
                                  | MAYBE_tests) - set(failed_tests_list)

        failed_tests = set(failed_tests_list) - DISABLED_tests

        test_timings = {}
        test_results_map = {}
        for i, test in enumerate(tests_set):
            test_name = canonical_name(test)
            test_timings[test_name] = i
            test_results_map[test_name] = [
                TestResult(test,
                           failed=(test in failed_tests),
                           elapsed_time=test_timings[test_name])
            ]

        # Do not write to an actual file.
        mock_writer = lambda path, data: True

        generator = JSONResultsGenerator(
            self.builder_name,
            self.build_name,
            self.build_number,
            '',
            None,  # don't fetch past json results archive
            test_results_map,
            svn_revisions=[('blink', '12345')],
            file_writer=mock_writer)

        results_json = generator.get_full_results_json()
        self._verify_full_json_results(results_json, tests_set, PASS_tests,
                                       failed_tests, test_timings)
        self.assertEqual(results_json.get('blink_revision'), '12345')
Example #18
  def _generate_and_test_full_results_json(self, passed_tests_list,
                                           failed_tests_list):
    tests_set = set(passed_tests_list) | set(failed_tests_list)

    get_test_set = lambda ts, label: set([t for t in ts if t.startswith(label)])
    DISABLED_tests = get_test_set(tests_set, 'DISABLED_')
    FLAKY_tests = get_test_set(tests_set, 'FLAKY_')
    MAYBE_tests = get_test_set(tests_set, 'MAYBE_')
    FAILS_tests = get_test_set(tests_set, 'FAILS_')
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests |
        MAYBE_tests) - set(failed_tests_list)

    failed_tests = set(failed_tests_list) - DISABLED_tests

    test_timings = {}
    test_results_map = {}
    for i, test in enumerate(tests_set):
      test_name = canonical_name(test)
      test_timings[test_name] = i
      test_results_map[test_name] = TestResult(test,
        failed=(test in failed_tests),
        elapsed_time=test_timings[test_name])

    # Do not write to an actual file.
    mock_writer = lambda path, data: True

    generator = JSONResultsGenerator(
      self.builder_name, self.build_name, self.build_number,
      '',
      None,   # don't fetch past json results archive
      test_results_map,
      svn_revisions=[('blink', '12345')],
      file_writer=mock_writer)


    results_json = generator.get_full_results_json()
    self._verify_full_json_results(results_json, tests_set, PASS_tests,
                                   failed_tests, test_timings)
    self.assertEqual(results_json.get('blink_revision'), '12345')