def testOutputResultsUnsupportedFormat(self):
    """Tests that passing in an unsupported format is an error."""
    with self.assertRaises(RuntimeError):
        result_output.OutputResults(data_types.TestExpectationMap(),
                                    data_types.TestExpectationMap(),
                                    data_types.TestExpectationMap(), {},
                                    {}, 'asdf')
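For contrast, a minimal sketch of a call that should succeed, assuming the 'print' format exercised in Example #5; |file_handle| is a hypothetical writable file object:

    # Empty maps plus a supported format ('print') should not raise.
    result_output.OutputResults(data_types.TestExpectationMap(),
                                data_types.TestExpectationMap(),
                                data_types.TestExpectationMap(), {}, {},
                                'print', file_handle)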
Example #2
    def CreateTestExpectationMap(self, expectation_files, tests, grace_period):
        """Creates an expectation map based off a file or list of tests.

        Args:
          expectation_files: A filepath or list of filepaths to expectation
              files to read from, or None. If a filepath is specified, |tests|
              must be None.
          tests: An iterable of strings containing test names to check. If
              specified, |expectation_files| must be None.
          grace_period: An int specifying how many days old an expectation must
              be in order to be parsed, i.e. how many days old an expectation
              must be before it is a candidate for removal/modification.

        Returns:
          A data_types.TestExpectationMap, although all its BuilderStepMap
          contents will be empty.
        """
        def AddContentToMap(content, ex_map, expectation_file_name):
            list_parser = expectations_parser.TaggedTestListParser(content)
            expectations_for_file = ex_map.setdefault(
                expectation_file_name, data_types.ExpectationBuilderMap())
            logging.debug('Parsed %d expectations',
                          len(list_parser.expectations))
            for e in list_parser.expectations:
                if 'Skip' in e.raw_results:
                    continue
                # Expectations that only have a Pass expectation (usually used to
                # override a broader, failing expectation) are not handled by the
                # unexpected pass finder, so ignore those.
                if e.raw_results == ['Pass']:
                    continue
                expectation = data_types.Expectation(e.test, e.tags,
                                                     e.raw_results, e.reason)
                assert expectation not in expectations_for_file
                expectations_for_file[expectation] = (
                    data_types.BuilderStepMap())

        logging.info('Creating test expectation map')
        assert expectation_files or tests
        assert not (expectation_files and tests)

        expectation_map = data_types.TestExpectationMap()

        if expectation_files:
            if not isinstance(expectation_files, list):
                expectation_files = [expectation_files]
            for ef in expectation_files:
                expectation_file_name = os.path.normpath(ef)
                content = self._GetNonRecentExpectationContent(
                    expectation_file_name, grace_period)
                AddContentToMap(content, expectation_map,
                                expectation_file_name)
        else:
            expectation_file_name = ''
            content = '# results: [ RetryOnFailure ]\n'
            for t in tests:
                content += '%s [ RetryOnFailure ]\n' % t
            AddContentToMap(content, expectation_map, expectation_file_name)

        return expectation_map
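A minimal usage sketch based on the docstring above; |expectations| is a hypothetical instance of the containing class and the test names are illustrative. Since a list of tests is passed, |expectation_files| must be None:

    # Build a skeleton map for two test names with a zero-day grace period,
    # so every generated RetryOnFailure expectation is parsed.
    expectation_map = expectations.CreateTestExpectationMap(
        None, ['suite.test_one', 'suite.test_two'], grace_period=0)
    # Per the docstring, every BuilderStepMap value starts out empty.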
Example #3
  def _FillExpectationMapForBuilders(self, expectation_map, builders,
                                     builder_type):
    """Fills |expectation_map| with results from |builders|.

    Args:
      expectation_map: A data_types.TestExpectationMap. Will be modified
          in-place.
      builders: A list of strings containing the names of builders to query.
      builder_type: A string containing the type of builder to query, either
          "ci" or "try".

    Returns:
      A dict containing any results that were retrieved that did not have a
      matching expectation in |expectation_map| in the following format:
      {
        |builder_type|:|builder_name| (str): [
          result1 (data_types.Result),
          result2 (data_types.Result),
          ...
        ],
      }
    """
    assert isinstance(expectation_map, data_types.TestExpectationMap)

    # Filter out any builders that we can easily determine do not currently
    # produce data we care about.
    builders = self._FilterOutInactiveBuilders(builders, builder_type)

    # Spin up a separate process for each query/add step. This is wasteful in
    # the sense that we'll have a bunch of idle processes once faster steps
    # start finishing, but ensures that we start slow queries early and avoids
    # the overhead of passing large amounts of data between processes. See
    # crbug.com/1182459 for more information on performance considerations.
    process_pool = multiprocessing_utils.GetProcessPool(nodes=len(builders))

    args = [(b, builder_type, expectation_map) for b in builders]

    results = process_pool.map(self._QueryAddCombined, args)

    tmp_expectation_map = data_types.TestExpectationMap()
    all_unmatched_results = {}

    for (unmatched_results, prefixed_builder_name, merge_map) in results:
      tmp_expectation_map.Merge(merge_map, expectation_map)
      if unmatched_results:
        all_unmatched_results[prefixed_builder_name] = unmatched_results

    expectation_map.clear()
    expectation_map.update(tmp_expectation_map)

    return all_unmatched_results
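A sketch of consuming the return value, assuming the "|builder_type|:|builder_name|" key format documented above; |querier|, |expectation_map|, and |ci_builders| are hypothetical:

    unmatched = querier._FillExpectationMapForBuilders(
        expectation_map, ci_builders, 'ci')
    for prefixed_builder_name, results in unmatched.items():
      # Keys look like 'ci:some_builder'; values are lists of
      # data_types.Result objects that had no matching expectation.
      logging.warning('%d unmatched results on %s', len(results),
                      prefixed_builder_name)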
Example #4
    def testValidResults(self):
        """Tests functionality when valid results are returned by the query."""
        def SideEffect(builder, *args):
            del args
            if builder == 'matched_builder':
                return ([
                    data_types.Result('foo', ['win'], 'Pass', 'step_name',
                                      'build_id')
                ], None)
            else:
                return ([
                    data_types.Result('bar', [], 'Pass', 'step_name',
                                      'build_id')
                ], None)

        self._query_mock.side_effect = SideEffect

        expectation = data_types.Expectation('foo', ['win'], 'RetryOnFailure')
        expectation_map = data_types.TestExpectationMap({
            'foo':
            data_types.ExpectationBuilderMap({
                expectation:
                data_types.BuilderStepMap(),
            }),
        })
        unmatched_results = self._querier._FillExpectationMapForBuilders(
            expectation_map, ['matched_builder', 'unmatched_builder'], 'ci')
        stats = data_types.BuildStats()
        stats.AddPassedBuild()
        expected_expectation_map = {
            'foo': {
                expectation: {
                    'ci:matched_builder': {
                        'step_name': stats,
                    },
                },
            },
        }
        self.assertEqual(expectation_map, expected_expectation_map)
        self.assertEqual(
            unmatched_results, {
                'ci:unmatched_builder': [
                    data_types.Result('bar', [], 'Pass', 'step_name',
                                      'build_id'),
                ],
            })
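Note that |expectation_map| is filled in place (see the clear()/update() calls in Example #3), so the object passed in can be compared directly afterwards. A minimal sketch of walking the filled map, assuming the four-level nesting shown in |expected_expectation_map| above; loop variable names are illustrative:

    for expectation_file, builder_map in expectation_map.items():
        for expectation, step_map in builder_map.items():
            for builder, stats_map in step_map.items():
                for step, stats in stats_map.items():
                    # e.g. 'ci:matched_builder' / 'step_name' -> BuildStats
                    print(builder, step, stats)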
Example #5
    def testQueryFailureIsSurfaced(self):
        """Tests that a query failure is properly surfaced despite being async."""
        self._query_mock.side_effect = IndexError('failure')
        with self.assertRaises(IndexError):
            self._querier._FillExpectationMapForBuilders(
                data_types.TestExpectationMap(), ['matched_builder'], 'ci')

    def testEmptyMap(self):
        """Tests that providing an empty map is a no-op."""
        self.assertEqual(
            result_output._ConvertTestExpectationMapToStringDict(
                data_types.TestExpectationMap()), {})

    def testOutputResultsSmoketest(self):
        """Test that nothing blows up when outputting."""
        expectation_map = data_types.TestExpectationMap({
            'foo':
            data_types.ExpectationBuilderMap({
                data_types.Expectation('foo', ['win', 'intel'], 'RetryOnFailure'):
                data_types.BuilderStepMap({
                    'stale':
                    data_types.StepBuildStatsMap({
                        'all_pass':
                        uu.CreateStatsWithPassFails(2, 0),
                    }),
                }),
                data_types.Expectation('foo', ['linux'], 'Failure'):
                data_types.BuilderStepMap({
                    'semi_stale':
                    data_types.StepBuildStatsMap({
                        'all_pass':
                        uu.CreateStatsWithPassFails(2, 0),
                        'some_pass':
                        uu.CreateStatsWithPassFails(1, 1),
                        'none_pass':
                        uu.CreateStatsWithPassFails(0, 2),
                    }),
                }),
                data_types.Expectation('foo', ['mac'], 'Failure'):
                data_types.BuilderStepMap({
                    'active':
                    data_types.StepBuildStatsMap({
                        'none_pass':
                        uu.CreateStatsWithPassFails(0, 2),
                    }),
                }),
            }),
        })
        unmatched_results = {
            'builder': [
                data_types.Result('foo', ['win', 'intel'], 'Failure',
                                  'step_name', 'build_id'),
            ],
        }
        unmatched_expectations = {
            'foo_file': [
                data_types.Expectation('foo', ['linux'], 'RetryOnFailure'),
            ],
        }

        stale, semi_stale, active = expectation_map.SplitByStaleness()

        result_output.OutputResults(stale, semi_stale, active, {}, {}, 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, {}, 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'print', self._file_handle)

        result_output.OutputResults(stale, semi_stale, active, {}, {}, 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, {}, 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'html', self._file_handle)

    def testSemiStaleMap(self):
        """Tests that everything functions when regular data is provided."""
        expectation_map = data_types.TestExpectationMap({
            'expectation_file':
            data_types.ExpectationBuilderMap({
                data_types.Expectation('foo/test', ['win', 'intel'],
                                       ['RetryOnFailure']):
                data_types.BuilderStepMap({
                    'builder':
                    data_types.StepBuildStatsMap({
                        'all_pass':
                        uu.CreateStatsWithPassFails(2, 0),
                        'all_fail':
                        uu.CreateStatsWithPassFails(0, 2),
                        'some_pass':
                        uu.CreateStatsWithPassFails(1, 1),
                    }),
                }),
                data_types.Expectation('foo/test', ['linux', 'intel'],
                                       ['RetryOnFailure']):
                data_types.BuilderStepMap({
                    'builder':
                    data_types.StepBuildStatsMap({
                        'all_pass':
                        uu.CreateStatsWithPassFails(2, 0),
                    }),
                }),
                data_types.Expectation('foo/test', ['mac', 'intel'],
                                       ['RetryOnFailure']):
                data_types.BuilderStepMap({
                    'builder':
                    data_types.StepBuildStatsMap({
                        'all_fail':
                        uu.CreateStatsWithPassFails(0, 2),
                    }),
                }),
            }),
        })
        # TODO(crbug.com/1198237): Remove the Python 2 version once we are fully
        # switched to Python 3.
        if six.PY2:
            expected_output = {
                'expectation_file': {
                    'foo/test': {
                        '"RetryOnFailure" expectation on "win intel"': {
                            'builder': {
                                'Fully passed in the following': [
                                    'all_pass (2/2 passed)',
                                ],
                                'Never passed in the following': [
                                    'all_fail (0/2 passed)',
                                ],
                                'Partially passed in the following': {
                                    'some_pass (1/2 passed)': [
                                        data_types.BuildLinkFromBuildId(
                                            'build_id0'),
                                    ],
                                },
                            },
                        },
                        '"RetryOnFailure" expectation on "intel linux"': {
                            'builder': {
                                'Fully passed in the following': [
                                    'all_pass (2/2 passed)',
                                ],
                            },
                        },
                        '"RetryOnFailure" expectation on "mac intel"': {
                            'builder': {
                                'Never passed in the following': [
                                    'all_fail (0/2 passed)',
                                ],
                            },
                        },
                    },
                },
            }
        else:
            # Set ordering does not appear to be stable between test runs, as we can
            # get either order of tags. So, generate them now instead of hard coding
            # them.
            linux_tags = ' '.join(set(['linux', 'intel']))
            win_tags = ' '.join(set(['win', 'intel']))
            mac_tags = ' '.join(set(['mac', 'intel']))
            expected_output = {
                'expectation_file': {
                    'foo/test': {
                        '"RetryOnFailure" expectation on "%s"' % linux_tags: {
                            'builder': {
                                'Fully passed in the following': [
                                    'all_pass (2/2 passed)',
                                ],
                            },
                        },
                        '"RetryOnFailure" expectation on "%s"' % win_tags: {
                            'builder': {
                                'Fully passed in the following': [
                                    'all_pass (2/2 passed)',
                                ],
                                'Partially passed in the following': {
                                    'some_pass (1/2 passed)': [
                                        data_types.BuildLinkFromBuildId(
                                            'build_id0'),
                                    ],
                                },
                                'Never passed in the following': [
                                    'all_fail (0/2 passed)',
                                ],
                            },
                        },
                        '"RetryOnFailure" expectation on "%s"' % mac_tags: {
                            'builder': {
                                'Never passed in the following': [
                                    'all_fail (0/2 passed)',
                                ],
                            },
                        },
                    },
                },
            }

        str_dict = result_output._ConvertTestExpectationMapToStringDict(
            expectation_map)
        self.assertEqual(str_dict, expected_output)