def testOutputResultsSmoketest(self):
        """Test that nothing blows up when outputting."""
        expectation_map = {
            'foo': {
                data_types.Expectation('foo', ['win', 'intel'], 'RetryOnFailure'):
                {
                    'stale': {
                        'all_pass': uu.CreateStatsWithPassFails(2, 0),
                    },
                },
                data_types.Expectation('foo', ['linux'], 'Failure'): {
                    'semi_stale': {
                        'all_pass': uu.CreateStatsWithPassFails(2, 0),
                        'some_pass': uu.CreateStatsWithPassFails(1, 1),
                        'none_pass': uu.CreateStatsWithPassFails(0, 2),
                    },
                },
                data_types.Expectation('foo', ['mac'], 'Failure'): {
                    'active': {
                        'none_pass': uu.CreateStatsWithPassFails(0, 2),
                    },
                },
            },
        }
        unmatched_results = {
            'builder': [
                data_types.Result('foo', ['win', 'intel'], 'Failure',
                                  'step_name', 'build_id'),
            ],
        }
        unmatched_expectations = [
            data_types.Expectation('foo', ['linux'], 'RetryOnFailure')
        ]

        stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
            expectation_map)

        result_output.OutputResults(stale, semi_stale, active, {}, [], 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, [], 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'print', self._file_handle)

        result_output.OutputResults(stale, semi_stale, active, {}, [], 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, [], 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'html', self._file_handle)
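For context, uu.CreateStatsWithPassFails above is a unittest helper that builds a stats object with the requested numbers of passing and failing builds. A minimal, self-contained sketch of such a helper, using a hypothetical stand-in for the real BuildStats class (the actual unittest_utils implementation may differ):

class _FakeBuildStats(object):
    """Hypothetical stand-in for the real data_types.BuildStats."""

    def __init__(self):
        self.passed_builds = 0
        self.failed_builds = 0
        self.failure_links = set()

    def AddPassedBuild(self):
        self.passed_builds += 1

    def AddFailedBuild(self, build_id):
        self.failed_builds += 1
        self.failure_links.add(build_id)


def CreateStatsWithPassFails(passes, fails):
    # Record the requested number of passing and failing builds, giving each
    # failure a distinct id so it can be referenced in the output.
    stats = _FakeBuildStats()
    for _ in range(passes):
        stats.AddPassedBuild()
    for i in range(fails):
        stats.AddFailedBuild('build_id%d' % i)
    return stats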
Example #2
def main():
    args = ParseArgs()
    # TODO(crbug.com/1108016): Remove this warning once ResultDB is enabled on all
    # builders and there is enough data for the results to be trusted.
    WarnUserOfIncompleteRollout()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = queries.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders, args.suite, args.project,
        args.num_samples)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        queries.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders, args.suite,
                                                 args.project,
                                                 args.num_samples))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    result_output.OutputResults(test_expectation_map, unmatched,
                                unused_expectations, args.output_format)
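Each main() here relies on a ParseArgs helper that is not shown. A minimal argparse sketch covering the attributes this example reads from args (the flag names, defaults, and help text are assumptions, not the script's real interface):

import argparse


def ParseArgs():
    parser = argparse.ArgumentParser(
        description='Find stale test expectations using recent CI/try '
                    'results.')
    parser.add_argument('--expectation-file', required=True,
                        help='Path to the expectation file to check.')
    parser.add_argument('--test', dest='tests', action='append', default=[],
                        help='Limit the check to the given test(s).')
    parser.add_argument('--suite', required=True,
                        help='The test suite whose results should be queried.')
    parser.add_argument('--project', required=True,
                        help='The cloud project to query results from.')
    parser.add_argument('--num-samples', type=int, default=100,
                        help='Number of recent builds to sample per builder.')
    parser.add_argument('--output-format', choices=['print', 'html'],
                        default='print',
                        help='How the results should be reported.')
    return parser.parse_args()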
Example #3
def testOutputResultsUnsupportedFormat(self):
    """Tests that passing in an unsupported format is an error."""
    with self.assertRaises(RuntimeError):
        result_output.OutputResults(data_types.TestExpectationMap(),
                                    data_types.TestExpectationMap(),
                                    data_types.TestExpectationMap(), {}, [],
                                    'asdf')
Example #4
def main():
    args = ParseArgs()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))

    querier = queries.BigQueryQuerier(args.suite, args.project,
                                      args.num_samples, args.large_query_mode)
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = querier.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        querier.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
        test_expectation_map)
    result_output.OutputResults(stale, semi_stale, active, unmatched,
                                unused_expectations, args.output_format)

    affected_urls = set()
    stale_message = ''
    if args.remove_stale_expectations:
        stale_expectations = []
        for _, expectation_map in stale.items():
            stale_expectations.extend(expectation_map.keys())
        stale_expectations.extend(unused_expectations)
        affected_urls |= expectations.RemoveExpectationsFromFile(
            stale_expectations, args.expectation_file)
        stale_message += (
            'Stale expectations removed from %s. Stale comments, '
            'etc. may still need to be removed.\n' % args.expectation_file)

    if args.modify_semi_stale_expectations:
        affected_urls |= expectations.ModifySemiStaleExpectations(
            semi_stale, args.expectation_file)
        stale_message += ('Semi-stale expectations modified in %s. Stale '
                          'comments, etc. may still need to be removed.\n' %
                          args.expectation_file)

    if stale_message:
        print(stale_message)
    if affected_urls:
        result_output.OutputAffectedUrls(affected_urls)
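When remove_stale_expectations is set, the code above flattens the nested stale map by iterating each test's per-expectation map and collecting its keys. The same traversal pattern, shown standalone with plain hypothetical dicts:

stale = {
    'foo_test': {
        'expectation_a': {'builder_1': {'step': 'stats'}},
        'expectation_b': {'builder_1': {'step': 'stats'}},
    },
    'bar_test': {
        'expectation_c': {'builder_2': {'step': 'stats'}},
    },
}

stale_expectations = []
for _, expectation_map in stale.items():
    # Only the expectation objects (the dict keys) matter for removal; the
    # per-builder stats beneath them are ignored.
    stale_expectations.extend(expectation_map.keys())
assert sorted(stale_expectations) == [
    'expectation_a', 'expectation_b', 'expectation_c'
]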
Example #5
def main():
    args = ParseArgs()
    # TODO(crbug.com/1108016): Remove this warning once ResultDB is enabled on all
    # builders and there is enough data for the results to be trusted.
    WarnUserOfIncompleteRollout()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = queries.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders, args.suite, args.project,
        args.num_samples)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        queries.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders, args.suite,
                                                 args.project,
                                                 args.num_samples))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
        test_expectation_map)
    result_output.OutputResults(stale, semi_stale, active, unmatched,
                                unused_expectations, args.output_format)

    if args.remove_stale_expectations:
        stale_expectations = []
        for _, expectation_map in stale.items():
            stale_expectations.extend(expectation_map.keys())
        stale_expectations.extend(unused_expectations)
        removed_urls = expectations.RemoveExpectationsFromFile(
            stale_expectations, args.expectation_file)
        print(
            'Stale expectations removed from %s. Stale comments, etc. may still '
            'need to be removed.' % args.expectation_file)
        result_output.OutputRemovedUrls(removed_urls)
def testOutputResultsUnsupportedFormat(self):
    """Tests that passing in an unsupported format is an error."""
    with self.assertRaises(RuntimeError):
        result_output.OutputResults({}, {}, {}, {}, [], 'asdf')
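The unittests above write through self._file_handle, which is assumed to be provided by the test class's setUp. A minimal sketch of such a fixture using a temporary file (an assumption for illustration, not the actual Chromium test harness):

import os
import tempfile
import unittest


class ResultOutputUnittestBase(unittest.TestCase):
    def setUp(self):
        # A writable handle that OutputResults can print text or render HTML
        # into during the smoketests.
        self._file_handle = tempfile.NamedTemporaryFile(mode='w',
                                                        delete=False)

    def tearDown(self):
        self._file_handle.close()
        os.remove(self._file_handle.name)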